Compare commits: develop...rsc-2025-0
33 commits

Commit SHA1s:
280017a9ba
b05e9fb6c7
8c47cefec6
eb264b1261
19cbb0301d
3bdd9f1d4d
547d35f6c8
de6c3dc218
f7204385f9
f82dead7cc
7f2b579ccd
812cbd4e02
8e3ab8a04c
8698691466
e7e6985b13
3a5db5623f
620fbc1b01
966eb502fe
b251e2e43f
269b3aa6bf
dcf19395fa
51930949c4
aaadce6995
d7301f66c9
2bcba95f75
d0c9e671eb
de9b3ec8b3
ba86a0eb48
dcedb2ead5
df040fddd9
02f76a8ca6
02fa070f0a
89c8d44418
@@ -2,9 +2,10 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections.abc
import enum
import os
import re
from typing import Tuple
from typing import Optional, Tuple

import llnl.util.filesystem as fs
import llnl.util.tty as tty
@@ -13,6 +14,7 @@
import spack.spec
import spack.util.prefix
from spack.directives import depends_on
from spack.util.executable import which_string

from .cmake import CMakeBuilder, CMakePackage

@@ -180,6 +182,64 @@ def initconfig_compiler_entries(self):

return entries

class Scheduler(enum.Enum):
LSF = enum.auto()
SLURM = enum.auto()
FLUX = enum.auto()

def get_scheduler(self) -> Optional[Scheduler]:
spec = self.pkg.spec

# Check for Spectrum-mpi, which always uses LSF or LSF MPI variant
if spec.satisfies("^spectrum-mpi") or spec["mpi"].satisfies("schedulers=lsf"):
return self.Scheduler.LSF

# Check for Slurm MPI variants
slurm_checks = ["+slurm", "schedulers=slurm", "process_managers=slurm"]
if any(spec["mpi"].satisfies(variant) for variant in slurm_checks):
return self.Scheduler.SLURM

# TODO improve this when MPI implementations support flux
# Do this check last to avoid using a flux wrapper present next to Slurm/ LSF schedulers
if which_string("flux") is not None:
return self.Scheduler.FLUX

return None

def get_mpi_exec(self) -> Optional[str]:
spec = self.pkg.spec
scheduler = self.get_scheduler()

if scheduler == self.Scheduler.LSF:
return which_string("lrun")

elif scheduler == self.Scheduler.SLURM:
if spec["mpi"].external:
return which_string("srun")
else:
return os.path.join(spec["slurm"].prefix.bin, "srun")

elif scheduler == self.Scheduler.FLUX:
flux = which_string("flux")
return f"{flux};run" if flux else None

elif hasattr(spec["mpi"].package, "mpiexec"):
return spec["mpi"].package.mpiexec

else:
mpiexec = os.path.join(spec["mpi"].prefix.bin, "mpirun")
if not os.path.exists(mpiexec):
mpiexec = os.path.join(spec["mpi"].prefix.bin, "mpiexec")
return mpiexec

def get_mpi_exec_num_proc(self) -> str:
scheduler = self.get_scheduler()

if scheduler in [self.Scheduler.FLUX, self.Scheduler.LSF, self.Scheduler.SLURM]:
return "-n"
else:
return "-np"

def initconfig_mpi_entries(self):
spec = self.pkg.spec

@@ -199,27 +259,10 @@ def initconfig_mpi_entries(self):
if hasattr(spec["mpi"], "mpifc"):
entries.append(cmake_cache_path("MPI_Fortran_COMPILER", spec["mpi"].mpifc))

# Check for slurm
using_slurm = False
slurm_checks = ["+slurm", "schedulers=slurm", "process_managers=slurm"]
if any(spec["mpi"].satisfies(variant) for variant in slurm_checks):
using_slurm = True

# Determine MPIEXEC
if using_slurm:
if spec["mpi"].external:
# Heuristic until we have dependents on externals
mpiexec = "/usr/bin/srun"
else:
mpiexec = os.path.join(spec["slurm"].prefix.bin, "srun")
elif hasattr(spec["mpi"].package, "mpiexec"):
mpiexec = spec["mpi"].package.mpiexec
else:
mpiexec = os.path.join(spec["mpi"].prefix.bin, "mpirun")
if not os.path.exists(mpiexec):
mpiexec = os.path.join(spec["mpi"].prefix.bin, "mpiexec")
mpiexec = self.get_mpi_exec()

if not os.path.exists(mpiexec):
if mpiexec is None or not os.path.exists(mpiexec.split(";")[0]):
msg = "Unable to determine MPIEXEC, %s tests may fail" % self.pkg.name
entries.append("# {0}\n".format(msg))
tty.warn(msg)
@@ -232,10 +275,7 @@ def initconfig_mpi_entries(self):
entries.append(cmake_cache_path("MPIEXEC", mpiexec))

# Determine MPIEXEC_NUMPROC_FLAG
if using_slurm:
entries.append(cmake_cache_string("MPIEXEC_NUMPROC_FLAG", "-n"))
else:
entries.append(cmake_cache_string("MPIEXEC_NUMPROC_FLAG", "-np"))
entries.append(cmake_cache_string("MPIEXEC_NUMPROC_FLAG", self.get_mpi_exec_num_proc()))

return entries

@@ -278,30 +318,18 @@ def initconfig_hardware_entries(self):
entries.append("# ROCm")
entries.append("#------------------{0}\n".format("-" * 30))

if spec.satisfies("^blt@0.7:"):
rocm_root = os.path.dirname(spec["llvm-amdgpu"].prefix)
entries.append(cmake_cache_path("ROCM_PATH", rocm_root))
else:
# Explicitly setting HIP_ROOT_DIR may be a patch that is no longer necessary
entries.append(cmake_cache_path("HIP_ROOT_DIR", "{0}".format(spec["hip"].prefix)))
llvm_bin = spec["llvm-amdgpu"].prefix.bin
llvm_prefix = spec["llvm-amdgpu"].prefix
# Some ROCm systems seem to point to /<path>/rocm-<ver>/ and
# others point to /<path>/rocm-<ver>/llvm
if os.path.basename(os.path.normpath(llvm_prefix)) != "llvm":
llvm_bin = os.path.join(llvm_prefix, "llvm/bin/")
entries.append(
cmake_cache_filepath(
"CMAKE_HIP_COMPILER", os.path.join(llvm_bin, "amdclang++")
)
)
rocm_root = os.path.dirname(spec["llvm-amdgpu"].prefix)
entries.append(cmake_cache_path("ROCM_PATH", rocm_root))

archs = self.spec.variants["amdgpu_target"].value
if archs[0] != "none":
arch_str = ";".join(archs)
entries.append(cmake_cache_string("CMAKE_HIP_ARCHITECTURES", arch_str))
entries.append(cmake_cache_string("AMDGPU_TARGETS", arch_str))
entries.append(cmake_cache_string("GPU_TARGETS", arch_str))

llvm_bin = spec["llvm-amdgpu"].prefix.bin
entries.append(
cmake_cache_filepath("CMAKE_HIP_COMPILER", os.path.join(llvm_bin, "amdclang++"))
)

if spec.satisfies("%gcc"):
entries.append(
@@ -310,6 +338,15 @@ def initconfig_hardware_entries(self):
)
)

# Extra definitions that might be required in other cases
if not spec.satisfies("^blt"):
entries.append(cmake_cache_path("HIP_ROOT_DIR", "{0}".format(spec["hip"].prefix)))

if archs[0] != "none":
arch_str = ";".join(archs)
entries.append(cmake_cache_string("AMDGPU_TARGETS", arch_str))
entries.append(cmake_cache_string("GPU_TARGETS", arch_str))

return entries

def std_initconfig_entries(self):
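For reference, the launcher-resolution order introduced in the hunks above (LSF when spectrum-mpi or schedulers=lsf is in play, then the Slurm-flavored MPI variants, then a flux binary on PATH, then the MPI's own mpirun/mpiexec) can be sketched standalone. The snippet below is an illustration only: it swaps Spack's which_string for shutil.which and replaces the spec object with plain arguments, so the names and fallbacks are simplified rather than the builder's real API.

```python
# Standalone sketch (not Spack code): mirrors the detection order added above,
# with shutil.which in place of spack.util.executable.which_string and plain
# arguments in place of a Spack spec. Paths and fallbacks are simplified.
import enum
import os
import shutil
from typing import Optional


class Scheduler(enum.Enum):
    LSF = enum.auto()
    SLURM = enum.auto()
    FLUX = enum.auto()


def detect_scheduler(mpi_name: str, mpi_variants: set) -> Optional[Scheduler]:
    # 1. spectrum-mpi (or an MPI built with schedulers=lsf) implies LSF.
    if mpi_name == "spectrum-mpi" or "schedulers=lsf" in mpi_variants:
        return Scheduler.LSF
    # 2. Any of the Slurm-flavored MPI variants implies Slurm.
    if mpi_variants & {"+slurm", "schedulers=slurm", "process_managers=slurm"}:
        return Scheduler.SLURM
    # 3. Checked last: a flux binary on PATH wins only if neither LSF nor Slurm matched.
    if shutil.which("flux") is not None:
        return Scheduler.FLUX
    return None


def resolve_mpiexec(mpi_name: str, mpi_variants: set, mpi_prefix_bin: str) -> Optional[str]:
    scheduler = detect_scheduler(mpi_name, mpi_variants)
    if scheduler is Scheduler.LSF:
        return shutil.which("lrun")
    if scheduler is Scheduler.SLURM:
        # The real builder prefers the slurm dependency's srun for non-external MPI.
        return shutil.which("srun")
    if scheduler is Scheduler.FLUX:
        flux = shutil.which("flux")
        # "<flux>;run" is later split on ";" before the existence check.
        return f"{flux};run" if flux else None
    # Fall back to the launcher shipped with the MPI itself.
    for launcher in ("mpirun", "mpiexec"):
        candidate = os.path.join(mpi_prefix_bin, launcher)
        if os.path.exists(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(resolve_mpiexec("openmpi", {"+slurm"}, "/usr/bin"))
```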
@@ -8,7 +8,6 @@
from os.path import join as pjoin

from spack.package import *
from spack.util.executable import which_string


def get_spec_path(spec, package_name, path_replacements={}, use_bin=False):
@@ -452,19 +451,6 @@ def initconfig_mpi_entries(self):
entries.append(cmake_cache_option("ENABLE_MPI", True))
if spec["mpi"].name == "spectrum-mpi":
entries.append(cmake_cache_string("BLT_MPI_COMMAND_APPEND", "mpibind"))

# Replace /usr/bin/srun path with srun flux wrapper path on TOSS 4
# TODO: Remove this logic by adding `using_flux` case in
# spack/lib/spack/spack/build_systems/cached_cmake.py:196 and remove hard-coded
# path to srun in same file.
if "toss_4" in self._get_sys_type(spec):
srun_wrapper = which_string("srun")
mpi_exec_index = [
index for index, entry in enumerate(entries) if "MPIEXEC_EXECUTABLE" in entry
]
if mpi_exec_index:
del entries[mpi_exec_index[0]]
entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE", srun_wrapper))
else:
entries.append(cmake_cache_option("ENABLE_MPI", False))
@@ -3,6 +3,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import os
import socket
import sys

from spack.package import *
@@ -149,6 +150,19 @@ def _get_sys_type(self, spec):
sys_type = env["SYS_TYPE"]
return sys_type

@property
def cache_name(self):
hostname = socket.gethostname()
if "SYS_TYPE" in env:
hostname = hostname.rstrip("1234567890")
return "{0}-{1}-{2}@{3}-{4}.cmake".format(
hostname,
self._get_sys_type(self.spec),
self.spec.compiler.name,
self.spec.compiler.version,
self.spec.dag_hash(8),
)

def initconfig_compiler_entries(self):
spec = self.spec
entries = super().initconfig_compiler_entries()
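The cache_name property added above derives the host-config file name from the host name, system type, compiler, and a short DAG hash, stripping trailing digits from the host name when SYS_TYPE is set so that numbered login nodes share one file. A small illustration of the resulting pattern follows; every value except the hostname is a made-up placeholder, not read from a real spec.

```python
# Illustration only: the values below are placeholders, not a real Spack spec.
import socket

hostname = socket.gethostname()
sys_type = "toss_4_x86_64_ib"   # would come from self._get_sys_type(self.spec)
compiler_name = "gcc"           # self.spec.compiler.name
compiler_version = "12.1.1"     # self.spec.compiler.version
dag_hash = "abcd1234"           # self.spec.dag_hash(8)

# With SYS_TYPE set, trailing digits are stripped so numbered login nodes
# (e.g. node1, node2, ...) all map to the same cache file.
hostname = hostname.rstrip("1234567890")

cache_name = "{0}-{1}-{2}@{3}-{4}.cmake".format(
    hostname, sys_type, compiler_name, compiler_version, dag_hash
)
print(cache_name)  # e.g. node-toss_4_x86_64_ib-gcc@12.1.1-abcd1234.cmake
```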
@@ -15,11 +15,17 @@ class Camp(CMakePackage, CudaPackage, ROCmPackage):
git = "https://github.com/LLNL/camp.git"
url = "https://github.com/LLNL/camp/archive/v0.1.0.tar.gz"

maintainers("trws", "adrienbernede")
maintainers("adrienbernede", "kab163", "trws")

license("BSD-3-Clause")

version("main", branch="main", submodules=False)
version(
"2025.03.0",
tag="v2025.03.0",
commit="ee0a3069a7ae72da8bcea63c06260fad34901d43",
submodules=False,
)
version(
"2024.07.0",
tag="v2024.07.0",
@@ -24,6 +24,30 @@ class Care(CachedCMakePackage, CudaPackage, ROCmPackage):

version("develop", branch="develop", submodules=False)
version("master", branch="master", submodules=False)
version(
"0.15.1",
tag="v0.15.1",
commit="f198c8b3d5dcfd274107b4263331818e86b50c7a",
submodules=False,
)
version(
"0.15.0",
tag="v0.15.0",
commit="aff9eea69b6d95342371aacc44b73bef785255f3",
submodules=False,
)
version(
"0.14.1",
tag="v0.14.1",
commit="110c6e5766ead59b231e2b05deecd7567874e907",
submodules=False,
)
version(
"0.14.0",
tag="v0.14.0",
commit="2784188a067abac35747d58b5a5daa1b3852756b",
submodules=False,
)
version(
"0.13.3",
tag="v0.13.3",
@@ -71,6 +95,7 @@ class Care(CachedCMakePackage, CudaPackage, ROCmPackage):
variant(
"implicit_conversions",
default=False,
when="@:0.14",
description="Enable implicit" "conversions to/from raw pointers",
)
variant("tests", default=False, description="Build tests")
@@ -111,7 +136,6 @@ class Care(CachedCMakePackage, CudaPackage, ROCmPackage):
depends_on("raja@2024.02.0:", when="@0.12.0:")
depends_on("raja@2022.10.5:", when="@0.10.0:")

# TODO: Add an enable_pick variant
depends_on("chai+enable_pick+raja")
depends_on("chai@2024.07.0:", when="@0.13.2:")
depends_on("chai@2024.02.2:", when="@0.13.1:")
@@ -129,12 +153,7 @@ class Care(CachedCMakePackage, CudaPackage, ROCmPackage):
depends_on("chai+openmp")

with when("+cuda"):
# WARNING: this package currently only supports an internal cub
# package. This will cause a race condition if compiled with another
# package that uses cub. TODO: have all packages point to the same external
# cub package.
depends_on("cub")

depends_on("umpire+cuda")
depends_on("raja+cuda")
depends_on("chai+cuda")
@@ -209,12 +228,6 @@ def initconfig_hardware_entries(self):

if spec.satisfies("+rocm"):
entries.append(cmake_cache_option("ENABLE_HIP", True))
archs = self.spec.variants["amdgpu_target"].value
if archs != "none":
arch_str = ",".join(archs)
entries.append(
cmake_cache_string("HIP_HIPCC_FLAGS", "--amdgpu-target={0}".format(arch_str))
)
else:
entries.append(cmake_cache_option("ENABLE_HIP", False))

@@ -250,6 +263,13 @@ def initconfig_package_entries(self):

entries.append(cmake_cache_string("CMAKE_BUILD_TYPE", spec.variants["build_type"].value))

# C++14
if spec.satisfies("@:0.14.1"):
entries.append(cmake_cache_string("BLT_CXX_STD", "c++14"))
# C++17
else:
entries.append(cmake_cache_string("BLT_CXX_STD", "c++17"))

entries.append(cmake_cache_option("ENABLE_TESTS", spec.satisfies("+tests")))
entries.append(cmake_cache_option("CARE_ENABLE_TESTS", spec.satisfies("+tests")))
# For tests to work, we also need BLT_ENABLE_TESTS to be on.
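The hunk above pins care releases up to 0.14.1 to C++14 and moves later releases to C++17. A minimal standalone sketch of that selection, together with the kind of set(... CACHE ...) line such an entry becomes in the generated host-config (the helper below only approximates Spack's cmake_cache_string output):

```python
# Standalone sketch: approximates, not reproduces, Spack's cmake_cache_string().
def cache_string(name: str, value: str) -> str:
    return 'set({0} "{1}" CACHE STRING "")'.format(name, value)


def blt_cxx_std(care_version: tuple) -> str:
    # care @:0.14.1 stays on c++14, newer releases use c++17 (mirrors the hunk above).
    return "c++14" if care_version <= (0, 14, 1) else "c++17"


for version in [(0, 13, 3), (0, 14, 1), (0, 15, 1)]:
    print(version, cache_string("BLT_CXX_STD", blt_cxx_std(version)))
```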
@@ -18,11 +18,17 @@ class Chai(CachedCMakePackage, CudaPackage, ROCmPackage):
git = "https://github.com/LLNL/CHAI.git"
tags = ["ecp", "e4s", "radiuss"]

maintainers("davidbeckingsale", "adayton1", "adrienbernede")
maintainers("adayton1", "adrienbernede", "davidbeckingsale", "kab163")

license("BSD-3-Clause")

version("develop", branch="develop", submodules=False)
version(
"2025.03.0",
tag="v2025.03.0",
commit="79f6414a00a89070054ac97baed47d21d10c83a4",
submodules=False,
)
version(
"2024.07.0",
tag="v2024.07.0",
@@ -104,7 +110,7 @@ class Chai(CachedCMakePackage, CudaPackage, ROCmPackage):
# We propagate the patch here.
patch("change_mpi_target_name_umpire_patch.patch", when="@2022.10.0:2023.06.0")

variant("enable_pick", default=False, description="Enable pick method")
variant("enable_pick", default=False, when="@:2024", description="Enable pick method")
variant(
"separable_compilation",
default=True,
@@ -99,6 +99,11 @@ class RajaPerf(CachedCMakePackage, CudaPackage, ROCmPackage):
description="Tests to run",
)
variant("caliper", default=False, description="Build with support for Caliper based profiling")
variant(
"lowopttest",
default=False,
description="For developers, lowers optimization level to pass tests with some compilers",
)

depends_on("blt")
depends_on("blt@0.6.2:", type="build", when="@2024.07.0:")
@@ -176,6 +181,9 @@ def initconfig_compiler_entries(self):
# Default entries are already defined in CachedCMakePackage, inherit them:
entries = super().initconfig_compiler_entries()

if spec.satisfies("+lowopttest"):
entries.append(cmake_cache_string("CMAKE_CXX_FLAGS_RELEASE", "-O1"))

if spec.satisfies("+rocm ^blt@:0.6"):
entries.insert(0, cmake_cache_path("CMAKE_CXX_COMPILER", spec["hip"].hipcc))

@@ -249,7 +257,7 @@ def initconfig_hardware_entries(self):
else:
entries.append(cmake_cache_option("ENABLE_HIP", False))

entries.append(cmake_cache_option("ENABLE_OPENMP_TARGET", "+omptarget" in spec))
entries.append(cmake_cache_option("RAJA_ENABLE_TARGET_OPENMP", "+omptarget" in spec))
if "+omptarget" in spec:
if "%xl" in spec:
entries.append(
@@ -320,7 +328,7 @@ def initconfig_package_entries(self):
entries.append(cmake_cache_option("BUILD_SHARED_LIBS", "+shared" in spec))
entries.append(cmake_cache_option("ENABLE_OPENMP", "+openmp" in spec))
entries.append(cmake_cache_option("RAJA_ENABLE_OPENMP_TASK", "+omptask" in spec))
entries.append(cmake_cache_option("ENABLE_SYCL", spec.satisfies("+sycl")))
entries.append(cmake_cache_option("RAJA_ENABLE_SYCL", spec.satisfies("+sycl")))

# C++17
if spec.satisfies("@2024.07.0:") and spec.satisfies("+sycl"):
@@ -25,12 +25,18 @@ class Raja(CachedCMakePackage, CudaPackage, ROCmPackage):
git = "https://github.com/LLNL/RAJA.git"
tags = ["radiuss", "e4s"]

maintainers("davidbeckingsale", "adrienbernede")
maintainers("adrienbernede", "davidbeckingsale", "kab163")

license("BSD-3-Clause")

version("develop", branch="develop", submodules=submodules)
version("main", branch="main", submodules=submodules)
version(
"2025.03.0",
tag="v2025.03.0",
commit="1d70abf171474d331f1409908bdf1b1c3fe19222",
submodules=submodules,
)
version(
"2024.07.0",
tag="v2024.07.0",
@@ -208,6 +214,12 @@ class Raja(CachedCMakePackage, CudaPackage, ROCmPackage):
description="Run all the tests, including those known to fail.",
)

variant(
"lowopttest",
default=False,
description="For developers, lowers optimization level to pass tests with some compilers",
)

depends_on("blt", type="build")
depends_on("blt@0.6.2:", type="build", when="@2024.02.1:")
depends_on("blt@0.6.1", type="build", when="@2024.02.0")
@@ -365,6 +377,9 @@ def initconfig_package_entries(self):

entries.append(cmake_cache_option("RAJA_ENABLE_SYCL", spec.satisfies("+sycl")))

if spec.satisfies("+lowopttest"):
entries.append(cmake_cache_string("CMAKE_CXX_FLAGS_RELEASE", "-O1"))

# C++17
if spec.satisfies("@2024.07.0:") and spec.satisfies("+sycl"):
entries.append(cmake_cache_string("BLT_CXX_STD", "c++17"))
@@ -18,11 +18,17 @@ class Umpire(CachedCMakePackage, CudaPackage, ROCmPackage):
git = "https://github.com/LLNL/Umpire.git"
tags = ["radiuss", "e4s"]

maintainers("davidbeckingsale", "adrienbernede")
maintainers("adrienbernede", "davidbeckingsale", "kab163")

license("MIT")

version("develop", branch="develop", submodules=False)
version(
"2025.03.0",
tag="v2025.03.0",
commit="1ed0669c57f041baa1f1070693991c3a7a43e7ee",
submodules=False,
)
version(
"2024.07.0",
tag="v2024.07.0",
@@ -193,6 +199,7 @@ class Umpire(CachedCMakePackage, CudaPackage, ROCmPackage):
variant("c", default=True, description="Build C API")
variant("mpi", default=False, description="Enable MPI support")
variant("ipc_shmem", default=False, description="Enable POSIX shared memory")
variant("mpi3_shmem", default=False, description="Enable MPI3 shared memory")
variant(
"sqlite_experimental",
default=False,
@@ -229,6 +236,7 @@ class Umpire(CachedCMakePackage, CudaPackage, ROCmPackage):
depends_on("cmake@3.8:", type="build")

depends_on("blt", type="build")
depends_on("blt@0.7.0:", type="build", when="@2025.03.0:")
depends_on("blt@0.6.2:", type="build", when="@2024.02.1:")
depends_on("blt@0.6.1", type="build", when="@2024.02.0")
depends_on("blt@0.5.3", type="build", when="@2023.06.0")
@@ -256,9 +264,9 @@ class Umpire(CachedCMakePackage, CudaPackage, ROCmPackage):
depends_on("sqlite", when="+sqlite_experimental")
depends_on("mpi", when="+mpi")

depends_on("fmt@9.1:", when="@2024.02.0:")
depends_on("fmt@9.1:11.0", when="@2024.02.0:")
# For some reason, we need c++ 17 explicitly only with intel
depends_on("fmt@9.1: cxxstd=17", when="@2024.02.0: %intel@19.1")
depends_on("fmt@9.1:11.0 cxxstd=17", when="@2024.02.0: %intel@19.1")

with when("@5.0.0:"):
with when("+cuda"):
@@ -291,6 +299,9 @@ class Umpire(CachedCMakePackage, CudaPackage, ROCmPackage):
"+rocm", when="+omptarget", msg="Cant support both rocm and openmp device backends at once"
)
conflicts("+ipc_shmem", when="@:5.0.1")
conflicts("+mpi3_shmem", when="@:2024.07.0")
conflicts("+mpi3_shmem", when="~mpi")
conflicts("+ipc_shmem", when="+mpi3_shmem")

conflicts("+sqlite_experimental", when="@:6.0.0")
conflicts("+sanitizer_tests", when="~asan")
@@ -386,6 +397,9 @@ def initconfig_mpi_entries(self):

entries = super().initconfig_mpi_entries()
entries.append(cmake_cache_option("ENABLE_MPI", spec.satisfies("+mpi")))
entries.append(
cmake_cache_option("UMPIRE_ENABLE_MPI3_SHARED_MEMORY", spec.satisfies("+mpi3_shmem"))
)

return entries
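The new conflicts above spell out which shared-memory options can coexist: +mpi3_shmem needs +mpi and a release newer than 2024.07.0, and it cannot be combined with +ipc_shmem. A small standalone check of those rules, with plain booleans standing in for Spack variants and a version tuple standing in for the when clauses:

```python
# Standalone sketch: plain booleans stand in for Spack variants/conflicts.
def shmem_config_errors(version: tuple, mpi: bool, ipc_shmem: bool, mpi3_shmem: bool) -> list:
    errors = []
    if mpi3_shmem and version <= (2024, 7, 0):
        errors.append("+mpi3_shmem requires a release newer than 2024.07.0")
    if mpi3_shmem and not mpi:
        errors.append("+mpi3_shmem requires +mpi")
    if mpi3_shmem and ipc_shmem:
        errors.append("+ipc_shmem and +mpi3_shmem cannot be combined")
    return errors


print(shmem_config_errors((2025, 3, 0), mpi=True, ipc_shmem=False, mpi3_shmem=True))   # []
print(shmem_config_errors((2024, 7, 0), mpi=False, ipc_shmem=True, mpi3_shmem=True))   # 3 errors
```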