mfem: fix self-referential dependencies (#42487)

This commit is contained in:
Massimiliano Culpo 2024-02-10 13:08:13 +01:00 committed by GitHub
parent 9b42f9ab18
commit 686d1bc1ea
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -260,24 +260,21 @@ class Mfem(Package, CudaPackage, ROCmPackage):
     depends_on("mpi", when="+mpi")
     depends_on("hipsparse", when="@4.4.0:+rocm")
-    depends_on("hypre@2.10.0:2.13", when="@:3.3+mpi")
-    depends_on("hypre@:2.20.0", when="@3.4:4.2+mpi")
-    depends_on("hypre@:2.23.0", when="@4.3.0+mpi")
-    depends_on("hypre", when="+mpi")
-    # Propagate 'cuda_arch' to 'hypre' without propagating the '+cuda'
-    # variant because we want to allow 'mfem+cuda ^hypre~cuda':
+    with when("+mpi"):
+        depends_on("hypre")
+        depends_on("hypre@2.10.0:2.13", when="@:3.3")
+        depends_on("hypre@:2.20.0", when="@3.4:4.2")
+        depends_on("hypre@:2.23.0", when="@4.3.0")
+
+    # If hypre is built with +cuda, propagate cuda_arch
+    requires("^hypre@2.22.1:", when="+mpi+cuda ^hypre+cuda")
     for sm_ in CudaPackage.cuda_arch_values:
-        depends_on(
-            "hypre@2.22.1:+cuda cuda_arch={0}".format(sm_),
-            when="+mpi+cuda cuda_arch={0} ^hypre+cuda".format(sm_),
-        )
-    # Propagate 'amdgpu_target' to 'hypre' without propagating the '+rocm'
-    # variant because we want to allow 'mfem+rocm ^hypre~rocm':
+        requires(f"^hypre cuda_arch={sm_}", when=f"+mpi+cuda cuda_arch={sm_} ^hypre+cuda")
+    # If hypre is built with +rocm, propagate amdgpu_target
+    requires("^hypre@2.23.0: ", when="+mpi+rocm ^hypre+rocm")
     for gfx in ROCmPackage.amdgpu_targets:
-        depends_on(
-            "hypre@2.23.0:+rocm amdgpu_target={0}".format(gfx),
-            when="+mpi+rocm amdgpu_target={0} ^hypre+rocm".format(gfx),
-        )
+        requires(f"^hypre amdgpu_target={gfx}", when=f"+mpi+rocm amdgpu_target={gfx} ^hypre+rocm")
     depends_on("metis", when="+metis")
     depends_on("blas", when="+lapack")
@@ -311,19 +308,17 @@ class Mfem(Package, CudaPackage, ROCmPackage):
     depends_on("gslib@1.0.7:", when="@4.3.0:+gslib")
     depends_on("suite-sparse", when="+suite-sparse")
     depends_on("superlu-dist", when="+superlu-dist")
-    # Propagate 'cuda_arch' to 'superlu-dist' without propagating the '+cuda'
-    # variant so we can build 'mfem+cuda+superlu-dist ^superlu-dist~cuda':
+    # If superlu-dist is built with +cuda, propagate cuda_arch
     for sm_ in CudaPackage.cuda_arch_values:
-        depends_on(
-            "superlu-dist+cuda cuda_arch={0}".format(sm_),
-            when="+superlu-dist+cuda cuda_arch={0} ^superlu-dist+cuda".format(sm_),
-        )
+        requires(
+            f"^superlu-dist cuda_arch={sm_}",
+            when=f"+superlu-dist+cuda cuda_arch={sm_} ^superlu-dist+cuda",
+        )
-    # Propagate 'amdgpu_target' to 'superlu-dist' without propagating the '+rocm'
-    # variant so we can build 'mfem+rocm+superlu-dist ^superlu-dist~rocm':
+    # If superlu-dist is built with +rocm, propagate amdgpu_target
     for gfx in ROCmPackage.amdgpu_targets:
-        depends_on(
-            "superlu-dist+rocm amdgpu_target={0}".format(gfx),
-            when="+superlu-dist+rocm amdgpu_target={0} ^superlu-dist+rocm".format(gfx),
-        )
+        requires(
+            f"^superlu-dist+rocm amdgpu_target={gfx}",
+            when=f"+superlu-dist+rocm amdgpu_target={gfx} ^superlu-dist+rocm",
+        )
     depends_on("strumpack@3.0.0:", when="+strumpack~shared")
     depends_on("strumpack@3.0.0:+shared", when="+strumpack+shared")
@@ -342,27 +337,17 @@ class Mfem(Package, CudaPackage, ROCmPackage):
     # with MUMPS is not strictly required, so we do not require it here.
     depends_on("petsc@3.8:+mpi+double+hypre", when="+petsc")
     depends_on("slepc@3.8.0:", when="+slepc")
-    # Propagate 'cuda_arch' to 'petsc'/'slepc' without propagating the '+cuda'
-    # variant because we want to allow 'mfem+cuda+petsc ^petsc~cuda':
+    # If petsc is built with +cuda, propagate cuda_arch to petsc and slepc
     for sm_ in CudaPackage.cuda_arch_values:
-        depends_on(
-            "petsc+cuda cuda_arch={0}".format(sm_),
-            when="+cuda+petsc cuda_arch={0} ^petsc+cuda".format(sm_),
-        )
-        depends_on(
-            "slepc+cuda cuda_arch={0}".format(sm_),
-            when="+cuda+slepc cuda_arch={0} ^petsc+cuda".format(sm_),
-        )
-    # Propagate 'amdgpu_target' to 'petsc'/'slepc' without propagating the
-    # '+rocm' variant because we want to allow 'mfem+rocm+petsc ^petsc~rocm':
+        requires(f"^petsc cuda_arch={sm_}", when=f"+cuda+petsc cuda_arch={sm_} ^petsc+cuda")
+        depends_on(f"slepc+cuda cuda_arch={sm_}", when=f"+cuda+slepc cuda_arch={sm_} ^petsc+cuda")
+    # If petsc is built with +rocm, propagate amdgpu_target to petsc and slepc
     for gfx in ROCmPackage.amdgpu_targets:
-        depends_on(
-            "petsc+rocm amdgpu_target={0}".format(gfx),
-            when="+rocm+petsc amdgpu_target={0} ^petsc+rocm".format(gfx),
-        )
+        requires(
+            f"^petsc amdgpu_target={gfx}", when=f"+rocm+petsc amdgpu_target={gfx} ^petsc+rocm"
+        )
         depends_on(
-            "slepc+rocm amdgpu_target={0}".format(gfx),
-            when="+rocm+slepc amdgpu_target={0} ^petsc+rocm".format(gfx),
+            f"slepc+rocm amdgpu_target={gfx}", when=f"+rocm+slepc amdgpu_target={gfx} ^petsc+rocm"
         )
     depends_on("mpfr", when="+mpfr")
     depends_on("netcdf-c@4.1.3:", when="+netcdf")