Add mfem v4.5.2 and related updates/tweaks in other packages (#36154)

* Add mfem v4.5.2 and related updates/tweaks in other packages

* [mfem] Add the release source link for MFEM v4.5.2

* [mfem] Remove 'goxberry' (his request) from MFEM's maintainers list
This commit is contained in:
Veselin Dobrev 2023-04-19 05:55:31 -07:00 committed by GitHub
parent ae909b3688
commit 6ca41cfbcb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 115 additions and 51 deletions

View File

@ -72,5 +72,10 @@ def cmake_args(self):
self.define_from_variant("BUILD_SHARED_LIBS", "shared"),
]
args.append("-Denable_openmp=%s" % ("ON" if "+openmp" in spec else "OFF"))
if "%cce" in spec:
# Assume the proper Cray CCE module (cce) is loaded:
craylibs_path = env["CRAYLIBS_" + env["MACHTYPE"].capitalize()]
env.setdefault("LDFLAGS", "")
env["LDFLAGS"] += " -Wl,-rpath," + craylibs_path
return args

View File

@ -39,6 +39,7 @@ class Camp(CMakePackage, CudaPackage, ROCmPackage):
maintainers("trws")
version("main", branch="main", submodules="True")
version("2022.10.1", sha256="2d12f1a46f5a6d01880fc075cfbd332e2cf296816a7c1aa12d4ee5644d386f02")
version("2022.03.2", sha256="bc4aaeacfe8f2912e28f7a36fc731ab9e481bee15f2c6daf0cb208eed3f201eb")
version("2022.03.0", sha256="e9090d5ee191ea3a8e36b47a8fe78f3ac95d51804f1d986d931e85b8f8dad721")
version("0.3.0", sha256="129431a049ca5825443038ad5a37a86ba6d09b2618d5fe65d35f83136575afdb")

View File

@ -131,6 +131,8 @@ def cmake_args(self):
sep = "" if "@:2.2.0" in spec else "_"
capabilities = " ".join("sm{0}{1}".format(sep, i) for i in cuda_arch)
options.append(define("GPU_TARGET", capabilities))
archs = ";".join("%s" % i for i in cuda_arch)
options.append(define("CMAKE_CUDA_ARCHITECTURES", archs))
if "@2.5.0" in spec:
options.append(define("MAGMA_SPARSE", False))

View File

@ -18,7 +18,7 @@ class Mfem(Package, CudaPackage, ROCmPackage):
homepage = "http://www.mfem.org"
git = "https://github.com/mfem/mfem.git"
maintainers("v-dobrev", "tzanio", "acfisher", "goxberry", "markcmiller86")
maintainers("v-dobrev", "tzanio", "acfisher", "markcmiller86")
test_requires_compiler = True
@ -48,6 +48,13 @@ class Mfem(Package, CudaPackage, ROCmPackage):
# other version.
version("develop", branch="master")
version(
"4.5.2",
sha256="7003c908c8265810ff97cb37531521b3aed24959975833a01ea05adfdb36e0f7",
url="https://bit.ly/mfem-4-5-2",
extension="tar.gz",
)
version(
"4.5.0",
sha256="4f201bec02fc5460a902596697b6c1deb7b15ac57c71f615b2ab4a8eb65665f7",
@ -372,7 +379,8 @@ class Mfem(Package, CudaPackage, ROCmPackage):
depends_on("raja@0.7.0:0.9.0", when="@4.0.0+raja")
depends_on("raja@0.10.0:0.12.1", when="@4.0.1:4.2.0+raja")
depends_on("raja@0.13.0", when="@4.3.0+raja")
depends_on("raja@0.14.0:", when="@4.4.0:+raja")
depends_on("raja@0.14.0:2022.03", when="@4.4.0:4.5.0+raja")
depends_on("raja@2022.10.3:", when="@4.5.2:+raja")
for sm_ in CudaPackage.cuda_arch_values:
depends_on(
"raja+cuda cuda_arch={0}".format(sm_), when="+raja+cuda cuda_arch={0}".format(sm_)
@ -528,7 +536,7 @@ def find_optional_library(name, prefix):
else:
mfem_mpiexec = "jsrun"
mfem_mpiexec_np = "-p"
elif "FLUX_JOB_ID" in os.environ:
elif "FLUX_EXEC_PATH" in os.environ:
mfem_mpiexec = "flux mini run"
mfem_mpiexec_np = "-n"

View File

@ -1,5 +1,11 @@
#!/bin/bash
dry_run=yes
# use 'dev-build' in "$mfem_src_dir":
spack_dev_build=no
mfem_src_dir=$HOME/mfem-spack
# Set a compiler to test with, e.g. '%gcc', '%clang', etc.
compiler=''
cuda_arch="70"
@ -8,14 +14,15 @@ rocm_arch="gfx908"
spack_jobs=''
# spack_jobs='-j 128'
mfem='mfem@4.5.0'${compiler}
mfem_dev='mfem@develop'${compiler}
mfem='mfem@4.5.2'${compiler}
# mfem_dev='mfem@develop'${compiler}
mfem_dev='mfem@4.5.2'${compiler}
backends='+occa+raja+libceed'
backends_specs='^occa~cuda ^raja~openmp'
# help the concretizer find suitable hdf5 version (conduit constraint)
hdf5_spec='^hdf5@1.8.19:1.8'
# ~fortran is needed for Cray Fortran linking with tcmalloc*
conduit_spec='^conduit~fortran'
# petsc spec
petsc_spec='^petsc+suite-sparse+mumps'
petsc_spec_cuda='^petsc+cuda+suite-sparse+mumps'
@ -24,7 +31,7 @@ superlu_spec_cuda='^superlu-dist+cuda cuda_arch='"${cuda_arch}"
superlu_spec_rocm='^superlu-dist+rocm amdgpu_target='"${rocm_arch}"
# strumpack spec without cuda (use version > 6.3.1)
strumpack_spec='^strumpack~slate~openmp~cuda'
strumpack_cuda_spec='^strumpack~slate~openmp'
strumpack_cuda_spec='^strumpack+cuda~slate~openmp'
strumpack_rocm_spec='^strumpack+rocm~slate~openmp~cuda'
builds=(
@ -33,28 +40,27 @@ builds=(
${mfem}'~mpi~metis~zlib'
${mfem}"$backends"'+superlu-dist+strumpack+suite-sparse+petsc+slepc+gslib \
+sundials+pumi+mpfr+netcdf+zlib+gnutls+libunwind+conduit+ginkgo+hiop \
'"$backends_specs $strumpack_spec $petsc_spec $hdf5_spec"
'"$backends_specs $strumpack_spec $petsc_spec $conduit_spec"
${mfem}'~mpi \
'"$backends"'+suite-sparse+sundials+gslib+mpfr+netcdf \
+zlib+gnutls+libunwind+conduit+ginkgo+hiop \
'"$backends_specs $hdf5_spec"' ^sundials~mpi'
'"$backends_specs $conduit_spec"' ^sundials~mpi'
# develop version, shared builds:
${mfem_dev}'+shared~static'
${mfem_dev}'+shared~static~mpi~metis~zlib'
# NOTE: Shared build with +gslib works on mac but not on linux
# TODO: add back '+gslib' when the above NOTE
# is addressed.
# TODO: add back '+gslib' when the above NOTE is addressed.
${mfem_dev}'+shared~static \
'"$backends"'+superlu-dist+strumpack+suite-sparse+petsc+slepc \
+sundials+pumi+mpfr+netcdf+zlib+gnutls+libunwind+conduit+ginkgo+hiop \
'"$backends_specs $strumpack_spec $petsc_spec $hdf5_spec"
'"$backends_specs $strumpack_spec $petsc_spec $conduit_spec"
# NOTE: Shared build with +gslib works on mac but not on linux
# TODO: add back '+gslib' when the above NOTE is addressed.
${mfem_dev}'+shared~static~mpi \
'"$backends"'+suite-sparse+sundials+mpfr+netcdf \
+zlib+gnutls+libunwind+conduit+ginkgo+hiop \
'"$backends_specs $hdf5_spec"' ^sundials~mpi'
'"$backends_specs $conduit_spec"' ^sundials~mpi'
)
builds2=(
@ -72,14 +78,17 @@ builds2=(
${mfem}'+netcdf'
${mfem}'+mpfr'
${mfem}'+gnutls'
${mfem}'+conduit~mpi'
${mfem}'+conduit'
${mfem}'+conduit~mpi'" $conduit_spec"
${mfem}'+conduit'" $conduit_spec"
${mfem}'+umpire'
${mfem}'+petsc'" $petsc_spec"
${mfem}'+petsc+slepc'" $petsc_spec"
${mfem}'+ginkgo'
${mfem}'+hiop'
${mfem}'+threadsafe'
# hypre+int64 requires 64-bit blas+lapack
# ${mfem}' ^hypre+int64'
${mfem}' ^hypre+mixedint'
#
# develop version
${mfem_dev}"$backends $backends_specs"
@ -95,14 +104,17 @@ builds2=(
${mfem_dev}'+netcdf'
${mfem_dev}'+mpfr'
${mfem_dev}'+gnutls'
${mfem_dev}'+conduit~mpi'
${mfem_dev}'+conduit'
${mfem_dev}'+conduit~mpi'" $conduit_spec"
${mfem_dev}'+conduit'" $conduit_spec"
${mfem_dev}'+umpire'
${mfem_dev}'+petsc'" $petsc_spec"
${mfem_dev}'+petsc+slepc'" $petsc_spec"
${mfem_dev}'+ginkgo'
${mfem_dev}'+hiop'
${mfem_dev}'+threadsafe'
# hypre+int64 requires 64-bit blas+lapack
# ${mfem_dev}' ^hypre+int64'
${mfem_dev}' ^hypre+mixedint'
)
@ -126,22 +138,19 @@ builds_cuda=(
+superlu-dist+strumpack+suite-sparse+gslib+petsc+slepc \
+sundials+pumi+mpfr+netcdf+zlib+gnutls+libunwind+conduit+ginkgo+hiop \
^raja+cuda+openmp ^hiop+shared'" $strumpack_cuda_spec"' \
'"$superlu_spec_cuda $petsc_spec_cuda $hdf5_spec"
'"$superlu_spec_cuda $petsc_spec_cuda $conduit_spec"
# hypre with cuda:
# TODO: restore '+libceed' when the libCEED CUDA unit tests take less time.
# TODO: restore '+superlu-dist $superlu_spec_cuda' when we support it with
# '^hypre+cuda'.
# TODO: add back "+strumpack $strumpack_cuda_spec" when it's supported.
# TODO: add back "+petsc+slepc $petsc_spec_cuda" when it works.
# NOTE: PETSc tests may need PETSC_OPTIONS="-use_gpu_aware_mpi 0"
# TODO: add back "+sundials" when it's supported with '^hypre+cuda'.
# TODO: remove "^hiop+shared" when the default static build is fixed.
${mfem}'+cuda+openmp+raja+occa cuda_arch='"${cuda_arch}"' \
+suite-sparse+gslib \
+superlu-dist+strumpack+suite-sparse+gslib \
+pumi+mpfr+netcdf+zlib+gnutls+libunwind+conduit+ginkgo+hiop \
^raja+cuda+openmp ^hiop+shared ^hypre+cuda \
'"$hdf5_spec"
'" $superlu_spec_cuda $strumpack_cuda_spec $conduit_spec"
#
# same builds as above with ${mfem_dev}
@ -166,22 +175,19 @@ builds_cuda=(
+superlu-dist+strumpack+suite-sparse+gslib+petsc+slepc \
+sundials+pumi+mpfr+netcdf+zlib+gnutls+libunwind+conduit+ginkgo+hiop \
^raja+cuda+openmp ^hiop+shared'" $strumpack_cuda_spec"' \
'"$superlu_spec_cuda $petsc_spec_cuda $hdf5_spec"
'"$superlu_spec_cuda $petsc_spec_cuda $conduit_spec"
# hypre with cuda:
# TODO: restore '+libceed' when the libCEED CUDA unit tests take less time.
# TODO: restore '+superlu-dist $superlu_spec_cuda' when we support it with
# '^hypre+cuda'.
# TODO: add back "+strumpack $strumpack_cuda_spec" when it's supported.
# TODO: add back "+petsc+slepc $petsc_spec_cuda" when it works.
# NOTE: PETSc tests may need PETSC_OPTIONS="-use_gpu_aware_mpi 0"
# TODO: add back "+sundials" when it's supported with '^hypre+cuda'.
# TODO: remove "^hiop+shared" when the default static build is fixed.
${mfem_dev}'+cuda+openmp+raja+occa cuda_arch='"${cuda_arch}"' \
+suite-sparse+gslib \
+superlu-dist+strumpack+suite-sparse+gslib \
+pumi+mpfr+netcdf+zlib+gnutls+libunwind+conduit+ginkgo+hiop \
^raja+cuda+openmp ^hiop+shared ^hypre+cuda \
'"$hdf5_spec"
'"$superlu_spec_cuda $strumpack_cuda_spec $conduit_spec"
)
@ -194,35 +200,30 @@ builds_rocm=(
# hypre with rocm:
${mfem}'+rocm+raja+occa+libceed amdgpu_target='"${rocm_arch}"' \
^raja+rocm~openmp ^occa~cuda ^hypre+rocm'
^raja+rocm~openmp ^occa~cuda~openmp ^hypre+rocm'
# hypre without rocm:
# TODO: add "+petsc+slepc $petsc_spec_rocm" when it is supported.
# TODO: add back '+conduit' when it is no longer linked with tcmalloc*.
# TODO: add back '+hiop' when it is no longer linked with tcmalloc* through
# its magma dependency.
# TODO: add back '+ginkgo' when the Ginkgo example works.
${mfem}'+rocm+openmp+raja+occa+libceed amdgpu_target='"${rocm_arch}"' \
+superlu-dist+strumpack+suite-sparse+gslib \
+sundials+pumi+mpfr+netcdf+zlib+gnutls+libunwind \
+sundials+pumi+mpfr+netcdf+zlib+gnutls+libunwind+conduit \
^raja+rocm~openmp ^occa~cuda'" $strumpack_rocm_spec"' \
'"$superlu_spec_rocm $hdf5_spec"
'"$superlu_spec_rocm $conduit_spec"
# hypre with rocm:
# TODO: restore '+superlu-dist $superlu_spec_rocm' when we support it with
# '^hypre+rocm'.
# TODO: add back "+strumpack $strumpack_rocm_spec" when it's supported.
# TODO: add back "+petsc+slepc $petsc_spec_rocm" when it works.
# TODO: add back '+conduit' when it is no longer linked with tcmalloc*.
# TODO: add back '+hiop' when it is no longer linked with tcmalloc* through
# its magma dependency.
# TODO: add back '+ginkgo' when the Ginkgo example works.
# TODO: add back "+sundials" when it's supported with '^hypre+rocm'.
${mfem}'+rocm+openmp+raja+occa+libceed amdgpu_target='"${rocm_arch}"' \
+suite-sparse+gslib \
+pumi+mpfr+netcdf+zlib+gnutls+libunwind \
+superlu-dist+strumpack+suite-sparse+gslib \
+pumi+mpfr+netcdf+zlib+gnutls+libunwind+conduit \
^raja+rocm~openmp ^occa~cuda ^hypre+rocm \
'"$hdf5_spec"
'"$strumpack_rocm_spec $superlu_spec_rocm $conduit_spec"
#
# same builds as above with ${mfem_dev}
@ -244,19 +245,51 @@ run_builds=("${builds[@]}" "${builds2[@]}")
# PETSc CUDA tests on Lassen need this:
# export PETSC_OPTIONS="-use_gpu_aware_mpi 0"
# spack files to clean in "$mfem_src_dir" when using 'dev-build'
clean_files=(
.spack_no_patches
install-time-test-log.txt
spack-build-\*.txt
)
if [[ "$spack_dev_build" != "yes" ]]; then
spack_action=(install)
else
spack_action=(dev-build -q -d "$mfem_src_dir")
fi
TIMEFORMAT=$'real: %3Rs (%lR) user: %3Us sys: %3Ss %%cpu: %P'
# main loop over specs:
for bld in "${run_builds[@]}"; do
eval bbb="\"${bld}\""
printf "\n%s\n" "${SEP}"
printf " %s\n" "${bld}"
printf "%s\n" "${SEP}"
spack spec --fresh -I $bbb || exit 1
printf "%s\n" "${sep}"
spack install $spack_jobs --fresh --test=root $bbb || exit 2
# echo ./bin/spack spec --fresh -I $bbb
# echo ./bin/spack install $spack_jobs --fresh --test=root $bbb
# echo
if [[ "$dry_run" != "yes" ]]; then
if [[ "$spack_dev_build" == "yes" ]]; then
echo "Cleaning $mfem_src_dir ..."
(cd "$mfem_src_dir" && make distclean && rm -f ${clean_files[@]})
printf "%s\n" "${sep}"
fi
time ./bin/spack spec --fresh -I $bbb || exit 1
printf "%s\n" "${sep}"
time ./bin/spack "${spack_action[@]}" $spack_jobs \
--fresh --test=root $bbb || exit 2
else # dry run
if [[ "$spack_dev_build" == "yes" ]]; then
printf '(cd "'"$mfem_src_dir"'" && make distclean && rm -f'
printf " %s)\n" "${clean_files[*]}"
fi
echo time ./bin/spack spec --fresh -I $bbb
echo time ./bin/spack "${spack_action[@]}" $spack_jobs \
--fresh --test=root $bbb
echo
fi
done
# Uninstall all mfem builds:

View File

@ -184,7 +184,7 @@ def write_makefile_inc(self):
makefile_conf.append("FC_PIC_FLAG={0}".format(fpic))
makefile_conf.append("CC_PIC_FLAG={0}".format(cpic))
opt_level = "3" if using_xl else ""
opt_level = "3" if using_xl else "2"
optc = ["-O{0}".format(opt_level)]
optf = ["-O{0}".format(opt_level)]

View File

@ -100,6 +100,9 @@ def setup_build_environment(self, env):
if "~opencl" in spec:
env.set("OCCA_OPENCL_ENABLED", "0")
if "~openmp" in spec:
env.set("OCCA_OPENMP_ENABLED", "0")
# Setup run-time environment for testing.
env.set("OCCA_VERBOSE", "1")
self._setup_runtime_flags(env)

View File

@ -20,6 +20,7 @@ class Raja(CachedCMakePackage, CudaPackage, ROCmPackage):
version("develop", branch="develop", submodules=False)
version("main", branch="main", submodules=False)
version("2022.10.4", tag="v2022.10.4", submodules=False)
version("2022.03.0", tag="v2022.03.0", submodules=False)
version("0.14.0", tag="v0.14.0", submodules="True")
version("0.13.0", tag="v0.13.0", submodules="True")
@ -62,12 +63,14 @@ class Raja(CachedCMakePackage, CudaPackage, ROCmPackage):
depends_on("camp@0.2.2:0.2.3", when="@0.14.0")
depends_on("camp@0.1.0", when="@0.10.0:0.13.0")
depends_on("camp@2022.03.2:", when="@2022.03.0:")
depends_on("camp@2022.03.2:2022.03", when="@2022.03.0:2022.03")
depends_on("camp@2022.10:", when="@2022.10:")
depends_on("camp@main", when="@main")
depends_on("camp@main", when="@develop")
depends_on("camp+openmp", when="+openmp")
depends_on("cmake@:3.20", when="+rocm", type="build")
depends_on("cmake@:3.20", when="@:2022.03+rocm", type="build")
depends_on("cmake@3.23:", when="@2022.10:+rocm", type="build")
depends_on("cmake@3.14:", when="@2022.03.0:")
depends_on("llvm-openmp", when="+openmp %apple-clang")

View File

@ -155,6 +155,12 @@ def cmake_args(self):
hipcc_flags.append("--amdgpu-target={0}".format(",".join(rocm_archs)))
args.append("-DHIP_HIPCC_FLAGS={0}".format(" ".join(hipcc_flags)))
if "%cce" in spec:
# Assume the proper Cray CCE module (cce) is loaded:
craylibs_path = env["CRAYLIBS_" + env["MACHTYPE"].capitalize()]
env.setdefault("LDFLAGS", "")
env["LDFLAGS"] += " -Wl,-rpath," + craylibs_path
return args
test_src_dir = "test"

View File

@ -228,6 +228,9 @@ def install(self, spec, prefix):
+ " -DCMAKE_LIBRARY_PATH=%s" % prefix.lib
]
if spec.satisfies("%gcc platform=darwin"):
make_args += ["LDLIBS=-lm"]
make_args.append("INSTALL=%s" % prefix)
# Filter the targets we're interested in
@ -254,7 +257,7 @@ def install(self, spec, prefix):
targets.append("SLIP_LU")
# Finally make and install
make("-C", "SuiteSparse_config", "library", "config")
make("-C", "SuiteSparse_config", "config", *make_args)
for target in targets:
make("-C", target, "library", *make_args)
make("-C", target, "install", *make_args)