CEED v5.0 release (#29710)

* ceed50: add ceed 5.0.0 and pumi 2.2.7

* libceed-0.10

* ceed50: add omegah

* omega-h: mpi and cuda builds work

* omega-h: fix style

* New package: libfms

* New version: gslib@1.0.7

* CEED: add some TODO items for the 5.0 release

* ceed: variant name consistent with package name

* LAGHOS: allow newer versions of MFEM to be used with v3.1

* LIBCEED: add missing 'install' target in 'install_targets'

* CEED: address some TODO items + some tweaks

* MFEM: add new variant for FMS (libfms)

* CEED: v5.0.0 depends on 'libfms' and 'mfem+fms'

* RATEL: add missing 'install' target in 'install_targets'

* CEED: add dependency for v5.0.0 on Ratel v0.1.2

* CEED: add Nek-related dependencies for ceed@5.0.0

* CEED: v5.0.0 depends on MAGMA v2.6.2

* libCEED: set the `CUDA_ARCH` makefile parameter

* libCEED: set the `HIP_ARCH` makefile parameter

Co-authored-by: Jed Brown <jed@jedbrown.org>
Co-authored-by: Veselin Dobrev <dobrev@llnl.gov>
Co-authored-by: Veselin Dobrev <v-dobrev@users.noreply.github.com>
Cameron Smith, 2022-05-13 21:29:02 -04:00, committed via GitHub
commit 4a1041dbc3 (parent ccab7bf4fd)
8 changed files with 180 additions and 18 deletions
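
For context, a rough usage sketch (not part of the commit; the cuda_arch/amdgpu_target values below are only examples): once these recipes land, the new release can be requested with specs along these lines, and the GPU settings are forwarded to libceed, magma, petsc, ratel, mfem, nekrs, etc. by the loops in the ceed recipe below.

    spack install ceed@5.0.0                             # CPU-only, default variants
    spack install ceed@5.0.0 +cuda cuda_arch=80          # forwards cuda_arch to the GPU-capable deps
    spack install ceed@5.0.0 +rocm amdgpu_target=gfx908  # ROCm equivalent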

var/spack/repos/builtin/packages/ceed/package.py

@@ -18,6 +18,7 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
maintainers = ['jedbrown', 'v-dobrev', 'tzanio']
version('5.0.0')
version('4.0.0')
version('3.0.0')
version('2.0.0')
@@ -32,11 +33,24 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
description='Build PETSc and HPGMG')
variant('pumi', default=True,
description='Build PUMI')
variant('omega-h', default=True,
description='Build Omega_h')
variant('quickbuild', default=True,
description='Speed-up the build by disabling variants in packages')
# TODO: Add 'int64' variant?
# LibCEED
# ceed 5.0
with when('@5.0.0'):
depends_on('libceed@0.10~occa')
depends_on('libceed~cuda', when='~cuda')
for arch in CudaPackage.cuda_arch_values:
depends_on('libceed+cuda+magma cuda_arch={0}'.format(arch),
when='+cuda cuda_arch={0}'.format(arch))
depends_on('libceed~rocm', when='~rocm')
for target in ROCmPackage.amdgpu_targets:
depends_on('libceed+rocm amdgpu_target={0}'.format(target),
when='+rocm amdgpu_target={0}'.format(target))
# ceed 4.0
depends_on('libceed@0.8~cuda', when='@4.0.0~cuda')
for arch in CudaPackage.cuda_arch_values:
@@ -64,7 +78,15 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
depends_on('libceed@0.2+occa', when='@1.0.0+occa')
depends_on('libceed@0.2~occa', when='@1.0.0~occa')
# FMS
# ceed-5.0
depends_on('libfms@0.2.0', when='@5.0.0')
depends_on('libfms@0.2.0~conduit', when='@5.0.0+quickbuild')
# OCCA
# ceed-5.0
depends_on('occa@1.1.0~cuda', when='@5.0.0+occa~cuda')
depends_on('occa@1.1.0+cuda', when='@5.0.0+occa+cuda')
# ceed-4.0
depends_on('occa@1.1.0~cuda', when='@4.0.0+occa~cuda')
depends_on('occa@1.1.0+cuda', when='@4.0.0+occa+cuda')
@@ -79,22 +101,24 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
depends_on('occa@1.0.0-alpha.5+cuda', when='@1.0.0+occa+cuda')
# NekRS
# ceed-4.0
depends_on('nekrs@21.0', when='@4.0.0+nek')
# ceed-4.0 and ceed-5.0
depends_on('nekrs@21.0%gcc', when='@4.0.0:5+nek')
for arch in CudaPackage.cuda_arch_values:
depends_on('nekrs@21.0+cuda cuda_arch={0}'.format(arch),
when='@4.0.0+nek+cuda cuda_arch={0}'.format(arch))
when='@4.0.0:5+nek+cuda cuda_arch={0}'.format(arch))
for target in ROCmPackage.amdgpu_targets:
depends_on('nekrs@21.0+rocm amdgpu_target={0}'.format(target),
when='@4.0.0+nek+rocm amdgpu_target={0}'.format(target))
when='@4.0.0:5+nek+rocm amdgpu_target={0}'.format(target))
# Nek5000, GSLIB, Nekbone, and NekCEM
# ceed-3.0 and ceed-4.0
depends_on('nek5000@19.0', when='@3.0.0:4+nek')
depends_on('nektools@19.0%gcc', when='@3.0.0:4+nek')
# ceed-5.0 - specific
depends_on('gslib@1.0.7', when='@5.0.0+nek')
# ceed-3.0, ceed-4.0, and ceed-5.0
depends_on('nek5000@19.0', when='@3.0.0:5+nek')
depends_on('nektools@19.0%gcc', when='@3.0.0:5+nek')
depends_on('gslib@1.0.6', when='@3.0.0:4+nek')
depends_on('nekbone@17.0', when='@3.0.0:4+nek')
depends_on('nekcem@c8db04b', when='@3.0.0:4+nek')
depends_on('nekbone@17.0', when='@3.0.0:5+nek')
depends_on('nekcem@c8db04b', when='@3.0.0:5+nek')
# ceed-2.0
depends_on('nek5000@17.0', when='@2.0.0+nek')
depends_on('nektools@17.0%gcc', when='@2.0.0+nek')
@@ -109,6 +133,21 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
depends_on('nekcem@0b8bedd', when='@1.0.0+nek')
# PETSc
# ceed 5.0
with when('@5.0.0+petsc'):
depends_on('petsc@3.17')
depends_on('ratel@0.1.2')
for arch in CudaPackage.cuda_arch_values:
depends_on('petsc+cuda cuda_arch={0}'.format(arch),
when='+cuda cuda_arch={0}'.format(arch))
depends_on('ratel+cuda cuda_arch={0}'.format(arch),
when='+cuda cuda_arch={0}'.format(arch))
for target in ROCmPackage.amdgpu_targets:
depends_on('petsc+rocm amdgpu_target={0}'.format(target),
when='+rocm amdgpu_target={0}'.format(target))
depends_on('ratel+rocm amdgpu_target={0}'.format(target),
when='+rocm amdgpu_target={0}'.format(target))
depends_on('petsc~hdf5~superlu-dist', when='+quickbuild')
# ceed 4.0
depends_on('petsc@3.15.0:3.15', when='@4.0.0:4+petsc')
for arch in CudaPackage.cuda_arch_values:
@@ -159,6 +198,13 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
depends_on('hpgmg@a0a5510df23b+fe', when='@1.0.0+petsc')
# MAGMA
# ceed 5.0
for arch in CudaPackage.cuda_arch_values:
depends_on('magma@2.6.2+cuda cuda_arch={0}'.format(arch),
when='@5.0.0+cuda cuda_arch={0}'.format(arch))
for target in ROCmPackage.amdgpu_targets:
depends_on('magma@2.6.2~cuda+rocm amdgpu_target={0}'.format(target),
when='@5.0.0+rocm amdgpu_target={0}'.format(target))
# ceed-4.0
for arch in CudaPackage.cuda_arch_values:
depends_on('magma@2.5.4 cuda_arch={0}'.format(arch),
@@ -171,6 +217,8 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
depends_on('magma@2.3.0', when='@1.0.0+cuda')
# PUMI
# ceed-5.0
depends_on('pumi@2.2.7', when='@5.0.0+pumi')
# ceed-4.0
depends_on('pumi@2.2.5', when='@4.0.0+pumi')
# ceed-3.0
@@ -180,7 +228,28 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
# ceed-1.0
depends_on('pumi@2.1.0', when='@1.0.0+pumi')
# Omega_h
# ceed-5.0
depends_on('omega-h@scorec.10.1.0', when='@5.0.0+omega-h')
depends_on('omega-h~trilinos', when='@5.0.0+omega-h+quickbuild')
# MFEM, Laghos, Remhos
# ceed 5.0
with when('@5.0.0+mfem'):
depends_on('mfem@4.4.0+mpi+examples+miniapps')
depends_on('mfem+petsc', when='+petsc')
depends_on('mfem+pumi', when='+pumi')
depends_on('mfem+gslib', when='+nek')
depends_on('mfem+libceed+fms')
for arch in CudaPackage.cuda_arch_values:
depends_on('mfem+cuda cuda_arch={0}'.format(arch),
when='+cuda cuda_arch={0}'.format(arch))
for target in ROCmPackage.amdgpu_targets:
depends_on('mfem+rocm amdgpu_target={0}'.format(target),
when='+rocm amdgpu_target={0}'.format(target))
depends_on('mfem+occa', when='+occa')
depends_on('laghos@3.1')
depends_on('remhos@1.0')
# ceed-4.0
depends_on('mfem@4.2.0+mpi+examples+miniapps', when='@4.0.0+mfem~petsc')
depends_on('mfem@4.2.0+mpi+petsc+examples+miniapps',

var/spack/repos/builtin/packages/gslib/package.py

@@ -13,6 +13,7 @@ class Gslib(Package):
git = "https://github.com/gslib/gslib.git"
version('develop', branch='master')
version('1.0.7', tag='v1.0.7')
version('1.0.6', tag='v1.0.6')
version('1.0.5', tag='v1.0.5')
version('1.0.4', tag='v1.0.4')

var/spack/repos/builtin/packages/laghos/package.py

@@ -34,7 +34,7 @@ class Laghos(MakefilePackage):
depends_on('mfem+mpi~metis', when='~metis')
depends_on('mfem@develop', when='@develop')
depends_on('mfem@4.2.0', when='@3.1')
depends_on('mfem@4.2.0:', when='@3.1')
depends_on('mfem@4.1.0:4.1', when='@3.0')
# Recommended mfem version for laghos v2.0 is: ^mfem@3.4.1-laghos-v2.0
depends_on('mfem@3.4.0:', when='@2.0')

var/spack/repos/builtin/packages/libceed/package.py

@@ -12,7 +12,7 @@ class Libceed(MakefilePackage, CudaPackage, ROCmPackage):
homepage = "https://github.com/CEED/libCEED"
git = "https://github.com/CEED/libCEED.git"
maintainers = ['jedbrown', 'v-dobrev', 'tzanio']
maintainers = ['jedbrown', 'v-dobrev', 'tzanio', 'jeremylt']
version('develop', branch='main')
version('0.10.1', tag='v0.10.1')
@@ -107,6 +107,8 @@ def common_make_opts(self):
if '+cuda' in spec:
makeopts += ['CUDA_DIR=%s' % spec['cuda'].prefix]
makeopts += ['CUDA_ARCH=sm_%s' %
spec.variants['cuda_arch'].value]
if spec.satisfies('@:0.4'):
nvccflags = ['-ccbin %s -Xcompiler "%s" -Xcompiler %s' %
(compiler.cxx, opt, compiler.cc_pic_flag)]
@@ -118,6 +120,8 @@ def common_make_opts(self):
if '+rocm' in spec:
makeopts += ['HIP_DIR=%s' % spec['hip'].prefix]
amdgpu_target = ','.join(spec.variants['amdgpu_target'].value)
makeopts += ['HIP_ARCH=%s' % amdgpu_target]
if spec.satisfies('@0.8'):
makeopts += ['HIPBLAS_DIR=%s' % spec['hipblas'].prefix]
@@ -138,7 +142,8 @@ def build_targets(self):
@property
def install_targets(self):
return ['prefix={0}'.format(self.prefix)] + self.common_make_opts
return ['install', 'prefix={0}'.format(self.prefix)] + \
self.common_make_opts
def check(self):
make('prove', *self.common_make_opts, parallel=False)
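
To illustrate the two new makefile parameters (a sketch only; prefixes are placeholders and other options are omitted): for a spec like libceed@0.10+cuda cuda_arch=80 the build and install steps now pass roughly

    make CUDA_DIR=<cuda prefix> CUDA_ARCH=sm_80 ...
    make install prefix=<libceed prefix> CUDA_DIR=<cuda prefix> CUDA_ARCH=sm_80 ...

and a spec like libceed@0.10+rocm amdgpu_target=gfx908 passes the analogous HIP_DIR=<hip prefix> HIP_ARCH=gfx908 pair.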

var/spack/repos/builtin/packages/libfms/package.py (new file)

@@ -0,0 +1,59 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libfms(CMakePackage):
"""Field and Mesh Specification (FMS) library"""
homepage = "https://github.com/CEED/FMS"
git = "https://github.com/CEED/FMS.git"
tags = ['FEM', 'Meshes', 'Fields', 'High-order', 'I/O', 'Data-exchange']
maintainers = ['v-dobrev', 'tzanio', 'cwsmith']
version('develop', branch='master')
version('0.2.0', tag='v0.2')
variant('conduit', default=True,
description='Build with Conduit I/O support')
variant('shared', default=True,
description='Build shared libraries')
depends_on('cmake@3.1:', type='build')
depends_on('conduit@0.7.1:', when='+conduit')
def cmake_args(self):
args = []
args.extend([
self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
])
if '+conduit' in self.spec:
args.extend([
self.define('CONDUIT_DIR', self.spec['conduit'].prefix)
])
return args
@property
def headers(self):
"""Export the FMS headers.
Sample usage: spec['libfms'].headers.cpp_flags
"""
fms_h_names = ['fms', 'fmsio']
hdrs = find_headers(fms_h_names, self.prefix.include, recursive=False)
return hdrs or None # Raise an error if no headers are found
@property
def libs(self):
"""Export the FMS library.
Sample usage: spec['libfms'].libs.ld_flags
"""
is_shared = '+shared' in self.spec
libs = find_libraries('libfms', root=self.prefix, shared=is_shared,
recursive=True)
return libs or None # Raise an error if no libs are found
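
A minimal sketch of how a dependent recipe can consume the two properties above (the package below is hypothetical; MFEM's actual use of them appears in the mfem diff further down):

    from spack import *


    class FmsDemo(MakefilePackage):
        """Hypothetical consumer of the libfms headers/libs properties."""

        # version/url omitted -- only the property usage is of interest here
        depends_on('libfms@0.2.0:')

        @property
        def build_targets(self):
            fms = self.spec['libfms']
            return ['FMS_OPT=%s' % fms.headers.cpp_flags,  # e.g. -I<prefix>/include
                    'FMS_LIB=%s' % fms.libs.ld_flags]      # e.g. -L<prefix>/lib -lfms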

var/spack/repos/builtin/packages/mfem/package.py

@@ -102,10 +102,10 @@ class Mfem(Package, CudaPackage, ROCmPackage):
description='Build static library')
variant('shared', default=False,
description='Build shared library')
variant('mpi', default=True,
variant('mpi', default=True, sticky=True,
description='Enable MPI parallelism')
# Can we make the default value for 'metis' to depend on the 'mpi' value?
variant('metis', default=True,
variant('metis', default=True, sticky=True,
description='Enable METIS support')
variant('openmp', default=False,
description='Enable OpenMP parallelism')
@@ -153,6 +153,8 @@ class Mfem(Package, CudaPackage, ROCmPackage):
description='Enable secure sockets using GnuTLS')
variant('libunwind', default=False,
description='Enable backtrace on error support using Libunwind')
variant('fms', default=False, when='@4.3.0:',
description='Enable FMS I/O support')
# TODO: SIMD, Ginkgo, ADIOS2, HiOp, MKL CPardiso, Axom/Sidre
variant('timer', default='auto',
values=('auto', 'std', 'posix', 'mac', 'mpi'),
@@ -287,6 +289,7 @@ class Mfem(Package, CudaPackage, ROCmPackage):
depends_on('gnutls', when='+gnutls')
depends_on('conduit@0.3.1:,master:', when='+conduit')
depends_on('conduit+mpi', when='+conduit+mpi')
depends_on('libfms@0.2.0:', when='+fms')
# The MFEM 4.0.0 SuperLU interface fails when using hypre@2.16.0 and
# superlu-dist@6.1.1. See https://github.com/mfem/mfem/issues/983.
@@ -486,6 +489,7 @@ def find_optional_library(name, prefix):
'MFEM_USE_AMGX=%s' % yes_no('+amgx'),
'MFEM_USE_CEED=%s' % yes_no('+libceed'),
'MFEM_USE_UMPIRE=%s' % yes_no('+umpire'),
'MFEM_USE_FMS=%s' % yes_no('+fms'),
'MFEM_MPIEXEC=%s' % mfem_mpiexec,
'MFEM_MPIEXEC_NP=%s' % mfem_mpiexec_np]
@@ -830,6 +834,12 @@ def find_optional_library(name, prefix):
'CONDUIT_OPT=%s' % conduit_opt_flags,
'CONDUIT_LIB=%s' % ld_flags_from_library_list(libs)]
if '+fms' in spec:
libfms = spec['libfms']
options += [
'FMS_OPT=%s' % libfms.headers.cpp_flags,
'FMS_LIB=%s' % ld_flags_from_library_list(libfms.libs)]
make('config', *options, parallel=False)
make('info', parallel=False)
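
A hedged usage note (not part of the commit): the new variant can be enabled with specs such as

    spack install mfem@4.4.0 +mpi +fms
    spack install mfem@4.4.0 +fms ^libfms~conduit   # lighter build without the Conduit I/O backend

which makes the config step above emit MFEM_USE_FMS=YES together with FMS_OPT/FMS_LIB values taken from the libfms headers and libs properties.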

var/spack/repos/builtin/packages/omega-h/package.py

@@ -4,7 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class OmegaH(CMakePackage):
class OmegaH(CMakePackage, CudaPackage):
"""Omega_h is a C++11 library providing data structures and algorithms
for adaptive discretizations. Its specialty is anisotropic triangle and
tetrahedral mesh adaptation. It runs efficiently on most modern HPC
@@ -45,6 +45,12 @@ class OmegaH(CMakePackage):
depends_on('mpi', when='+mpi')
depends_on('trilinos +kokkos', when='+trilinos')
depends_on('zlib', when='+zlib')
# Note: '+cuda' and 'cuda_arch' variants are added by the CudaPackage
depends_on('cuda', when='+cuda')
conflicts('cuda@11.2:', when='@scorec.10.1.0:', msg='Thrust is broken in CUDA >= 11.2.* see https://github.com/sandialabs/omega_h/issues/366')
# the sandia repo has a fix for cuda > 11.2 support
# see github.com/sandialabs/omega_h/pull/373
conflicts('cuda@11.2:', when='@:9.34.4', msg='Thrust is broken in CUDA >= 11.2.* see https://github.com/sandialabs/omega_h/issues/366')
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86610
conflicts('%gcc@8:8.2', when='@:9.22.1')
@@ -66,10 +72,21 @@ def cmake_args(self):
args.append('-DBUILD_SHARED_LIBS:BOOL=OFF')
if '+mpi' in self.spec:
args.append('-DOmega_h_USE_MPI:BOOL=ON')
ver = self.spec.version
# old versions don't call find_package(MPI)
if ver < Version('9.33.2') and 'scorec' not in str(ver):
args.append('-DCMAKE_CXX_COMPILER:FILEPATH={0}'.format(
self.spec['mpi'].mpicxx))
else:
args.append('-DOmega_h_USE_MPI:BOOL=OFF')
if '+cuda' in self.spec:
args.append('-DOmega_h_USE_CUDA:BOOL=ON')
cuda_arch_list = self.spec.variants['cuda_arch'].value
cuda_arch = cuda_arch_list[0]
if cuda_arch != 'none':
args.append('-DOmega_h_CUDA_ARCH={0}'.format(cuda_arch))
else:
args.append('-DOmega_h_USE_CUDA:BOOL=OFF')
if '+trilinos' in self.spec:
args.append('-DOmega_h_USE_Trilinos:BOOL=ON')
if '+zlib' in self.spec:

var/spack/repos/builtin/packages/ratel/package.py

@@ -64,7 +64,8 @@ def build_targets(self):
@property
def install_targets(self):
return ['prefix={0}'.format(self.prefix)] + self.common_make_opts
return ['install', 'prefix={0}'.format(self.prefix)] + \
self.common_make_opts
def check(self):
make('prove', *self.common_make_opts, parallel=False)