Compare commits

...

13 Commits

Author SHA1 Message Date
Axel Huebl
22d486aab5 WarpX: Patch no-MPI & Lib Install (#34913)
Fixes WarpX issues:
- https://github.com/ECP-WarpX/WarpX/pull/3134
- https://github.com/ECP-WarpX/WarpX/pull/3141

and uses GitHub patch URLs directly instead of storing
patch copies.
2023-01-13 12:04:29 -08:00
Hans Johansen
6040c82740 Adding new package bricks for x86, cuda (#30863)
* Adding new package bricks for x86, cuda

* Fixed complaints from "spack style" that CI found

* add license comment at top

Co-authored-by: drhansj <drhansj@berkeley.edu>
Co-authored-by: eugeneswalker <38933153+eugeneswalker@users.noreply.github.com>
2022-05-26 07:40:33 -07:00
eugeneswalker
19eb982d53 tau: add v2.31.1 (#30820) 2022-05-25 07:40:25 -07:00
Danny McClanahan
a681fd7b42 Introduce GroupedExceptionHandler and use it to simplify bootstrap error handling (#30192) 2022-05-15 10:59:11 +00:00
Alberto Invernizzi
f40f1b5c7c Fix for spack stage command not extracting packages in custom paths (#30448) 2022-05-15 12:13:42 +02:00
Michael Kuhn
edd3cf0b17 qt: add 5.15.4 (#30656) 2022-05-14 23:34:06 -06:00
Michael Kuhn
ff03e2ef4c uninstall: fix dependency check (#30674)
The dependency check currently checks whether there are only build
dependencies left for a particular package. However, the database also
contains uninstalled packages, which can cause the check to fail.

For instance, with `bison` and `flex` having already been uninstalled,
`m4` will have the following dependents:
```
bison ('build', 'run')--> m4
flex ('build',)--> m4
libnl ('build',)--> m4
```
`bison` and `flex` should be ignored in this case because they are not
installed anymore.

Fixes #30673
2022-05-14 18:01:29 -07:00
Zack Galbreath
bee311edf3 Update GitLab environment variable name (#30671)
Use the IAM credentials that correspond to our new binary mirror
(s3://spack-binaries vs. s3://spack-binaries-develop)
2022-05-14 16:33:32 -06:00
Jen Herting
73b69cfeec [py-pyworld] Limiting numpy version. See: https://zenn.dev/ymd_h/articles/934a90e1468a05 (#30670) 2022-05-14 09:22:31 -05:00
Cameron Smith
4a1041dbc3 CEED v5.0 release (#29710)
* ceed50: add ceed 5.0.0 and pumi 2.2.7

* libceed-0.10

* ceed50: add omegah

* omega-h: mpi and cuda builds work

* omega-h: fix style

* New package: libfms

* New version: gslib@1.0.7

CEED: add some TODO items for the 5.0 release

* ceed: variant name consistent with package name

* LAGHOS: allow newer versions of MFEM to be used with v3.1

* LIBCEED: add missing 'install' target in 'install_targets'

* CEED: address some TODO items + some tweaks

* MFEM: add new variant for FMS (libfms)

* CEED: v5.0.0 depends on 'libfms' and 'mfem+fms'

* RATEL: add missing 'install' target in 'install_targets'

* CEED: add dependency for v5.0.0 on Ratel v0.1.2

* CEED: add Nek-related dependencies for ceed@5.0.0

* CEED: v5.0.0 depends on MAGMA v2.6.2

* libCEED: set the `CUDA_ARCH` makefile parameter

* libCEED: set the `HIP_ARCH` makefile parameter

Co-authored-by: Jed Brown <jed@jedbrown.org>
Co-authored-by: Veselin Dobrev <dobrev@llnl.gov>
Co-authored-by: Veselin Dobrev <v-dobrev@users.noreply.github.com>
2022-05-13 18:29:02 -07:00
Jen Herting
ccab7bf4fd New package: py-pyworld (#28641)
* espnet first build with depends

* added cython>=0.24.0' and type='build'

* [py-pyworld] updated copyright

Co-authored-by: Sid Pendelberry <sid@rit.edu>
2022-05-13 17:19:58 -05:00
John W. Parent
e24e71be6a Preserve Permissions on .zip extraction (#30407)
#24556 merged in support for extracting .zip files via Python's ZipFile.
However, as per #30200, ZipFile does not preserve the file permissions of
the extracted contents. This PR returns to using the `unzip`
executable on non-Windows systems (as was the case before #24556)
and now uses `tar` on Windows to extract .zip files.
2022-05-13 13:38:05 -07:00
kwryankrattiger
72d83a6f94 Ascent: Patch 0.8.0 for finding ADIOS2. (#30609) 2022-05-13 13:26:53 -07:00
25 changed files with 578 additions and 111 deletions

View File

@@ -11,7 +11,9 @@
import os
import re
import sys
import traceback
from datetime import datetime, timedelta
from typing import List, Tuple
import six
from six import string_types
@@ -1009,3 +1011,64 @@ def __repr__(self):
def __str__(self):
return str(self.data)
class GroupedExceptionHandler(object):
"""A generic mechanism to coalesce multiple exceptions and preserve tracebacks."""
def __init__(self):
self.exceptions = [] # type: List[Tuple[str, Exception, List[str]]]
def __bool__(self):
"""Whether any exceptions were handled."""
return bool(self.exceptions)
def forward(self, context):
# type: (str) -> GroupedExceptionForwarder
"""Return a contextmanager which extracts tracebacks and prefixes a message."""
return GroupedExceptionForwarder(context, self)
def _receive_forwarded(self, context, exc, tb):
# type: (str, Exception, List[str]) -> None
self.exceptions.append((context, exc, tb))
def grouped_message(self, with_tracebacks=True):
# type: (bool) -> str
"""Print out an error message coalescing all the forwarded errors."""
each_exception_message = [
'{0} raised {1}: {2}{3}'.format(
context,
exc.__class__.__name__,
exc,
'\n{0}'.format(''.join(tb)) if with_tracebacks else '',
)
for context, exc, tb in self.exceptions
]
return 'due to the following failures:\n{0}'.format(
'\n'.join(each_exception_message)
)
class GroupedExceptionForwarder(object):
"""A contextmanager to capture exceptions and forward them to a
GroupedExceptionHandler."""
def __init__(self, context, handler):
# type: (str, GroupedExceptionHandler) -> None
self._context = context
self._handler = handler
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, tb):
if exc_value is not None:
self._handler._receive_forwarded(
self._context,
exc_value,
traceback.format_tb(tb),
)
# Suppress any exception from being re-raised:
# https://docs.python.org/3/reference/datamodel.html#object.__exit__.
return True
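
For context, a minimal sketch (not part of the diff) of how the two classes above are intended to be used together, assuming they are importable from `llnl.util.lang` as the bootstrap changes below do; each `forward()` block records its failure instead of aborting:

```
# Hedged sketch: collect failures from several attempts, then report them all.
from llnl.util.lang import GroupedExceptionHandler

handler = GroupedExceptionHandler()

for context, action in [('step one', lambda: 1 / 0),
                        ('step two', lambda: None)]:
    with handler.forward(context):
        action()   # any exception raised here is recorded, not propagated

if handler:  # truthy once at least one exception has been captured
    raise RuntimeError('sketch failed ' + handler.grouped_message(with_tracebacks=False))
```

This is the same pattern the bootstrap code below adopts in ensure_module_importable_or_raise and ensure_executables_in_path_or_raise.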

View File

@@ -21,6 +21,7 @@
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.lang import GroupedExceptionHandler
import spack.binary_distribution
import spack.config
@@ -417,11 +418,10 @@ def _make_bootstrapper(conf):
return _bootstrap_methods[btype](conf)
def _source_is_trusted(conf):
def _validate_source_is_trusted(conf):
trusted, name = spack.config.get('bootstrap:trusted'), conf['name']
if name not in trusted:
return False
return trusted[name]
raise ValueError('source is not trusted')
def spec_for_current_python():
@@ -488,34 +488,25 @@ def ensure_module_importable_or_raise(module, abstract_spec=None):
abstract_spec = abstract_spec or module
source_configs = spack.config.get('bootstrap:sources', [])
errors = {}
h = GroupedExceptionHandler()
for current_config in source_configs:
if not _source_is_trusted(current_config):
msg = ('[BOOTSTRAP MODULE {0}] Skipping source "{1}" since it is '
'not trusted').format(module, current_config['name'])
tty.debug(msg)
continue
with h.forward(current_config['name']):
_validate_source_is_trusted(current_config)
b = _make_bootstrapper(current_config)
try:
b = _make_bootstrapper(current_config)
if b.try_import(module, abstract_spec):
return
except Exception as e:
msg = '[BOOTSTRAP MODULE {0}] Unexpected error "{1}"'
tty.debug(msg.format(module, str(e)))
errors[current_config['name']] = e
# We couldn't import in any way, so raise an import error
msg = 'cannot bootstrap the "{0}" Python module'.format(module)
assert h, 'expected at least one exception to have been raised at this point: while bootstrapping {0}'.format(module) # noqa: E501
msg = 'cannot bootstrap the "{0}" Python module '.format(module)
if abstract_spec:
msg += ' from spec "{0}"'.format(abstract_spec)
msg += ' due to the following failures:\n'
for method in errors:
err = errors[method]
msg += " '{0}' raised {1}: {2}\n".format(
method, err.__class__.__name__, str(err))
msg += ' Please run `spack -d spec zlib` for more verbose error messages'
msg += 'from spec "{0}" '.format(abstract_spec)
if tty.is_debug():
msg += h.grouped_message(with_tracebacks=True)
else:
msg += h.grouped_message(with_tracebacks=False)
msg += '\nRun `spack --debug ...` for more detailed errors'
raise ImportError(msg)
@@ -539,15 +530,14 @@ def ensure_executables_in_path_or_raise(executables, abstract_spec):
executables_str = ', '.join(executables)
source_configs = spack.config.get('bootstrap:sources', [])
for current_config in source_configs:
if not _source_is_trusted(current_config):
msg = ('[BOOTSTRAP EXECUTABLES {0}] Skipping source "{1}" since it is '
'not trusted').format(executables_str, current_config['name'])
tty.debug(msg)
continue
b = _make_bootstrapper(current_config)
try:
h = GroupedExceptionHandler()
for current_config in source_configs:
with h.forward(current_config['name']):
_validate_source_is_trusted(current_config)
b = _make_bootstrapper(current_config)
if b.try_search_path(executables, abstract_spec):
# Additional environment variables needed
concrete_spec, cmd = b.last_search['spec'], b.last_search['command']
@@ -562,14 +552,16 @@ def ensure_executables_in_path_or_raise(executables, abstract_spec):
)
cmd.add_default_envmod(env_mods)
return cmd
except Exception as e:
msg = '[BOOTSTRAP EXECUTABLES {0}] Unexpected error "{1}"'
tty.debug(msg.format(executables_str, str(e)))
# We couldn't import in any way, so raise an import error
msg = 'cannot bootstrap any of the {0} executables'.format(executables_str)
assert h, 'expected at least one exception to have been raised at this point: while bootstrapping {0}'.format(executables_str) # noqa: E501
msg = 'cannot bootstrap any of the {0} executables '.format(executables_str)
if abstract_spec:
msg += ' from spec "{0}"'.format(abstract_spec)
msg += 'from spec "{0}" '.format(abstract_spec)
if tty.is_debug():
msg += h.grouped_message(with_tracebacks=True)
else:
msg += h.grouped_message(with_tracebacks=False)
msg += '\nRun `spack --debug ...` for more detailed errors'
raise RuntimeError(msg)

View File

@@ -27,12 +27,6 @@ def setup_parser(subparser):
def stage(parser, args):
# We temporarily modify the working directory when setting up a stage, so we need to
# convert this to an absolute path here in order for it to remain valid later.
custom_path = os.path.abspath(args.path) if args.path else None
if custom_path:
spack.stage.create_stage_root(custom_path)
if not args.specs:
env = ev.active_environment()
if env:
@@ -54,6 +48,10 @@ def stage(parser, args):
specs = spack.cmd.parse_specs(args.specs, concretize=False)
# We temporarily modify the working directory when setting up a stage, so we need to
# convert this to an absolute path here in order for it to remain valid later.
custom_path = os.path.abspath(args.path) if args.path else None
# prevent multiple specs from extracting in the same folder
if len(specs) > 1 and custom_path:
tty.die("`--path` requires a single spec, but multiple were provided")
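
The comment about the working directory is the motivation for moving this block; a tiny standalone illustration (not Spack code) of why the path must be converted with `os.path.abspath` before any later directory change:

```
# Relative paths are resolved against the cwd at call time, so resolve early.
import os
import tempfile

custom_path = os.path.abspath('stage-here')  # absolute now, relative to the current cwd
os.chdir(tempfile.gettempdir())              # staging later changes directories...
print(custom_path)                           # ...but the stored absolute path stays valid
```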

View File

@@ -235,7 +235,7 @@ def is_ready(dag_hash):
# If this spec is only used as a build dependency, we can uninstall
return all(
dspec.deptypes == ("build",)
dspec.deptypes == ("build",) or not dspec.parent.installed
for dspec in record.spec.edges_from_dependents()
)
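
To make the one-line change above concrete, here is a self-contained sketch that reproduces the bison/flex/libnl example from the commit message, using hypothetical namedtuple stand-ins for the dependent edges (the real objects are Spack database records and spec edges):

```
# Hypothetical stand-ins for Spack's dependent edges; only the fields used
# by the predicate above are modeled.
from collections import namedtuple

Parent = namedtuple('Parent', ['name', 'installed'])
Edge = namedtuple('Edge', ['parent', 'deptypes'])

def can_uninstall(edges):
    # Mirrors the fixed check: an edge blocks uninstallation only if it is
    # more than a build dependency AND its dependent is still installed.
    return all(
        edge.deptypes == ('build',) or not edge.parent.installed
        for edge in edges
    )

edges_for_m4 = [
    Edge(Parent('bison', installed=False), ('build', 'run')),
    Edge(Parent('flex', installed=False), ('build',)),
    Edge(Parent('libnl', installed=True), ('build',)),
]
assert can_uninstall(edges_for_m4)  # bison/flex are uninstalled, libnl is build-only
```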

View File

@@ -62,6 +62,22 @@ def test_raising_exception_if_bootstrap_disabled(mutable_config):
spack.bootstrap.store_path()
def test_raising_exception_module_importable():
with pytest.raises(
ImportError,
match='cannot bootstrap the "asdf" Python module',
):
spack.bootstrap.ensure_module_importable_or_raise("asdf")
def test_raising_exception_executables_in_path():
with pytest.raises(
RuntimeError,
match="cannot bootstrap any of the asdf, fdsa executables",
):
spack.bootstrap.ensure_executables_in_path_or_raise(["asdf", "fdsa"], "python")
@pytest.mark.regression('25603')
def test_bootstrap_deactivates_environments(active_mock_environment):
assert spack.environment.active_environment() == active_mock_environment

View File

@@ -41,7 +41,6 @@ def check_stage_path(monkeypatch, tmpdir):
def fake_stage(pkg, mirror_only=False):
assert pkg.path == expected_path
assert os.path.isdir(expected_path), expected_path
monkeypatch.setattr(spack.package.PackageBase, 'do_stage', fake_stage)

View File

@@ -6,6 +6,7 @@
import os.path
import sys
from datetime import datetime, timedelta
from textwrap import dedent
import pytest
@@ -270,3 +271,37 @@ def f(*args, **kwargs):
def test_dedupe():
assert [x for x in dedupe([1, 2, 1, 3, 2])] == [1, 2, 3]
assert [x for x in dedupe([1, -2, 1, 3, 2], key=abs)] == [1, -2, 3]
def test_grouped_exception():
h = llnl.util.lang.GroupedExceptionHandler()
def inner():
raise ValueError('wow!')
with h.forward('inner method'):
inner()
with h.forward('top-level'):
raise TypeError('ok')
assert h.grouped_message(with_tracebacks=False) == dedent("""\
due to the following failures:
inner method raised ValueError: wow!
top-level raised TypeError: ok""")
assert h.grouped_message(with_tracebacks=True) == dedent("""\
due to the following failures:
inner method raised ValueError: wow!
File "{0}", \
line 283, in test_grouped_exception
inner()
File "{0}", \
line 280, in inner
raise ValueError('wow!')
top-level raised TypeError: ok
File "{0}", \
line 286, in test_grouped_exception
raise TypeError('ok')
""").format(__file__)

View File

@@ -5,6 +5,7 @@
import os
import re
import sys
from itertools import product
from spack.util.executable import which
@@ -18,6 +19,8 @@
ALLOWED_ARCHIVE_TYPES = [".".join(ext) for ext in product(
PRE_EXTS, EXTS)] + PRE_EXTS + EXTS + NOTAR_EXTS
is_windows = sys.platform == 'win32'
def allowed_archive(path):
return any(path.endswith(t) for t in ALLOWED_ARCHIVE_TYPES)
@@ -48,15 +51,14 @@ def _unzip(archive_file):
Args:
archive_file (str): absolute path of the file to be decompressed
"""
try:
from zipfile import ZipFile
destination_abspath = os.getcwd()
with ZipFile(archive_file, 'r') as zf:
zf.extractall(destination_abspath)
except ImportError:
unzip = which('unzip', required=True)
unzip.add_default_arg('-q')
return unzip
exe = 'unzip'
arg = '-q'
if is_windows:
exe = 'tar'
arg = '-xf'
unzip = which(exe, required=True)
unzip.add_default_arg(arg)
unzip(archive_file)
def decompressor_for(path, extension=None):
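
For illustration, a small standalone Python sketch (not Spack code) of the permission loss described in #30200 that motivated switching back to external `unzip`/`tar`: on a Unix-like system the archive stores mode 0o744 for a script, but `ZipFile.extractall` recreates the file with the default, umask-derived mode.

```
# Standalone illustration of the ZipFile permission issue (#30200).
import os
import stat
import zipfile

with zipfile.ZipFile('example.zip', 'w') as zf:
    info = zipfile.ZipInfo('tool.sh')
    info.external_attr = 0o744 << 16     # Unix mode bits live in the high 16 bits
    zf.writestr(info, '#!/bin/sh\necho ok\n')

with zipfile.ZipFile('example.zip') as zf:
    zf.extractall('out')

mode = stat.S_IMODE(os.stat(os.path.join('out', 'tool.sh')).st_mode)
print(oct(mode))  # typically 0o644 under a 022 umask, not the archived 0o744
```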

View File

@@ -60,8 +60,8 @@ default:
.develop-build:
extends: [ ".develop", ".build" ]
variables:
AWS_ACCESS_KEY_ID: ${MIRRORS_AWS_ACCESS_KEY_ID}
AWS_SECRET_ACCESS_KEY: ${MIRRORS_AWS_SECRET_ACCESS_KEY}
AWS_ACCESS_KEY_ID: ${PROTECTED_MIRRORS_AWS_ACCESS_KEY_ID}
AWS_SECRET_ACCESS_KEY: ${PROTECTED_MIRRORS_AWS_SECRET_ACCESS_KEY}
SPACK_SIGNING_KEY: ${PACKAGE_SIGNING_KEY}
########################################

View File

@@ -0,0 +1,99 @@
diff --git a/src/cmake/thirdparty/SetupADIOS.cmake b/src/cmake/thirdparty/SetupADIOS.cmake
index 7a7f038d..4b56b7e0 100644
--- a/src/cmake/thirdparty/SetupADIOS.cmake
+++ b/src/cmake/thirdparty/SetupADIOS.cmake
@@ -10,32 +10,32 @@
#
###############################################################################
-# first Check for ADIOS_DIR
+# first Check for ADIOS_ROOT
-if(NOT ADIOS_DIR)
- MESSAGE(FATAL_ERROR "ADIOS support needs explicit ADIOS_DIR")
+if(NOT ADIOS_ROOT)
+ MESSAGE(FATAL_ERROR "ADIOS support needs explicit ADIOS_ROOT")
endif()
-MESSAGE(STATUS "Looking for ADIOS using ADIOS_DIR = ${ADIOS_DIR}")
+MESSAGE(STATUS "Looking for ADIOS using ADIOS_ROOT = ${ADIOS_ROOT}")
# CMake's FindADIOS module uses the ADIOS_ROOT env var
-set(ADIOS_ROOT ${ADIOS_DIR})
+set(ADIOS_ROOT ${ADIOS_ROOT})
set(ENV{ADIOS_ROOT} ${ADIOS_ROOT})
# Use CMake's FindADIOS module, which uses hdf5's compiler wrappers to extract
# all the info about the hdf5 install
-include(${ADIOS_DIR}/etc/FindADIOS.cmake)
+include(${ADIOS_ROOT}/etc/FindADIOS.cmake)
-# FindADIOS sets ADIOS_DIR to it's installed CMake info if it exists
-# we want to keep ADIOS_DIR as the root dir of the install to be
+# FindADIOS sets ADIOS_ROOT to it's installed CMake info if it exists
+# we want to keep ADIOS_ROOT as the root dir of the install to be
# consistent with other packages
-set(ADIOS_DIR ${ADIOS_ROOT} CACHE PATH "" FORCE)
+set(ADIOS_ROOT ${ADIOS_ROOT} CACHE PATH "" FORCE)
# not sure why we need to set this, but we do
#set(ADIOS_FOUND TRUE CACHE PATH "" FORCE)
if(NOT ADIOS_FOUND)
- message(FATAL_ERROR "ADIOS_DIR is not a path to a valid ADIOS install")
+ message(FATAL_ERROR "ADIOS_ROOT is not a path to a valid ADIOS install")
endif()
blt_register_library(NAME adios
diff --git a/src/cmake/thirdparty/SetupADIOS2.cmake b/src/cmake/thirdparty/SetupADIOS2.cmake
index d93c3e5b..3133c72c 100644
--- a/src/cmake/thirdparty/SetupADIOS2.cmake
+++ b/src/cmake/thirdparty/SetupADIOS2.cmake
@@ -10,21 +10,39 @@
#
###############################################################################
-# first Check for ADIOS_DIR
-if(NOT ADIOS2_DIR)
- MESSAGE(FATAL_ERROR "ADIOS2 support needs explicit ADIOS2_DIR")
+# Handle legacy usage of ADIOS2_DIR
+if (ADIOS2_DIR AND NOT ADIOS2_ROOT)
+ # If find_package(ADIOS2) has already been called this will fail
+ if (NOT EXISTS ${ADIOS2_DIR}/include)
+ get_filename_component(tmp "${ADIOS2_DIR}" DIRECTORY)
+ get_filename_component(tmp "${tmp}" DIRECTORY)
+ get_filename_component(tmp "${tmp}" DIRECTORY)
+ if (EXISTS ${tmp}/include)
+ set(ADIOS2_ROOT "${tmp}" CACHE PATH "")
+ else ()
+ message(FATAL_ERROR "Could not determine ADIOS2_ROOT from ADIOS2_DIR")
+ endif ()
+ else ()
+ set(ADIOS2_ROOT "${ADIOS2_DIR}" CACHE PATH "")
+ endif ()
+endif ()
+
+# Check for ADIOS_ROOT
+if(NOT ADIOS2_ROOT)
+ MESSAGE(FATAL_ERROR "ADIOS2 support needs explicit ADIOS2_ROOT")
endif()
-MESSAGE(STATUS "Looking for ADIOS2 using ADIOS2_DIR = ${ADIOS2_DIR}")
+MESSAGE(STATUS "Looking for ADIOS2 using ADIOS2_ROOT = ${ADIOS2_ROOT}")
-set(ADIOS2_DIR_ORIG ${ADIOS2_DIR})
+set(ADIOS2_DIR_ORIG ${ADIOS2_ROOT})
find_package(ADIOS2 REQUIRED
NO_DEFAULT_PATH
- PATHS ${ADIOS2_DIR}/lib/cmake/adios2)
+ PATHS ${ADIOS2_ROOT})
+# ADIOS2_DIR is set by find_package
message(STATUS "FOUND ADIOS2 at ${ADIOS2_DIR}")
blt_register_library(NAME adios2
- INCLUDES ${ADIOS2_INCLUDE_DIR}
- LIBRARIES ${ADIOS2_LIB_DIRS} ${ADIOS2_LIBRARIES} )
+ INCLUDES ${ADIOS2_INCLUDE_DIR}
+ LIBRARIES ${ADIOS2_LIB_DIRS} ${ADIOS2_LIBRARIES} )

View File

@@ -102,6 +102,9 @@ class Ascent(CMakePackage, CudaPackage):
# patch for allowing +shared+cuda
# https://github.com/Alpine-DAV/ascent/pull/903
patch('ascent-shared-cuda-pr903.patch', when='@0.8.0')
# patch for finding ADIOS2 more reliably
# https://github.com/Alpine-DAV/ascent/pull/922
patch('ascent-find-adios2-pr922.patch', when='@0.8.0')
##########################################################################
# package dependencies

View File

@@ -0,0 +1,88 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bricks(CMakePackage):
"""Bricks is a data layout and code generation framework,
enabling performance-portable stencil computations across
a multitude of architectures."""
# url for your package's homepage here.
homepage = "https://bricks.run/"
git = 'https://github.com/CtopCsUtahEdu/bricklib.git'
test_requires_compiler = True
# List of GitHub accounts to notify when the package is updated.
maintainers = ['ztuowen', 'drhansj']
version('r0.1', branch='r0.1')
variant('cuda', default=False, description='Build bricks with CUDA enabled')
# Building cmake without the openssl variant matches how the ECP E4S
# project builds cmake in its e4s-base-cuda Docker image
depends_on('cmake', type='build')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('opencl-clhpp', when='+cuda')
depends_on('cuda', when='+cuda')
depends_on('mpi')
def cmake_args(self):
"""CMake arguments for configure stage"""
args = []
return args
def flag_handler(self, name, flags):
"""Set build flags as needed"""
if name in ['cflags', 'cxxflags', 'cppflags']:
# There are many vector intrinsics used in this package. If
# the package is built on a native architecture, then it likely
# will not run (illegal instruction fault) on a less feature-
# rich architecture.
# If you intend to use this package in an architecturally-
# heterogeneous environment, then the package should be built
# with "target=x86_64". This will ensure that all Intel
# architectures can use the libraries and tests in this
# project by forcing the AVX2 flag in gcc.
if name == 'cxxflags' and self.spec.target == 'x86_64':
flags.append('-mavx2')
return (None, flags, None)
return (flags, None, None)
@run_after('install')
def copy_test_sources(self):
"""Files to copy into test cache"""
srcs = [join_path('examples', 'external', 'CMakeLists.txt'),
join_path('examples', 'external', 'main.cpp'),
join_path('examples', 'external', '7pt.py')]
self.cache_extra_test_sources(srcs)
def test(self):
"""Test bricklib package"""
# Test prebuilt binary
source_dir = join_path(self.test_suite.current_test_cache_dir,
'examples', 'external')
self.run_test(exe='cmake',
options=['.'],
purpose='Configure bricklib example',
work_dir=source_dir)
self.run_test(exe='cmake',
options=['--build', '.'],
purpose='Build bricklib example',
work_dir=source_dir)
self.run_test(exe=join_path(source_dir, 'example'),
options=[],
purpose='Execute bricklib example',
work_dir=source_dir)

View File

@@ -18,6 +18,7 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
maintainers = ['jedbrown', 'v-dobrev', 'tzanio']
version('5.0.0')
version('4.0.0')
version('3.0.0')
version('2.0.0')
@@ -32,11 +33,24 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
description='Build PETSc and HPGMG')
variant('pumi', default=True,
description='Build PUMI')
variant('omega-h', default=True,
description='Build Omega_h')
variant('quickbuild', default=True,
description='Speed-up the build by disabling variants in packages')
# TODO: Add 'int64' variant?
# LibCEED
# ceed 5.0
with when('@5.0.0'):
depends_on('libceed@0.10~occa')
depends_on('libceed~cuda', when='~cuda')
for arch in CudaPackage.cuda_arch_values:
depends_on('libceed+cuda+magma cuda_arch={0}'.format(arch),
when='+cuda cuda_arch={0}'.format(arch))
depends_on('libceed~rocm', when='~rocm')
for target in ROCmPackage.amdgpu_targets:
depends_on('libceed+rocm amdgpu_target={0}'.format(target),
when='+rocm amdgpu_target={0}'.format(target))
# ceed 4.0
depends_on('libceed@0.8~cuda', when='@4.0.0~cuda')
for arch in CudaPackage.cuda_arch_values:
@@ -64,7 +78,15 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
depends_on('libceed@0.2+occa', when='@1.0.0+occa')
depends_on('libceed@0.2~occa', when='@1.0.0~occa')
# FMS
# ceed-5.0
depends_on('libfms@0.2.0', when='@5.0.0')
depends_on('libfms@0.2.0~conduit', when='@5.0.0+quickbuild')
# OCCA
# ceed-5.0
depends_on('occa@1.1.0~cuda', when='@5.0.0+occa~cuda')
depends_on('occa@1.1.0+cuda', when='@5.0.0+occa+cuda')
# ceed-4.0
depends_on('occa@1.1.0~cuda', when='@4.0.0+occa~cuda')
depends_on('occa@1.1.0+cuda', when='@4.0.0+occa+cuda')
@@ -79,22 +101,24 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
depends_on('occa@1.0.0-alpha.5+cuda', when='@1.0.0+occa+cuda')
# NekRS
# ceed-4.0
depends_on('nekrs@21.0', when='@4.0.0+nek')
# ceed-4.0 and ceed-5.0
depends_on('nekrs@21.0%gcc', when='@4.0.0:5+nek')
for arch in CudaPackage.cuda_arch_values:
depends_on('nekrs@21.0+cuda cuda_arch={0}'.format(arch),
when='@4.0.0+nek+cuda cuda_arch={0}'.format(arch))
when='@4.0.0:5+nek+cuda cuda_arch={0}'.format(arch))
for target in ROCmPackage.amdgpu_targets:
depends_on('nekrs@21.0+rocm amdgpu_target={0}'.format(target),
when='@4.0.0+nek+rocm amdgpu_target={0}'.format(target))
when='@4.0.0:5+nek+rocm amdgpu_target={0}'.format(target))
# Nek5000, GSLIB, Nekbone, and NekCEM
# ceed-3.0 and ceed-4.0
depends_on('nek5000@19.0', when='@3.0.0:4+nek')
depends_on('nektools@19.0%gcc', when='@3.0.0:4+nek')
# ceed-5.0 - specific
depends_on('gslib@1.0.7', when='@5.0.0+nek')
# ceed-3.0, ceed-4.0, and ceed-5.0
depends_on('nek5000@19.0', when='@3.0.0:5+nek')
depends_on('nektools@19.0%gcc', when='@3.0.0:5+nek')
depends_on('gslib@1.0.6', when='@3.0.0:4+nek')
depends_on('nekbone@17.0', when='@3.0.0:4+nek')
depends_on('nekcem@c8db04b', when='@3.0.0:4+nek')
depends_on('nekbone@17.0', when='@3.0.0:5+nek')
depends_on('nekcem@c8db04b', when='@3.0.0:5+nek')
# ceed-2.0
depends_on('nek5000@17.0', when='@2.0.0+nek')
depends_on('nektools@17.0%gcc', when='@2.0.0+nek')
@@ -109,6 +133,21 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
depends_on('nekcem@0b8bedd', when='@1.0.0+nek')
# PETSc
# ceed 5.0
with when('@5.0.0+petsc'):
depends_on('petsc@3.17')
depends_on('ratel@0.1.2')
for arch in CudaPackage.cuda_arch_values:
depends_on('petsc+cuda cuda_arch={0}'.format(arch),
when='+cuda cuda_arch={0}'.format(arch))
depends_on('ratel+cuda cuda_arch={0}'.format(arch),
when='+cuda cuda_arch={0}'.format(arch))
for target in ROCmPackage.amdgpu_targets:
depends_on('petsc+rocm amdgpu_target={0}'.format(target),
when='+rocm amdgpu_target={0}'.format(target))
depends_on('ratel+rocm amdgpu_target={0}'.format(target),
when='+rocm amdgpu_target={0}'.format(target))
depends_on('petsc~hdf5~superlu-dist', when='+quickbuild')
# ceed 4.0
depends_on('petsc@3.15.0:3.15', when='@4.0.0:4+petsc')
for arch in CudaPackage.cuda_arch_values:
@@ -159,6 +198,13 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
depends_on('hpgmg@a0a5510df23b+fe', when='@1.0.0+petsc')
# MAGMA
# ceed 5.0
for arch in CudaPackage.cuda_arch_values:
depends_on('magma@2.6.2+cuda cuda_arch={0}'.format(arch),
when='@5.0.0+cuda cuda_arch={0}'.format(arch))
for target in ROCmPackage.amdgpu_targets:
depends_on('magma@2.6.2~cuda+rocm amdgpu_target={0}'.format(target),
when='@5.0.0+rocm amdgpu_target={0}'.format(target))
# ceed-4.0
for arch in CudaPackage.cuda_arch_values:
depends_on('magma@2.5.4 cuda_arch={0}'.format(arch),
@@ -171,6 +217,8 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
depends_on('magma@2.3.0', when='@1.0.0+cuda')
# PUMI
# ceed-5.0
depends_on('pumi@2.2.7', when='@5.0.0+pumi')
# ceed-4.0
depends_on('pumi@2.2.5', when='@4.0.0+pumi')
# ceed-3.0
@@ -180,7 +228,28 @@ class Ceed(BundlePackage, CudaPackage, ROCmPackage):
# ceed-1.0
depends_on('pumi@2.1.0', when='@1.0.0+pumi')
# Omega_h
# ceed-5.0
depends_on('omega-h@scorec.10.1.0', when='@5.0.0+omega-h')
depends_on('omega-h~trilinos', when='@5.0.0+omega-h+quickbuild')
# MFEM, Laghos, Remhos
# ceed 5.0
with when('@5.0.0+mfem'):
depends_on('mfem@4.4.0+mpi+examples+miniapps')
depends_on('mfem+petsc', when='+petsc')
depends_on('mfem+pumi', when='+pumi')
depends_on('mfem+gslib', when='+nek')
depends_on('mfem+libceed+fms')
for arch in CudaPackage.cuda_arch_values:
depends_on('mfem+cuda cuda_arch={0}'.format(arch),
when='+cuda cuda_arch={0}'.format(arch))
for target in ROCmPackage.amdgpu_targets:
depends_on('mfem+rocm amdgpu_target={0}'.format(target),
when='+rocm amdgpu_target={0}'.format(target))
depends_on('mfem+occa', when='+occa')
depends_on('laghos@3.1')
depends_on('remhos@1.0')
# ceed-4.0
depends_on('mfem@4.2.0+mpi+examples+miniapps', when='@4.0.0+mfem~petsc')
depends_on('mfem@4.2.0+mpi+petsc+examples+miniapps',

View File

@@ -13,6 +13,7 @@ class Gslib(Package):
git = "https://github.com/gslib/gslib.git"
version('develop', branch='master')
version('1.0.7', tag='v1.0.7')
version('1.0.6', tag='v1.0.6')
version('1.0.5', tag='v1.0.5')
version('1.0.4', tag='v1.0.4')

View File

@@ -34,7 +34,7 @@ class Laghos(MakefilePackage):
depends_on('mfem+mpi~metis', when='~metis')
depends_on('mfem@develop', when='@develop')
depends_on('mfem@4.2.0', when='@3.1')
depends_on('mfem@4.2.0:', when='@3.1')
depends_on('mfem@4.1.0:4.1', when='@3.0')
# Recommended mfem version for laghos v2.0 is: ^mfem@3.4.1-laghos-v2.0
depends_on('mfem@3.4.0:', when='@2.0')

View File

@@ -12,7 +12,7 @@ class Libceed(MakefilePackage, CudaPackage, ROCmPackage):
homepage = "https://github.com/CEED/libCEED"
git = "https://github.com/CEED/libCEED.git"
maintainers = ['jedbrown', 'v-dobrev', 'tzanio']
maintainers = ['jedbrown', 'v-dobrev', 'tzanio', 'jeremylt']
version('develop', branch='main')
version('0.10.1', tag='v0.10.1')
@@ -107,6 +107,8 @@ def common_make_opts(self):
if '+cuda' in spec:
makeopts += ['CUDA_DIR=%s' % spec['cuda'].prefix]
makeopts += ['CUDA_ARCH=sm_%s' %
spec.variants['cuda_arch'].value]
if spec.satisfies('@:0.4'):
nvccflags = ['-ccbin %s -Xcompiler "%s" -Xcompiler %s' %
(compiler.cxx, opt, compiler.cc_pic_flag)]
@@ -118,6 +120,8 @@ def common_make_opts(self):
if '+rocm' in spec:
makeopts += ['HIP_DIR=%s' % spec['hip'].prefix]
amdgpu_target = ','.join(spec.variants['amdgpu_target'].value)
makeopts += ['HIP_ARCH=%s' % amdgpu_target]
if spec.satisfies('@0.8'):
makeopts += ['HIPBLAS_DIR=%s' % spec['hipblas'].prefix]
@@ -138,7 +142,8 @@ def build_targets(self):
@property
def install_targets(self):
return ['prefix={0}'.format(self.prefix)] + self.common_make_opts
return ['install', 'prefix={0}'.format(self.prefix)] + \
self.common_make_opts
def check(self):
make('prove', *self.common_make_opts, parallel=False)

View File

@@ -0,0 +1,59 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libfms(CMakePackage):
"""Field and Mesh Specification (FMS) library"""
homepage = "https://github.com/CEED/FMS"
git = "https://github.com/CEED/FMS.git"
tags = ['FEM', 'Meshes', 'Fields', 'High-order', 'I/O', 'Data-exchange']
maintainers = ['v-dobrev', 'tzanio', 'cwsmith']
version('develop', branch='master')
version('0.2.0', tag='v0.2')
variant('conduit', default=True,
description='Build with Conduit I/O support')
variant('shared', default=True,
description='Build shared libraries')
depends_on('cmake@3.1:', type='build')
depends_on('conduit@0.7.1:', when='+conduit')
def cmake_args(self):
args = []
args.extend([
self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
])
if '+conduit' in self.spec:
args.extend([
self.define('CONDUIT_DIR', self.spec['conduit'].prefix)
])
return args
@property
def headers(self):
"""Export the FMS headers.
Sample usage: spec['libfms'].headers.cpp_flags
"""
fms_h_names = ['fms', 'fmsio']
hdrs = find_headers(fms_h_names, self.prefix.include, recursive=False)
return hdrs or None # Raise an error if no headers are found
@property
def libs(self):
"""Export the FMS library.
Sample usage: spec['libfms'].libs.ld_flags
"""
is_shared = '+shared' in self.spec
libs = find_libraries('libfms', root=self.prefix, shared=is_shared,
recursive=True)
return libs or None # Raise an error if no libs are found

View File

@@ -102,10 +102,10 @@ class Mfem(Package, CudaPackage, ROCmPackage):
description='Build static library')
variant('shared', default=False,
description='Build shared library')
variant('mpi', default=True,
variant('mpi', default=True, sticky=True,
description='Enable MPI parallelism')
# Can we make the default value for 'metis' depend on the 'mpi' value?
variant('metis', default=True,
variant('metis', default=True, sticky=True,
description='Enable METIS support')
variant('openmp', default=False,
description='Enable OpenMP parallelism')
@@ -153,6 +153,8 @@ class Mfem(Package, CudaPackage, ROCmPackage):
description='Enable secure sockets using GnuTLS')
variant('libunwind', default=False,
description='Enable backtrace on error support using Libunwind')
variant('fms', default=False, when='@4.3.0:',
description='Enable FMS I/O support')
# TODO: SIMD, Ginkgo, ADIOS2, HiOp, MKL CPardiso, Axom/Sidre
variant('timer', default='auto',
values=('auto', 'std', 'posix', 'mac', 'mpi'),
@@ -287,6 +289,7 @@ class Mfem(Package, CudaPackage, ROCmPackage):
depends_on('gnutls', when='+gnutls')
depends_on('conduit@0.3.1:,master:', when='+conduit')
depends_on('conduit+mpi', when='+conduit+mpi')
depends_on('libfms@0.2.0:', when='+fms')
# The MFEM 4.0.0 SuperLU interface fails when using hypre@2.16.0 and
# superlu-dist@6.1.1. See https://github.com/mfem/mfem/issues/983.
@@ -486,6 +489,7 @@ def find_optional_library(name, prefix):
'MFEM_USE_AMGX=%s' % yes_no('+amgx'),
'MFEM_USE_CEED=%s' % yes_no('+libceed'),
'MFEM_USE_UMPIRE=%s' % yes_no('+umpire'),
'MFEM_USE_FMS=%s' % yes_no('+fms'),
'MFEM_MPIEXEC=%s' % mfem_mpiexec,
'MFEM_MPIEXEC_NP=%s' % mfem_mpiexec_np]
@@ -830,6 +834,12 @@ def find_optional_library(name, prefix):
'CONDUIT_OPT=%s' % conduit_opt_flags,
'CONDUIT_LIB=%s' % ld_flags_from_library_list(libs)]
if '+fms' in spec:
libfms = spec['libfms']
options += [
'FMS_OPT=%s' % libfms.headers.cpp_flags,
'FMS_LIB=%s' % ld_flags_from_library_list(libfms.libs)]
make('config', *options, parallel=False)
make('info', parallel=False)

View File

@@ -4,7 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class OmegaH(CMakePackage):
class OmegaH(CMakePackage, CudaPackage):
"""Omega_h is a C++11 library providing data structures and algorithms
for adaptive discretizations. Its specialty is anisotropic triangle and
tetrahedral mesh adaptation. It runs efficiently on most modern HPC
@@ -45,6 +45,12 @@ class OmegaH(CMakePackage):
depends_on('mpi', when='+mpi')
depends_on('trilinos +kokkos', when='+trilinos')
depends_on('zlib', when='+zlib')
# Note: '+cuda' and 'cuda_arch' variants are added by the CudaPackage
depends_on('cuda', when='+cuda')
conflicts('cuda@11.2:', when='@scorec.10.1.0:', msg='Thrust is broken in CUDA >= 11.2.* see https://github.com/sandialabs/omega_h/issues/366')
# the sandia repo has a fix for cuda > 11.2 support
# see github.com/sandialabs/omega_h/pull/373
conflicts('cuda@11.2:', when='@:9.34.4', msg='Thrust is broken in CUDA >= 11.2.* see https://github.com/sandialabs/omega_h/issues/366')
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86610
conflicts('%gcc@8:8.2', when='@:9.22.1')
@@ -66,10 +72,21 @@ def cmake_args(self):
args.append('-DBUILD_SHARED_LIBS:BOOL=OFF')
if '+mpi' in self.spec:
args.append('-DOmega_h_USE_MPI:BOOL=ON')
args.append('-DCMAKE_CXX_COMPILER:FILEPATH={0}'.format(
self.spec['mpi'].mpicxx))
ver = self.spec.version
# old versions don't call find_package(MPI)
if ver < Version('9.33.2') and 'scorec' not in str(ver):
args.append('-DCMAKE_CXX_COMPILER:FILEPATH={0}'.format(
self.spec['mpi'].mpicxx))
else:
args.append('-DOmega_h_USE_MPI:BOOL=OFF')
if '+cuda' in self.spec:
args.append('-DOmega_h_USE_CUDA:BOOL=ON')
cuda_arch_list = self.spec.variants['cuda_arch'].value
cuda_arch = cuda_arch_list[0]
if cuda_arch != 'none':
args.append('-DOmega_h_CUDA_ARCH={0}'.format(cuda_arch))
else:
args.append('-DOmega_h_USE_CUDA:BOOL=OFF')
if '+trilinos' in self.spec:
args.append('-DOmega_h_USE_Trilinos:BOOL=ON')
if '+zlib' in self.spec:

View File

@@ -0,0 +1,22 @@
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPyworld(PythonPackage):
"""PyWorld wrappers WORLD, which is a free software for high-quality speech
analysis, manipulation and synthesis. It can estimate fundamental frequency
(F0), aperiodicity and spectral envelope and also generate the speech like
input speech with only estimated parameters.i"""
homepage = "https://github.com/JeremyCCHsu/Python-Wrapper-for-World-Vocoder"
pypi = "pyworld/pyworld-0.3.0.tar.gz"
version('0.3.0', sha256='e19b5d8445e0c4fc45ded71863aeaaf2680064b4626b0e7c90f72e9ace9f6b5b')
depends_on('py-setuptools', type='build')
depends_on('py-numpy@:1.19', type=('build', 'run'))
depends_on('py-cython@0.24.0:', type='build')

View File

@@ -30,6 +30,7 @@ class Qt(Package):
phases = ['configure', 'build', 'install']
version('5.15.4', sha256='615ff68d7af8eef3167de1fd15eac1b150e1fd69d1e2f4239e54447e7797253b')
version('5.15.3', sha256='b7412734698a87f4a0ae20751bab32b1b07fdc351476ad8e35328dbe10efdedb')
version('5.15.2', sha256='3a530d1b243b5dec00bc54937455471aaa3e56849d2593edb8ded07228202240')
version('5.14.2', sha256='c6fcd53c744df89e7d3223c02838a33309bd1c291fcb6f9341505fe99f7f19fa')

View File

@@ -64,7 +64,8 @@ def build_targets(self):
@property
def install_targets(self):
return ['prefix={0}'.format(self.prefix)] + self.common_make_opts
return ['install', 'prefix={0}'.format(self.prefix)] + \
self.common_make_opts
def check(self):
make('prove', *self.common_make_opts, parallel=False)

View File

@@ -28,6 +28,7 @@ class Tau(Package):
tags = ['e4s']
version('master', branch='master')
version('2.31.1', sha256='bf445b9d4fe40a5672a7b175044d2133791c4dfb36a214c1a55a931aebc06b9d')
version('2.31', sha256='27e73c395dd2a42b91591ce4a76b88b1f67663ef13aa19ef4297c68f45d946c2')
version('2.30.2', sha256='43f84a15b71a226f8a64d966f0cb46022bcfbaefb341295ecc6fa80bb82bbfb4')
version('2.30.1', sha256='9c20ca1b4f4e80d885f24491cee598068871f0e9dd67906a5e47e4b4147d08fc')

View File

@@ -1,34 +0,0 @@
From 9785e706229622626133c4b03c7abd004f62023f Mon Sep 17 00:00:00 2001
From: Axel Huebl <axel.huebl@plasma.ninja>
Date: Sat, 4 Dec 2021 15:28:13 -0800
Subject: [PATCH] Fix: Installed Symlink LIB
The latest patch to these routines broke our library alias in installs.
By default, this variable is relative and needs the prefix appended.
In some cases, e.g., if externally set, it can already be absolute. In that
case, we skip adding the prefix.
---
CMakeLists.txt | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 04092ba962..a549546ab9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -343,9 +343,14 @@ if(WarpX_LIB)
else()
set(mod_ext "so")
endif()
+ if(IS_ABSOLUTE ${CMAKE_INSTALL_LIBDIR})
+ set(ABS_INSTALL_LIB_DIR ${CMAKE_INSTALL_LIBDIR})
+ else()
+ set(ABS_INSTALL_LIB_DIR ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR})
+ endif()
install(CODE "file(CREATE_LINK
$<TARGET_FILE_NAME:shared>
- ${CMAKE_INSTALL_LIBDIR}/libwarpx.${lib_suffix}.${mod_ext}
+ ${ABS_INSTALL_LIB_DIR}/libwarpx.${lib_suffix}.${mod_ext}
COPY_ON_ERROR SYMBOLIC)")
endif()

View File

@@ -132,7 +132,22 @@ class Warpx(CMakePackage):
# The symbolic aliases for our +lib target were missing in the install
# location
# https://github.com/ECP-WarpX/WarpX/pull/2626
patch('2626.patch', when='@21.12')
patch('https://github.com/ECP-WarpX/WarpX/pull/2626.patch?full_index=1',
sha256='a431d4664049d6dcb6454166d6a948d8069322a111816ca5ce01553800607544',
when='@21.12')
# Workaround for AMReX<=22.06 no-MPI Gather
# https://github.com/ECP-WarpX/WarpX/pull/3134
# https://github.com/AMReX-Codes/amrex/pull/2793
patch('https://github.com/ECP-WarpX/WarpX/pull/3134.patch?full_index=1',
sha256='b786ce64a3c2c2b96ff2e635f0ee48532e4ae7ad9637dbf03f11c0768c290690',
when='@22.02:22.05')
# Forgot to install ABLASTR library
# https://github.com/ECP-WarpX/WarpX/pull/3141
patch('https://github.com/ECP-WarpX/WarpX/pull/3141.patch?full_index=1',
sha256='dab6fb44556ee1fd466a4cb0e20f89bde1ce445c9a51a2c0f59d1740863b5e7d',
when='@22.04,22.05')
def cmake_args(self):
spec = self.spec
@@ -168,10 +183,15 @@ def cmake_args(self):
def libs(self):
libsuffix = {'1': '1d', '2': '2d', '3': '3d', 'rz': 'rz'}
dims = self.spec.variants['dims'].value
return find_libraries(
libs = find_libraries(
['libwarpx.' + libsuffix[dims]], root=self.prefix, recursive=True,
shared=True
)
libs += find_libraries(
['libablastr'], root=self.prefix, recursive=True,
shared=self.spec.variants['shared']
)
return libs
# WarpX has many examples to serve as a suitable smoke check. One
# that is typical was chosen here