Compare commits

..

7 Commits

Author SHA1 Message Date
Harmen Stoppels
6593d22c4e spack.modules.common: pass spec to SetupContext (#40886)
Currently module globals aren't set before running
`setup_[dependent_]run_environment` to compute environment modifications
for module files. This commit fixes that.
2023-11-04 20:42:47 +00:00
Massimiliano Culpo
f51dad976e hdf5-vol-async: better specify dependency condition (#40882) 2023-11-04 20:31:52 +01:00
Cameron Rutherford
ff8cd597e0 hiop: fix cuda constraints (#40875) 2023-11-04 13:09:59 -05:00
eugeneswalker
fd22d109a6 sundials +sycl: add cxxflags=-fsycl via flag_handler (#40845) 2023-11-04 08:55:19 -05:00
zv-io
88ee3a0fba linux-headers: support multiple versions (#40877)
The download URL for linux-headers was hardcoded to 4.x;
we need to derive the correct URL from the version number.
2023-11-04 12:21:12 +01:00
Massimiliano Culpo
f50377de7f environment: solve one spec per child process (#40876)
Looking at the memory profiles of concurrent solves
for environment with unify:false, it seems memory
is only ramping up.

This exchange in the potassco mailing list:
 https://sourceforge.net/p/potassco/mailman/potassco-users/thread/b55b5b8c2e8945409abb3fa3c935c27e%40lohn.at/#msg36517698

Seems to suggest that clingo doesn't release memory
until end of the application.

Since when unify:false we distribute work to processes,
here we give a maxtaskperchild=1, so we clean memory
after each solve.
2023-11-03 23:10:42 +00:00
Adam J. Stewart
8e96d3a051 GDAL: add v3.7.3 (#40865) 2023-11-03 22:59:52 +01:00
14 changed files with 37 additions and 43 deletions

View File

@@ -1016,16 +1016,10 @@ def get_env_modifications(self) -> EnvironmentModifications:
self._make_runnable(dspec, env)
if self.should_setup_run_env & flag:
run_env_mods = EnvironmentModifications()
for spec in dspec.dependents(deptype=dt.LINK | dt.RUN):
if id(spec) in self.nodes_in_subdag:
pkg.setup_dependent_run_environment(run_env_mods, spec)
pkg.setup_run_environment(run_env_mods)
run_env_dict = run_env_mods.group_by_name()
if self.context == Context.BUILD:
run_env_mods.drop("CC", "CXX", "F77", "FC")
env.extend(run_env_mods)
pkg.setup_dependent_run_environment(env, spec)
pkg.setup_run_environment(env)
return env
def _make_buildtime_detectable(self, dep: spack.spec.Spec, env: EnvironmentModifications):

View File

@@ -1525,7 +1525,11 @@ def _concretize_separately(self, tests=False):
batch = []
for j, (i, concrete, duration) in enumerate(
spack.util.parallel.imap_unordered(
_concretize_task, args, processes=num_procs, debug=tty.is_debug()
_concretize_task,
args,
processes=num_procs,
debug=tty.is_debug(),
maxtaskperchild=1,
)
):
batch.append((i, concrete))

View File

@@ -731,7 +731,9 @@ def environment_modifications(self):
# for that to work, globals have to be set on the package modules, and the
# whole chain of setup_dependent_package has to be followed from leaf to spec.
# So: just run it here, but don't collect env mods.
spack.build_environment.SetupContext(context=Context.RUN).set_all_package_py_globals()
spack.build_environment.SetupContext(
spec, context=Context.RUN
).set_all_package_py_globals()
# Then run setup_dependent_run_environment before setup_run_environment.
for dep in spec.dependencies(deptype=("link", "run")):

View File

@@ -596,14 +596,6 @@ def group_by_name(self) -> Dict[str, ModificationList]:
modifications[item.name].append(item)
return modifications
def drop(self, *name) -> bool:
"""Drop all modifications to the variable with the given name."""
old_mods = self.env_modifications
new_mods = [x for x in self.env_modifications if x.name not in name]
self.env_modifications = new_mods
return len(old_mods) != len(new_mods)
def is_unset(self, variable_name: str) -> bool:
"""Returns True if the last modification to a variable is to unset it, False otherwise."""
modifications = self.group_by_name()

View File

@@ -6,6 +6,7 @@
import os
import sys
import traceback
from typing import Optional
class ErrorFromWorker:
@@ -53,7 +54,9 @@ def __call__(self, *args, **kwargs):
return value
def imap_unordered(f, list_of_args, *, processes: int, debug=False):
def imap_unordered(
f, list_of_args, *, processes: int, maxtaskperchild: Optional[int] = None, debug=False
):
"""Wrapper around multiprocessing.Pool.imap_unordered.
Args:
@@ -62,6 +65,8 @@ def imap_unordered(f, list_of_args, *, processes: int, debug=False):
processes: maximum number of processes allowed
debug: if False, raise an exception containing just the error messages
from workers, if True an exception with complete stacktraces
maxtaskperchild: number of tasks to be executed by a child before being
killed and substituted
Raises:
RuntimeError: if any error occurred in the worker processes
@@ -70,7 +75,7 @@ def imap_unordered(f, list_of_args, *, processes: int, debug=False):
yield from map(f, list_of_args)
return
with multiprocessing.Pool(processes) as p:
with multiprocessing.Pool(processes, maxtasksperchild=maxtaskperchild) as p:
for result in p.imap_unordered(Task(f), list_of_args):
if isinstance(result, ErrorFromWorker):
raise RuntimeError(result.stacktrace if debug else str(result))

View File

@@ -200,11 +200,11 @@ spack:
- kokkos +sycl +openmp cxxstd=17 +tests +examples
- kokkos-kernels build_type=Release %oneapi ^kokkos +sycl +openmp cxxstd=17 +tests +examples
- slate +sycl
- sundials +sycl cxxstd=17 +examples-install
- tau +mpi +opencl +level_zero ~pdt +syscall # tau: requires libdrm.so to be installed
# --
# - ginkgo +oneapi # InstallError: Ginkgo's oneAPI backend requires the DPC++ compiler as main CXX compiler.
# - hpctoolkit +level_zero # dyninst@12.3.0%gcc: /usr/bin/ld: libiberty/./d-demangle.c:142: undefined reference to `_intel_fast_memcpy'; can't mix intel-tbb@%oneapi with dyninst%gcc
# - sundials +sycl cxxstd=17 # sundials: include/sunmemory/sunmemory_sycl.h:20:10: fatal error: 'CL/sycl.hpp' file not found
- py-scipy

View File

@@ -51,6 +51,8 @@ spack:
require: "@3.4.4"
vtk-m:
require: "+examples"
visit:
require: "~gui"
cuda:
version: [11.8.0]
paraview:

View File

@@ -30,6 +30,7 @@ class Gdal(CMakePackage, AutotoolsPackage, PythonExtension):
maintainers("adamjstewart")
version("3.7.3", sha256="e0a6f0c453ea7eb7c09967f50ac49426808fcd8f259dbc9888140eb69d7ffee6")
version("3.7.2", sha256="40c0068591d2c711c699bbb734319398485ab169116ac28005d8302f80b923ad")
version("3.7.1", sha256="9297948f0a8ba9e6369cd50e87c7e2442eda95336b94d2b92ef1829d260b9a06")
version("3.7.0", sha256="af4b26a6b6b3509ae9ccf1fcc5104f7fe015ef2110f5ba13220816398365adce")

View File

@@ -35,9 +35,8 @@ class Hdf5VolAsync(CMakePackage):
depends_on("hdf5@1.14.0: +mpi +threadsafe")
# Require MPI_THREAD_MULTIPLE.
depends_on("openmpi +thread_multiple", when="^openmpi@:2")
depends_on("openmpi", when="^openmpi@3:")
depends_on("mvapich2 threads=multiple", when="^mvapich2")
depends_on("openmpi +thread_multiple", when="^[virtuals=mpi] openmpi@:2")
depends_on("mvapich2 threads=multiple", when="^[virtuals=mpi] mvapich2")
def setup_run_environment(self, env):
env.prepend_path("HDF5_PLUGIN_PATH", self.spec.prefix.lib)

View File

@@ -115,7 +115,7 @@ class Hiop(CMakePackage, CudaPackage, ROCmPackage):
# 1.0.2 fixes bug with cuda 12 compatibility
# hiop@0.6.0 requires cusolver API in cuda@11
depends_on("cuda@11:11.9", when="@0.6.0:1.0.1")
depends_on("cuda@11:11.9", when="@0.6.0:1.0.1+cuda")
depends_on("cuda@11:", when="@develop:+cuda")
# Before hiop@0.6.0 only cuda requirement was magma
depends_on("cuda", when="@:0.5.4+cuda")

View File

@@ -20,6 +20,10 @@ class LinuxHeaders(Package):
version("6.2.8", sha256="fed0ad87d42f83a70ce019ff2800bc30a855e672e72bf6d54a014d98d344f665")
version("4.9.10", sha256="bd6e05476fd8d9ea4945e11598d87bc97806bbc8d03556abbaaf809707661525")
def url_for_version(self, version):
url = "https://www.kernel.org/pub/linux/kernel/v{0}.x/linux-{1}.tar.xz"
return url.format(version.up_to(1), version)
def setup_build_environment(self, env):
# This variable is used in the Makefile. If it is defined on the
# system, it can break the build if there is no build recipe for

View File

@@ -977,9 +977,7 @@ def post_install(self):
ninja()
ninja("install")
if "+python" in self.spec:
if spec.version < Version("17.0.0"):
# llvm bindings were removed in v17: https://releases.llvm.org/17.0.1/docs/ReleaseNotes.html#changes-to-the-python-bindings
install_tree("llvm/bindings/python", python_platlib)
install_tree("llvm/bindings/python", python_platlib)
if "+clang" in self.spec:
install_tree("clang/bindings/python", python_platlib)

View File

@@ -3,7 +3,6 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
from spack.package import *
@@ -93,20 +92,8 @@ def setup_build_environment(self, env):
env.set("AR", ar.path)
# Manually inject the path of openssl's certs for build.
certs = None
for p in (
"etc/openssl/cert.pem",
"../etc/openssl/cert.pem",
"etc/ssl/certs/ca-bundle.crt",
"../etc/ssl/certs/ca-bundle.crt",
):
certs = join_path(self.spec["openssl"].prefix, p)
if os.path.exists(certs):
break
else:
certs = None
if certs is not None:
env.set("CARGO_HTTP_CAINFO", certs)
certs = join_path(self.spec["openssl"].prefix, "etc/openssl/cert.pem")
env.set("CARGO_HTTP_CAINFO", certs)
def configure(self, spec, prefix):
opts = []

View File

@@ -292,6 +292,12 @@ class Sundials(CMakePackage, CudaPackage, ROCmPackage):
# fix issues with exported PETSc target(s) in SUNDIALSConfig.cmake
patch("sundials-v5.8.0.patch", when="@5.8.0")
def flag_handler(self, name, flags):
if name == "cxxflags":
if self.spec.satisfies("+sycl"):
flags.append("-fsycl")
return (flags, None, None)
# ==========================================================================
# SUNDIALS Settings
# ==========================================================================