Compare commits

2 Commits

Author            SHA1        Message                                               Date
Wouter Deconinck  29554e8446  intel-oneapi-mpi: strip @ from IMPI_OFFICIALVERSION  2024-11-18 14:09:02 -06:00
Wouter Deconinck  d92cea0a07  intel-oneapi-mpi: external detection support          2024-07-27 08:23:56 -05:00

866 changed files with 8037 additions and 11629 deletions


@@ -5,10 +5,14 @@ updates:
directory: "/"
schedule:
interval: "daily"
# Requirements to run style checks and build documentation
# Requirements to build documentation
- package-ecosystem: "pip"
directories:
- "/.github/workflows/requirements/style/*"
- "/lib/spack/docs"
directory: "/lib/spack/docs"
schedule:
interval: "daily"
# Requirements to run style checks
- package-ecosystem: "pip"
directories:
- "/.github/workflows/requirements/*"
schedule:
interval: "daily"


@@ -44,7 +44,6 @@ jobs:
run: |
. share/spack/setup-env.sh
coverage run $(which spack) audit packages
coverage run $(which spack) audit configs
coverage run $(which spack) -d audit externals
coverage combine
coverage xml
@@ -53,7 +52,6 @@ jobs:
run: |
. share/spack/setup-env.sh
spack -d audit packages
spack -d audit configs
spack -d audit externals
- name: Package audits (without coverage)
if: ${{ runner.os == 'Windows' }}
@@ -61,8 +59,6 @@ jobs:
. share/spack/setup-env.sh
spack -d audit packages
./share/spack/qa/validate_last_exit.ps1
spack -d audit configs
./share/spack/qa/validate_last_exit.ps1
spack -d audit externals
./share/spack/qa/validate_last_exit.ps1
- uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673


@@ -87,7 +87,7 @@ jobs:
fi
- name: Upload Dockerfile
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a
uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b
with:
name: dockerfiles_${{ matrix.dockerfile[0] }}
path: dockerfiles
@@ -96,7 +96,7 @@ jobs:
uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db
uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27
- name: Log in to GitHub Container Registry
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
@@ -113,7 +113,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85
uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445
with:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}
@@ -126,7 +126,7 @@ jobs:
needs: deploy-images
steps:
- name: Merge Artifacts
uses: actions/upload-artifact/merge@834a144ee995460fba8ed112a2fc961b36a5ec5a
uses: actions/upload-artifact/merge@0b2256b8c012f0828dc542b3febcab082c67f72b
with:
name: dockerfiles
pattern: dockerfiles_*


@@ -1,6 +1,6 @@
black==24.8.0
black==24.4.2
clingo==5.7.1
flake8==7.1.1
flake8==7.1.0
isort==5.13.2
mypy==1.8.0
types-six==1.16.21.20240513


@@ -16,27 +16,38 @@ jobs:
matrix:
os: [ubuntu-latest]
python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12']
concretizer: ['clingo']
on_develop:
- ${{ github.ref == 'refs/heads/develop' }}
include:
- python-version: '3.11'
os: ubuntu-latest
concretizer: original
on_develop: ${{ github.ref == 'refs/heads/develop' }}
- python-version: '3.6'
os: ubuntu-20.04
concretizer: clingo
on_develop: ${{ github.ref == 'refs/heads/develop' }}
exclude:
- python-version: '3.7'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
- python-version: '3.8'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
- python-version: '3.9'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
- python-version: '3.10'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
- python-version: '3.11'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
steps:
@@ -74,6 +85,7 @@ jobs:
- name: Run unit tests
env:
SPACK_PYTHON: python
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
SPACK_TEST_PARALLEL: 2
COVERAGE: true
UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
@@ -170,6 +182,7 @@ jobs:
- name: Run unit tests (full suite with coverage)
env:
COVERAGE: true
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673
@@ -200,6 +213,7 @@ jobs:
brew install dash fish gcc gnupg2 kcov
- name: Run unit tests
env:
SPACK_TEST_SOLVER: clingo
SPACK_TEST_PARALLEL: 4
run: |
git --version

.gitignore (vendored)

@@ -7,7 +7,6 @@
/var/spack/environments
/var/spack/repos/*/index.yaml
/var/spack/repos/*/lock
/var/spack/repos/*/packages.zip
/opt
# Ignore everything in /etc/spack except /etc/spack/defaults
/etc/spack/*


@@ -170,6 +170,23 @@ config:
# If set to true, Spack will use ccache to cache C compiles.
ccache: false
# The concretization algorithm to use in Spack. Options are:
#
# 'clingo': Uses a logic solver under the hood to solve DAGs with full
# backtracking and optimization for user preferences. Spack will
# try to bootstrap the logic solver, if not already available.
#
# 'original': Spack's original greedy, fixed-point concretizer. This
# algorithm can make decisions too early and will not backtrack
# sufficiently for many specs. This will soon be deprecated in
# favor of clingo.
#
# See `concretizer.yaml` for more settings you can fine-tune when
# using clingo.
concretizer: clingo
# How long to wait to lock the Spack installation database. This lock is used
# when Spack needs to manage its own package metadata and all operations are
# expected to complete within the default time limit. The timeout should
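
As an editorial aside (not part of the compare itself): the comment block above documents the two values accepted by the ``concretizer`` setting. Below is a minimal sketch of reading that setting through Spack's configuration API; the ``spack.config.get("config:concretizer")`` lookup also appears in the ``spack debug report`` hunk further down in this compare, while the ``default=`` argument is an assumption about the function's signature.

    import spack.config

    # Returns "clingo" or "original", per the config.yaml comments above;
    # falling back to "clingo" when the key is unset is an assumption, not diff content.
    concretizer = spack.config.get("config:concretizer", default="clingo")
    print(f"Active concretizer: {concretizer}")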


@@ -20,14 +20,11 @@ packages:
awk: [gawk]
armci: [armcimpi]
blas: [openblas, amdblis]
c: [gcc]
cxx: [gcc]
D: [ldc]
daal: [intel-oneapi-daal]
elf: [elfutils]
fftw-api: [fftw, amdfftw]
flame: [libflame, amdlibflame]
fortran: [gcc]
fortran-rt: [gcc-runtime, intel-oneapi-runtime]
fuse: [libfuse]
gl: [glx, osmesa]
@@ -64,7 +61,6 @@ packages:
tbb: [intel-tbb]
unwind: [libunwind]
uuid: [util-linux-uuid, libuuid]
wasi-sdk: [wasi-sdk-prebuilt]
xxd: [xxd-standalone, vim]
yacc: [bison, byacc]
ziglang: [zig]


@@ -1,5 +1,6 @@
config:
locks: false
concretizer: clingo
build_stage::
- '$spack/.staging'
stage_name: '{name}-{version}-{hash:7}'


@@ -206,7 +206,6 @@ def setup(sphinx):
("py:class", "six.moves.urllib.parse.ParseResult"),
("py:class", "TextIO"),
("py:class", "hashlib._Hash"),
("py:class", "concurrent.futures._base.Executor"),
# Spack classes that are private and we don't want to expose
("py:class", "spack.provider_index._IndexBase"),
("py:class", "spack.repo._PrependFileLoader"),


@@ -1263,11 +1263,6 @@ Git fetching supports the following parameters to ``version``:
option ``--depth 1`` will be used if the version of git and the specified
transport protocol support it, and ``--single-branch`` will be used if the
version of git supports it.
* ``git_sparse_paths``: Use ``sparse-checkout`` to only clone these relative paths.
This feature requires ``git`` to be version ``2.25.0`` or later but is useful for
large repositories that have separate portions that can be built independently.
If the paths provided are directories, all subdirectories and associated files
will also be cloned (see the short sketch after this list).
Only one of ``tag``, ``branch``, or ``commit`` can be used at a time.
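
A compact editorial sketch of the ``git_sparse_paths`` parameter described in the list above (the package name and repository URL are hypothetical, and combining it with ``tag`` is assumed from the surrounding documentation):

    from spack.package import *  # usual preamble of a Spack package file

    class BigMonorepo(Package):
        """Hypothetical package fetched from a large monorepo."""

        git = "https://example.com/big-monorepo.git"

        # Clone only these relative paths (plus top-level files);
        # requires git 2.25.0 or later, as noted above.
        version("1.0.0", tag="v1.0.0", git_sparse_paths=["docs", "core/src"])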
@@ -1366,41 +1361,6 @@ Submodules
For more information about git submodules see the manpage of git: ``man
git-submodule``.
Sparse-Checkout
You can supply ``git_sparse_paths`` at the package or version level to utilize git's
sparse-checkout feature. This will only clone the paths that are specified in the
``git_sparse_paths`` attribute for the package along with the files in the top level directory.
This feature allows you to only clone what you need from a large repository.
Note that this is a newer feature in git and requires git ``2.25.0`` or greater.
If ``git_sparse_paths`` is supplied and the git version is too old,
a warning will be issued and that package will fall back to the standard cloning operations instead.
``git_sparse_paths`` can be supplied as a list of paths, as a callable that returns such a list
for individual versions, or as a more complex package attribute using the ``@property`` decorator.
When a callable is used, its return value must be a list of paths.
.. code-block:: python

   def sparse_path_function(package):
       """A callable function that can be used inside a version."""
       # paths can be directories or files; all subdirectories and files are included
       paths = ["doe", "rae", "me/file.cpp"]
       if package.spec.version > Version("1.2.0"):
           paths.extend(["fae"])
       return paths

   class MyPackage(Package):
       # can also be a package attribute that will be used if not specified in versions
       git_sparse_paths = ["doe", "rae"]

       # use the package attribute
       version("1.0.0")
       version("1.1.0")
       # use the function
       version("1.1.5", git_sparse_paths=sparse_path_function)
       version("1.2.0", git_sparse_paths=sparse_path_function)
       version("1.2.5", git_sparse_paths=sparse_path_function)
.. _github-fetch:
^^^^^^


@@ -1,13 +1,13 @@
sphinx==7.4.7
sphinxcontrib-programoutput==0.17
sphinx_design==0.6.1
sphinx_design==0.6.0
sphinx-rtd-theme==2.0.0
python-levenshtein==0.25.1
docutils==0.20.1
pygments==2.18.0
urllib3==2.2.2
pytest==8.3.2
pytest==8.3.1
isort==5.13.2
black==24.8.0
flake8==7.1.1
mypy==1.11.1
black==24.4.2
flake8==7.1.0
mypy==1.11.0

lib/spack/env/cc (vendored)

@@ -174,46 +174,6 @@ preextend() {
unset IFS
}
execute() {
# dump the full command if the caller supplies SPACK_TEST_COMMAND=dump-args
if [ -n "${SPACK_TEST_COMMAND=}" ]; then
case "$SPACK_TEST_COMMAND" in
dump-args)
IFS="$lsep"
for arg in $full_command_list; do
echo "$arg"
done
unset IFS
exit
;;
dump-env-*)
var=${SPACK_TEST_COMMAND#dump-env-}
eval "printf '%s\n' \"\$0: \$var: \$$var\""
;;
*)
die "Unknown test command: '$SPACK_TEST_COMMAND'"
;;
esac
fi
#
# Write the input and output commands to debug logs if it's asked for.
#
if [ "$SPACK_DEBUG" = TRUE ]; then
input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.in.log"
output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.out.log"
echo "[$mode] $command $input_command" >> "$input_log"
IFS="$lsep"
echo "[$mode] "$full_command_list >> "$output_log"
unset IFS
fi
# Execute the full command, preserving spaces with IFS set
# to the alarm bell separator.
IFS="$lsep"; exec $full_command_list
exit
}
# Fail with a clear message if the input contains any bell characters.
if eval "[ \"\${*#*${lsep}}\" != \"\$*\" ]"; then
die "Compiler command line contains our separator ('${lsep}'). Cannot parse."
@@ -271,17 +231,12 @@ fi
# ld link
# ccld compile & link
# Note. SPACK_ALWAYS_XFLAGS are applied for all compiler invocations,
# including version checks (SPACK_XFLAGS variants are not applied
# for version checks).
command="${0##*/}"
comp="CC"
vcheck_flags=""
case "$command" in
cpp)
mode=cpp
debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_CPPFLAGS}"
;;
cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc|amdclang|cl.exe|craycc)
command="$SPACK_CC"
@@ -289,7 +244,6 @@ case "$command" in
comp="CC"
lang_flags=C
debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_CFLAGS}"
;;
c++|CC|g++|clang++|armclang++|icpc|icpx|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC)
command="$SPACK_CXX"
@@ -297,7 +251,6 @@ case "$command" in
comp="CXX"
lang_flags=CXX
debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_CXXFLAGS}"
;;
ftn|f90|fc|f95|gfortran|flang|armflang|ifort|ifx|pgfortran|nvfortran|xlf90|xlf90_r|nagfor|frt|amdflang|crayftn)
command="$SPACK_FC"
@@ -305,7 +258,6 @@ case "$command" in
comp="FC"
lang_flags=F
debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_FFLAGS}"
;;
f77|xlf|xlf_r|pgf77)
command="$SPACK_F77"
@@ -313,7 +265,6 @@ case "$command" in
comp="F77"
lang_flags=F
debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_FFLAGS}"
;;
ld|ld.gold|ld.lld)
mode=ld
@@ -414,11 +365,7 @@ unset IFS
export PATH="$new_dirs"
if [ "$mode" = vcheck ]; then
full_command_list="$command"
args="$@"
extend full_command_list vcheck_flags
extend full_command_list args
execute
exec "${command}" "$@"
fi
# Darwin's linker has a -r argument that merges object files together.
@@ -775,7 +722,6 @@ case "$mode" in
cc|ccld)
case $lang_flags in
F)
extend spack_flags_list SPACK_ALWAYS_FFLAGS
extend spack_flags_list SPACK_FFLAGS
;;
esac
@@ -785,7 +731,6 @@ esac
# C preprocessor flags come before any C/CXX flags
case "$mode" in
cpp|as|cc|ccld)
extend spack_flags_list SPACK_ALWAYS_CPPFLAGS
extend spack_flags_list SPACK_CPPFLAGS
;;
esac
@@ -796,11 +741,9 @@ case "$mode" in
cc|ccld)
case $lang_flags in
C)
extend spack_flags_list SPACK_ALWAYS_CFLAGS
extend spack_flags_list SPACK_CFLAGS
;;
CXX)
extend spack_flags_list SPACK_ALWAYS_CXXFLAGS
extend spack_flags_list SPACK_CXXFLAGS
;;
esac
@@ -990,4 +933,39 @@ if [ -n "$SPACK_CCACHE_BINARY" ]; then
esac
fi
execute
# dump the full command if the caller supplies SPACK_TEST_COMMAND=dump-args
if [ -n "${SPACK_TEST_COMMAND=}" ]; then
case "$SPACK_TEST_COMMAND" in
dump-args)
IFS="$lsep"
for arg in $full_command_list; do
echo "$arg"
done
unset IFS
exit
;;
dump-env-*)
var=${SPACK_TEST_COMMAND#dump-env-}
eval "printf '%s\n' \"\$0: \$var: \$$var\""
;;
*)
die "Unknown test command: '$SPACK_TEST_COMMAND'"
;;
esac
fi
#
# Write the input and output commands to debug logs if it's asked for.
#
if [ "$SPACK_DEBUG" = TRUE ]; then
input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.in.log"
output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.out.log"
echo "[$mode] $command $input_command" >> "$input_log"
IFS="$lsep"
echo "[$mode] "$full_command_list >> "$output_log"
unset IFS
fi
# Execute the full command, preserving spaces with IFS set
# to the alarm bell separator.
IFS="$lsep"; exec $full_command_list


@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.5-dev (commit 7e6740012b897ae4a950f0bba7e9726b767e921f)
* Version: 0.2.4 (commit 48b92512b9ce203ded0ebd1ac41b42593e931f7c)
astunparse
----------------


@@ -47,11 +47,7 @@ def decorator(factory):
def partial_uarch(
name: str = "",
vendor: str = "",
features: Optional[Set[str]] = None,
generation: int = 0,
cpu_part: str = "",
name: str = "", vendor: str = "", features: Optional[Set[str]] = None, generation: int = 0
) -> Microarchitecture:
"""Construct a partial microarchitecture, from information gathered during system scan."""
return Microarchitecture(
@@ -61,7 +57,6 @@ def partial_uarch(
features=features or set(),
compilers={},
generation=generation,
cpu_part=cpu_part,
)
@@ -95,7 +90,6 @@ def proc_cpuinfo() -> Microarchitecture:
return partial_uarch(
vendor=_canonicalize_aarch64_vendor(data),
features=_feature_set(data, key="Features"),
cpu_part=data.get("CPU part", ""),
)
if architecture in (PPC64LE, PPC64):
@@ -351,10 +345,6 @@ def sorting_fn(item):
generic_candidates = [c for c in candidates if c.vendor == "generic"]
best_generic = max(generic_candidates, key=sorting_fn)
# Relevant for AArch64. Filter on "cpu_part" if we have any match
if info.cpu_part != "" and any(c for c in candidates if info.cpu_part == c.cpu_part):
candidates = [c for c in candidates if info.cpu_part == c.cpu_part]
# Filter the candidates to be descendant of the best generic candidate.
# This is to avoid that the lack of a niche feature that can be disabled
# from e.g. BIOS prevents detection of a reasonably performant architecture


@@ -2,7 +2,9 @@
# Archspec Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Types and functions to manage information on CPU microarchitectures."""
"""Types and functions to manage information
on CPU microarchitectures.
"""
import functools
import platform
import re
@@ -63,24 +65,21 @@ class Microarchitecture:
passed in as argument above.
* versions: versions that support this micro-architecture.
generation (int): generation of the micro-architecture, if relevant.
cpu_part (str): cpu part of the architecture, if relevant.
generation (int): generation of the micro-architecture, if
relevant.
"""
# pylint: disable=too-many-arguments,too-many-instance-attributes
# pylint: disable=too-many-arguments
#: Aliases for micro-architecture's features
feature_aliases = FEATURE_ALIASES
def __init__(self, name, parents, vendor, features, compilers, generation=0, cpu_part=""):
def __init__(self, name, parents, vendor, features, compilers, generation=0):
self.name = name
self.parents = parents
self.vendor = vendor
self.features = features
self.compilers = compilers
# Only relevant for PowerPC
self.generation = generation
# Only relevant for AArch64
self.cpu_part = cpu_part
# Cache the ancestor computation
self._ancestors = None
@@ -112,7 +111,6 @@ def __eq__(self, other):
and self.parents == other.parents # avoid ancestors here
and self.compilers == other.compilers
and self.generation == other.generation
and self.cpu_part == other.cpu_part
)
@coerce_target_names
@@ -145,8 +143,7 @@ def __repr__(self):
cls_name = self.__class__.__name__
fmt = (
cls_name + "({0.name!r}, {0.parents!r}, {0.vendor!r}, "
"{0.features!r}, {0.compilers!r}, generation={0.generation!r}, "
"cpu_part={0.cpu_part!r})"
"{0.features!r}, {0.compilers!r}, {0.generation!r})"
)
return fmt.format(self)
@@ -193,7 +190,6 @@ def to_dict(self):
"generation": self.generation,
"parents": [str(x) for x in self.parents],
"compilers": self.compilers,
"cpupart": self.cpu_part,
}
@staticmethod
@@ -206,7 +202,6 @@ def from_dict(data) -> "Microarchitecture":
features=set(data["features"]),
compilers=data.get("compilers", {}),
generation=data.get("generation", 0),
cpu_part=data.get("cpupart", ""),
)
def optimization_flags(self, compiler, version):
@@ -365,11 +360,8 @@ def fill_target_from_dict(name, data, targets):
features = set(values["features"])
compilers = values.get("compilers", {})
generation = values.get("generation", 0)
cpu_part = values.get("cpupart", "")
targets[name] = Microarchitecture(
name, parents, vendor, features, compilers, generation=generation, cpu_part=cpu_part
)
targets[name] = Microarchitecture(name, parents, vendor, features, compilers, generation)
known_targets = {}
data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"]


@@ -2225,14 +2225,10 @@
],
"nvhpc": [
{
"versions": "21.11:23.8",
"versions": "21.11:",
"name": "zen3",
"flags": "-tp {name}",
"warnings": "zen4 is not fully supported by nvhpc versions < 23.9, falling back to zen3"
},
{
"versions": "23.9:",
"flags": "-tp {name}"
"warnings": "zen4 is not fully supported by nvhpc yet, falling back to zen3"
}
]
}
@@ -2715,8 +2711,7 @@
"flags": "-mcpu=thunderx2t99"
}
]
},
"cpupart": "0x0af"
}
},
"a64fx": {
"from": ["armv8.2a"],
@@ -2784,8 +2779,7 @@
"flags": "-march=armv8.2-a+crc+crypto+fp16+sve"
}
]
},
"cpupart": "0x001"
}
},
"cortex_a72": {
"from": ["aarch64"],
@@ -2822,8 +2816,7 @@
"flags" : "-mcpu=cortex-a72"
}
]
},
"cpupart": "0xd08"
}
},
"neoverse_n1": {
"from": ["cortex_a72", "armv8.2a"],
@@ -2909,8 +2902,7 @@
"flags": "-tp {name}"
}
]
},
"cpupart": "0xd0c"
}
},
"neoverse_v1": {
"from": ["neoverse_n1", "armv8.4a"],
@@ -2934,6 +2926,8 @@
"lrcpc",
"dcpop",
"sha3",
"sm3",
"sm4",
"asimddp",
"sha512",
"sve",
@@ -3034,8 +3028,7 @@
"flags": "-tp {name}"
}
]
},
"cpupart": "0xd40"
}
},
"neoverse_v2": {
"from": ["neoverse_n1", "armv9.0a"],
@@ -3059,10 +3052,13 @@
"lrcpc",
"dcpop",
"sha3",
"sm3",
"sm4",
"asimddp",
"sha512",
"sve",
"asimdfhm",
"dit",
"uscat",
"ilrcpc",
"flagm",
@@ -3070,12 +3066,18 @@
"sb",
"dcpodp",
"sve2",
"sveaes",
"svepmull",
"svebitperm",
"svesha3",
"svesm4",
"flagm2",
"frint",
"svei8mm",
"svebf16",
"i8mm",
"bf16"
"bf16",
"dgh"
],
"compilers" : {
"gcc": [
@@ -3100,19 +3102,15 @@
"flags" : "-march=armv8.5-a+sve -mtune=cortex-a76"
},
{
"versions": "10.0:11.3.99",
"versions": "10.0:11.99",
"flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16 -mtune=cortex-a77"
},
{
"versions": "11.4:11.99",
"flags" : "-mcpu=neoverse-v2"
},
{
"versions": "12.0:12.2.99",
"versions": "12.0:12.99",
"flags" : "-march=armv9-a+i8mm+bf16 -mtune=cortex-a710"
},
{
"versions": "12.3:",
"versions": "13.0:",
"flags" : "-mcpu=neoverse-v2"
}
],
@@ -3147,113 +3145,7 @@
"flags": "-tp {name}"
}
]
},
"cpupart": "0xd4f"
},
"neoverse_n2": {
"from": ["neoverse_n1", "armv9.0a"],
"vendor": "ARM",
"features": [
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
"crc32",
"atomics",
"fphp",
"asimdhp",
"cpuid",
"asimdrdm",
"jscvt",
"fcma",
"lrcpc",
"dcpop",
"sha3",
"asimddp",
"sha512",
"sve",
"asimdfhm",
"uscat",
"ilrcpc",
"flagm",
"ssbs",
"sb",
"dcpodp",
"sve2",
"flagm2",
"frint",
"svei8mm",
"svebf16",
"i8mm",
"bf16"
],
"compilers" : {
"gcc": [
{
"versions": "4.8:5.99",
"flags": "-march=armv8-a"
},
{
"versions": "6:6.99",
"flags" : "-march=armv8.1-a"
},
{
"versions": "7.0:7.99",
"flags" : "-march=armv8.2-a -mtune=cortex-a72"
},
{
"versions": "8.0:8.99",
"flags" : "-march=armv8.4-a+sve -mtune=cortex-a72"
},
{
"versions": "9.0:9.99",
"flags" : "-march=armv8.5-a+sve -mtune=cortex-a76"
},
{
"versions": "10.0:10.99",
"flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16 -mtune=cortex-a77"
},
{
"versions": "11.0:",
"flags" : "-mcpu=neoverse-n2"
}
],
"clang" : [
{
"versions": "9.0:10.99",
"flags" : "-march=armv8.5-a+sve"
},
{
"versions": "11.0:13.99",
"flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16"
},
{
"versions": "14.0:15.99",
"flags" : "-march=armv9-a+i8mm+bf16"
},
{
"versions": "16.0:",
"flags" : "-mcpu=neoverse-n2"
}
],
"arm" : [
{
"versions": "23.04.0:",
"flags" : "-mcpu=neoverse-n2"
}
],
"nvhpc" : [
{
"versions": "23.3:",
"name": "neoverse-n1",
"flags": "-tp {name}"
}
]
},
"cpupart": "0xd49"
}
},
"m1": {
"from": ["armv8.4a"],
@@ -3319,8 +3211,7 @@
"flags" : "-mcpu=apple-m1"
}
]
},
"cpupart": "0x022"
}
},
"m2": {
"from": ["m1", "armv8.5a"],
@@ -3398,8 +3289,7 @@
"flags" : "-mcpu=apple-m2"
}
]
},
"cpupart": "0x032"
}
},
"arm": {
"from": [],


@@ -52,9 +52,6 @@
}
}
}
},
"cpupart": {
"type": "string"
}
},
"required": [
@@ -110,4 +107,4 @@
"additionalProperties": false
}
}
}
}


@@ -1624,12 +1624,6 @@ def remove_linked_tree(path):
shutil.rmtree(os.path.realpath(path), **kwargs)
os.unlink(path)
else:
if sys.platform == "win32":
# Adding this prefix allows shutil to remove long paths on windows
# https://learn.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=registry
long_path_pfx = "\\\\?\\"
if not path.startswith(long_path_pfx):
path = long_path_pfx + path
shutil.rmtree(path, **kwargs)


@@ -351,22 +351,6 @@ def _wrongly_named_spec(error_cls):
return errors
@config_packages
def _ensure_all_virtual_packages_have_default_providers(error_cls):
"""All virtual packages must have a default provider explicitly set."""
configuration = spack.config.create()
defaults = configuration.get("packages", scope="defaults")
default_providers = defaults["all"]["providers"]
virtuals = spack.repo.PATH.provider_index.providers
default_providers_filename = configuration.scopes["defaults"].get_section_filename("packages")
return [
error_cls(f"'{virtual}' must have a default provider in {default_providers_filename}", [])
for virtual in virtuals
if virtual not in default_providers
]
def _make_config_error(config_data, summary, error_cls):
s = io.StringIO()
s.write("Occurring in the following file:\n")

File diff suppressed because it is too large.


@@ -1,154 +0,0 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Bootstrap concrete specs for clingo
Spack uses clingo to concretize specs. When clingo itself needs to be bootstrapped from sources,
we need to rely on another mechanism to get a concrete spec that fits the current host.
This module contains the logic to get a concrete spec for clingo, starting from a prototype
JSON file for a similar platform.
"""
import pathlib
import sys
from typing import Dict, Optional, Tuple
import archspec.cpu
import spack.compiler
import spack.compilers
import spack.platforms
import spack.spec
import spack.traverse
from .config import spec_for_current_python
class ClingoBootstrapConcretizer:
def __init__(self, configuration):
self.host_platform = spack.platforms.host()
self.host_os = self.host_platform.operating_system("frontend")
self.host_target = archspec.cpu.host().family
self.host_architecture = spack.spec.ArchSpec.frontend_arch()
self.host_architecture.target = str(self.host_target)
self.host_compiler = self._valid_compiler_or_raise()
self.host_python = self.python_external_spec()
if str(self.host_platform) == "linux":
self.host_libc = self.libc_external_spec()
self.external_cmake, self.external_bison = self._externals_from_yaml(configuration)
def _valid_compiler_or_raise(self) -> "spack.compiler.Compiler":
if str(self.host_platform) == "linux":
compiler_name = "gcc"
elif str(self.host_platform) == "darwin":
compiler_name = "apple-clang"
elif str(self.host_platform) == "windows":
compiler_name = "msvc"
elif str(self.host_platform) == "freebsd":
compiler_name = "clang"
else:
raise RuntimeError(f"Cannot bootstrap clingo from sources on {self.host_platform}")
candidates = spack.compilers.compilers_for_spec(
compiler_name, arch_spec=self.host_architecture
)
if not candidates:
raise RuntimeError(
f"Cannot find any version of {compiler_name} to bootstrap clingo from sources"
)
candidates.sort(key=lambda x: x.spec.version, reverse=True)
return candidates[0]
def _externals_from_yaml(
self, configuration: "spack.config.Configuration"
) -> Tuple[Optional["spack.spec.Spec"], Optional["spack.spec.Spec"]]:
packages_yaml = configuration.get("packages")
requirements = {"cmake": "@3.20:", "bison": "@2.5:"}
selected: Dict[str, Optional["spack.spec.Spec"]] = {"cmake": None, "bison": None}
for pkg_name in ["cmake", "bison"]:
if pkg_name not in packages_yaml:
continue
candidates = packages_yaml[pkg_name].get("externals", [])
for candidate in candidates:
s = spack.spec.Spec(candidate["spec"], external_path=candidate["prefix"])
if not s.satisfies(requirements[pkg_name]):
continue
if not s.intersects(f"%{self.host_compiler.spec}"):
continue
if not s.intersects(f"arch={self.host_architecture}"):
continue
selected[pkg_name] = self._external_spec(s)
break
return selected["cmake"], selected["bison"]
def prototype_path(self) -> pathlib.Path:
"""Path to a prototype concrete specfile for clingo"""
parent_dir = pathlib.Path(__file__).parent
result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-{self.host_target}.json"
if str(self.host_platform) == "linux":
# Using aarch64 as a fallback, since it has gnuconfig (x86_64 doesn't have it)
if not result.exists():
result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-aarch64.json"
elif str(self.host_platform) == "freebsd":
result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-amd64.json"
elif not result.exists():
raise RuntimeError(f"Cannot bootstrap clingo from sources on {self.host_platform}")
return result
def concretize(self) -> "spack.spec.Spec":
# Read the prototype and mark it NOT concrete
s = spack.spec.Spec.from_specfile(str(self.prototype_path()))
s._mark_concrete(False)
# Tweak it to conform to the host architecture
for node in s.traverse():
node.architecture.os = str(self.host_os)
node.compiler = self.host_compiler.spec
node.architecture = self.host_architecture
if node.name == "gcc-runtime":
node.versions = self.host_compiler.spec.versions
for edge in spack.traverse.traverse_edges([s], cover="edges"):
if edge.spec.name == "python":
edge.spec = self.host_python
if edge.spec.name == "bison" and self.external_bison:
edge.spec = self.external_bison
if edge.spec.name == "cmake" and self.external_cmake:
edge.spec = self.external_cmake
if "libc" in edge.virtuals:
edge.spec = self.host_libc
s._finalize_concretization()
# Work around the fact that the installer calls Spec.dependents() and
# we modified edges inconsistently
return s.copy()
def python_external_spec(self) -> "spack.spec.Spec":
"""Python external spec corresponding to the current running interpreter"""
result = spack.spec.Spec(spec_for_current_python(), external_path=sys.exec_prefix)
return self._external_spec(result)
def libc_external_spec(self) -> "spack.spec.Spec":
result = self.host_compiler.default_libc
return self._external_spec(result)
def _external_spec(self, initial_spec) -> "spack.spec.Spec":
initial_spec.namespace = "builtin"
initial_spec.compiler = self.host_compiler.spec
initial_spec.architecture = self.host_architecture
for flag_type in spack.spec.FlagMap.valid_compiler_flags():
initial_spec.compiler_flags[flag_type] = []
return spack.spec.parse_with_version_concrete(initial_spec)


@@ -54,7 +54,6 @@
import spack.version
from ._common import _executables_in_store, _python_import, _root_spec, _try_import_from_store
from .clingo import ClingoBootstrapConcretizer
from .config import spack_python_interpreter, spec_for_current_python
#: Name of the file containing metadata about the bootstrapping source
@@ -269,13 +268,15 @@ def try_import(self, module: str, abstract_spec_str: str) -> bool:
# Try to build and install from sources
with spack_python_interpreter():
# Add hint to use frontend operating system on Cray
concrete_spec = spack.spec.Spec(abstract_spec_str + " ^" + spec_for_current_python())
if module == "clingo":
bootstrapper = ClingoBootstrapConcretizer(configuration=spack.config.CONFIG)
concrete_spec = bootstrapper.concretize()
else:
concrete_spec = spack.spec.Spec(
abstract_spec_str + " ^" + spec_for_current_python()
# TODO: remove when the old concretizer is deprecated # pylint: disable=fixme
concrete_spec._old_concretize( # pylint: disable=protected-access
deprecation_warning=False
)
else:
concrete_spec.concretize()
msg = "[BOOTSTRAP MODULE {0}] Try installing '{1}' from sources"
@@ -302,7 +303,14 @@ def try_search_path(self, executables: Tuple[str], abstract_spec_str: str) -> bo
# might reduce compilation time by a fair amount
_add_externals_if_missing()
concrete_spec = spack.spec.Spec(abstract_spec_str).concretized()
concrete_spec = spack.spec.Spec(abstract_spec_str)
if concrete_spec.name == "patchelf":
concrete_spec._old_concretize( # pylint: disable=protected-access
deprecation_warning=False
)
else:
concrete_spec.concretize()
msg = "[BOOTSTRAP] Try installing '{0}' from sources"
tty.debug(msg.format(abstract_spec_str))
with spack.config.override(self.mirror_scope):

File diffs suppressed for 7 files because one or more lines are too long.


@@ -3,6 +3,7 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Common utilities for managing intel oneapi packages."""
import getpass
import os
import platform
import shutil
@@ -12,7 +13,6 @@
from llnl.util.filesystem import HeaderList, LibraryList, find_libraries, join_path, mkdirp
from llnl.util.link_tree import LinkTree
import spack.util.path
from spack.build_environment import dso_suffix
from spack.directives import conflicts, license, redistribute, variant
from spack.package_base import InstallError
@@ -99,7 +99,7 @@ def install_component(self, installer_path):
# with other install depends on the userid. For root, we
# delete the installercache before and after install. For
# non root we redefine the HOME environment variable.
if spack.util.path.get_user() == "root":
if getpass.getuser() == "root":
shutil.rmtree("/var/intel/installercache", ignore_errors=True)
bash = Executable("bash")
@@ -122,7 +122,7 @@ def install_component(self, installer_path):
self.prefix,
)
if spack.util.path.get_user() == "root":
if getpass.getuser() == "root":
shutil.rmtree("/var/intel/installercache", ignore_errors=True)
# Some installers have a bug and do not return an error code when failing


@@ -139,10 +139,6 @@ def configure(self, pkg, spec, prefix):
args = ["--verbose", "--target-dir", inspect.getmodule(self.pkg).python_platlib]
args.extend(self.configure_args())
# https://github.com/Python-SIP/sip/commit/cb0be6cb6e9b756b8b0db3136efb014f6fb9b766
if spec["py-sip"].satisfies("@6.1.0:"):
args.extend(["--scripts-dir", pkg.prefix.bin])
sip_build = Executable(spec["py-sip"].prefix.bin.join("sip-build"))
sip_build(*args)


@@ -38,7 +38,6 @@
import spack.paths
import spack.repo
import spack.spec
import spack.stage
import spack.util.git
import spack.util.gpg as gpg_util
import spack.util.spack_yaml as syaml
@@ -72,7 +71,7 @@
# TODO: Remove this in Spack 0.23
SHARED_PR_MIRROR_URL = "s3://spack-binaries-prs/shared_pr_mirror"
JOB_NAME_FORMAT = (
"{name}{@version} {/hash:7} {%compiler.name}{@compiler.version}{ arch=architecture}"
"{name}{@version} {/hash:7} {%compiler.name}{@compiler.version}{arch=architecture}"
)
IS_WINDOWS = sys.platform == "win32"
spack_gpg = spack.main.SpackCommand("gpg")
@@ -1108,7 +1107,7 @@ def main_script_replacements(cmd):
if cdash_handler and cdash_handler.auth_token:
try:
cdash_handler.populate_buildgroup(all_job_names)
except (SpackError, HTTPError, URLError, TimeoutError) as err:
except (SpackError, HTTPError, URLError) as err:
tty.warn(f"Problem populating buildgroup: {err}")
else:
tty.warn("Unable to populate buildgroup without CDash credentials")
@@ -1371,6 +1370,15 @@ def can_verify_binaries():
return len(gpg_util.public_keys()) >= 1
def _push_to_build_cache(spec: spack.spec.Spec, sign_binaries: bool, mirror_url: str) -> None:
"""Unchecked version of the public API, for easier mocking"""
bindist.push_or_raise(
spec,
spack.mirror.Mirror.from_url(mirror_url).push_url,
bindist.PushOptions(force=True, unsigned=not sign_binaries),
)
def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: bool) -> bool:
"""Push one or more binary packages to the mirror.
@@ -1381,13 +1389,20 @@ def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: b
sign_binaries: If True, spack will attempt to sign binary package before pushing.
"""
tty.debug(f"Pushing to build cache ({'signed' if sign_binaries else 'unsigned'})")
signing_key = bindist.select_signing_key() if sign_binaries else None
try:
bindist.push_or_raise([spec], out_url=mirror_url, signing_key=signing_key)
_push_to_build_cache(spec, sign_binaries, mirror_url)
return True
except bindist.PushToBuildCacheError as e:
tty.error(f"Problem writing to {mirror_url}: {e}")
tty.error(str(e))
return False
except Exception as e:
# TODO (zackgalbreath): write an adapter for boto3 exceptions so we can catch a specific
# exception instead of parsing str(e)...
msg = str(e)
if any(x in msg for x in ["Access Denied", "InvalidAccessKeyId"]):
tty.error(f"Permission problem writing to {mirror_url}: {msg}")
return False
raise
def remove_other_mirrors(mirrors_to_keep, scope=None):
@@ -1433,6 +1448,10 @@ def copy_stage_logs_to_artifacts(job_spec: spack.spec.Spec, job_log_dir: str) ->
job_log_dir: path into which build log should be copied
"""
tty.debug(f"job spec: {job_spec}")
if not job_spec:
msg = f"Cannot copy stage logs: job spec ({job_spec}) is required"
tty.error(msg)
return
try:
pkg_cls = spack.repo.PATH.get_pkg_class(job_spec.name)
@@ -2064,7 +2083,7 @@ def read_broken_spec(broken_spec_url):
"""
try:
_, _, fs = web_util.read_from_url(broken_spec_url)
except web_util.SpackWebError:
except (URLError, web_util.SpackWebError, HTTPError):
tty.warn(f"Unable to read broken spec from {broken_spec_url}")
return None


@@ -237,7 +237,7 @@ def ensure_single_spec_or_die(spec, matching_specs):
if len(matching_specs) <= 1:
return
format_string = "{name}{@version}{%compiler.name}{@compiler.version}{ arch=architecture}"
format_string = "{name}{@version}{%compiler.name}{@compiler.version}{arch=architecture}"
args = ["%s matches multiple packages." % spec, "Matching packages:"]
args += [
colorize(" @K{%s} " % s.dag_hash(7)) + s.cformat(format_string) for s in matching_specs


@@ -3,24 +3,28 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import argparse
import copy
import glob
import hashlib
import json
import multiprocessing
import multiprocessing.pool
import os
import shutil
import sys
import tempfile
from typing import List, Tuple
from typing import Dict, List, Optional, Tuple, Union
import llnl.util.tty as tty
from llnl.string import plural
from llnl.util.lang import elide_list, stable_partition
from llnl.util.lang import elide_list
import spack.binary_distribution as bindist
import spack.cmd
import spack.config
import spack.deptypes as dt
import spack.environment as ev
import spack.error
import spack.hash_types as ht
import spack.mirror
import spack.oci.oci
import spack.oci.opener
@@ -31,13 +35,28 @@
import spack.store
import spack.user_environment
import spack.util.crypto
import spack.util.parallel
import spack.util.url as url_util
import spack.util.web as web_util
from spack import traverse
from spack.build_environment import determine_number_of_jobs
from spack.cmd import display_specs
from spack.cmd.common import arguments
from spack.oci.image import ImageReference
from spack.oci.image import (
Digest,
ImageReference,
default_config,
default_index_tag,
default_manifest,
default_tag,
tag_is_spec,
)
from spack.oci.oci import (
copy_missing_layers_with_retry,
get_manifest_and_config_with_retry,
list_tags,
upload_blob_with_retry,
upload_manifest_with_retry,
)
from spack.spec import Spec, save_dependency_specfiles
description = "create, download and install binary packages"
@@ -93,17 +112,6 @@ def setup_parser(subparser: argparse.ArgumentParser):
"Alternatively, one can decide to build a cache for only the package or only the "
"dependencies",
)
with_or_without_build_deps = push.add_mutually_exclusive_group()
with_or_without_build_deps.add_argument(
"--with-build-dependencies",
action="store_true",
help="include build dependencies in the buildcache",
)
with_or_without_build_deps.add_argument(
"--without-build-dependencies",
action="store_true",
help="exclude build dependencies from the buildcache",
)
push.add_argument(
"--fail-fast",
action="store_true",
@@ -321,6 +329,39 @@ def _format_spec(spec: Spec) -> str:
return spec.cformat("{name}{@version}{/hash:7}")
def _progress(i: int, total: int):
if total > 1:
digits = len(str(total))
return f"[{i+1:{digits}}/{total}] "
return ""
class NoPool:
def map(self, func, args):
return [func(a) for a in args]
def starmap(self, func, args):
return [func(*a) for a in args]
def __enter__(self):
return self
def __exit__(self, *args):
pass
MaybePool = Union[multiprocessing.pool.Pool, NoPool]
def _make_pool() -> MaybePool:
"""Can't use threading because it's unsafe, and can't use spawned processes because of globals.
That leaves only forking"""
if multiprocessing.get_start_method() == "fork":
return multiprocessing.pool.Pool(determine_number_of_jobs(parallel=True))
else:
return NoPool()
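# [Editorial sketch, not part of the original diff] Both pool flavours expose
# map()/starmap() and the context-manager protocol, so callers stay agnostic
# about whether forking was available (see the pool.map/pool.starmap calls in
# _push_oci and _update_index_oci below). A hypothetical worker:
def _editorial_demo_worker(x):
    return 2 * x

with _make_pool() as _demo_pool:
    assert _demo_pool.map(_editorial_demo_worker, [1, 2, 3]) == [2, 4, 6]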
def _skip_no_redistribute_for_public(specs):
remaining_specs = list()
removed_specs = list()
@@ -340,45 +381,6 @@ def _skip_no_redistribute_for_public(specs):
return remaining_specs
class PackagesAreNotInstalledError(spack.error.SpackError):
"""Raised when a list of specs is not installed but picked to be packaged."""
def __init__(self, specs: List[Spec]):
super().__init__(
"Cannot push non-installed packages",
", ".join(elide_list([_format_spec(s) for s in specs], 5)),
)
class PackageNotInstalledError(spack.error.SpackError):
"""Raised when a spec is not installed but picked to be packaged."""
def _specs_to_be_packaged(
requested: List[Spec], things_to_install: str, build_deps: bool
) -> List[Spec]:
"""Collect all non-external with or without roots and dependencies"""
if "dependencies" not in things_to_install:
deptype = dt.NONE
elif build_deps:
deptype = dt.ALL
else:
deptype = dt.RUN | dt.LINK | dt.TEST
specs = [
s
for s in traverse.traverse_nodes(
requested,
root="package" in things_to_install,
deptype=deptype,
order="breadth",
key=traverse.by_dag_hash,
)
if not s.external
]
specs.reverse()
return specs
def push_fn(args):
"""create a binary package and push it to a mirror"""
if args.spec_file:
@@ -410,84 +412,91 @@ def push_fn(args):
# For OCI images, we require dependencies to be pushed for now.
if target_image:
if "dependencies" not in args.things_to_install:
tty.die("Dependencies must be pushed for OCI images.")
if not unsigned:
tty.warn(
"Code signing is currently not supported for OCI images. "
"Use --unsigned to silence this warning."
)
unsigned = True
# Select a signing key, or None if unsigned.
signing_key = None if unsigned else (args.key or bindist.select_signing_key())
specs = _specs_to_be_packaged(
# This is a list of installed, non-external specs.
specs = bindist.specs_to_be_packaged(
roots,
things_to_install=args.things_to_install,
build_deps=args.with_build_dependencies or not args.without_build_dependencies,
root="package" in args.things_to_install,
dependencies="dependencies" in args.things_to_install,
)
if not args.private:
specs = _skip_no_redistribute_for_public(specs)
# When pushing multiple specs, print the url once ahead of time, as well as how
# many specs are being pushed.
if len(specs) > 1:
tty.info(f"Selected {len(specs)} specs to push to {push_url}")
# Pushing not installed specs is an error. Either fail fast or populate the error list and
# push installed package in best effort mode.
failed: List[Tuple[Spec, BaseException]] = []
with spack.store.STORE.db.read_transaction():
if any(not s.installed for s in specs):
specs, not_installed = stable_partition(specs, lambda s: s.installed)
if args.fail_fast:
raise PackagesAreNotInstalledError(not_installed)
else:
failed.extend(
(s, PackageNotInstalledError("package not installed")) for s in not_installed
)
failed = []
with bindist.default_push_context() as (tmpdir, executor):
if target_image:
base_image = ImageReference.from_string(args.base_image) if args.base_image else None
skipped, base_images, checksums, upload_errors = bindist._push_oci(
# TODO: unify this logic in the future.
if target_image:
base_image = ImageReference.from_string(args.base_image) if args.base_image else None
with tempfile.TemporaryDirectory(
dir=spack.stage.get_stage_root()
) as tmpdir, _make_pool() as pool:
skipped, base_images, checksums = _push_oci(
target_image=target_image,
base_image=base_image,
installed_specs_with_deps=specs,
force=args.force,
tmpdir=tmpdir,
executor=executor,
pool=pool,
)
if upload_errors:
failed.extend(upload_errors)
# Apart from creating manifests for each individual spec, we allow users to create a
# separate image tag for all root specs and their runtime dependencies.
elif args.tag:
if args.tag:
tagged_image = target_image.with_tag(args.tag)
# _push_oci may not populate base_images if binaries were already in the registry
for spec in roots:
bindist._oci_update_base_images(
_update_base_images(
base_image=base_image,
target_image=target_image,
spec=spec,
base_image_cache=base_images,
)
bindist._oci_put_manifest(
base_images, checksums, tagged_image, tmpdir, None, None, *roots
)
_put_manifest(base_images, checksums, tagged_image, tmpdir, None, None, *roots)
tty.info(f"Tagged {tagged_image}")
else:
skipped, upload_errors = bindist._push(
specs,
out_url=push_url,
force=args.force,
update_index=args.update_index,
signing_key=signing_key,
tmpdir=tmpdir,
executor=executor,
)
failed.extend(upload_errors)
else:
skipped = []
for i, spec in enumerate(specs):
try:
bindist.push_or_raise(
spec,
push_url,
bindist.PushOptions(
force=args.force,
unsigned=unsigned,
key=args.key,
regenerate_index=args.update_index,
),
)
msg = f"{_progress(i, len(specs))}Pushed {_format_spec(spec)}"
if len(specs) == 1:
msg += f" to {push_url}"
tty.info(msg)
except bindist.NoOverwriteException:
skipped.append(_format_spec(spec))
# Catch any other exception unless the fail fast option is set
except Exception as e:
if args.fail_fast or isinstance(
e, (bindist.PickKeyException, bindist.NoKeyException)
):
raise
failed.append((_format_spec(spec), e))
if skipped:
if len(specs) == 1:
@@ -510,22 +519,389 @@ def push_fn(args):
raise spack.error.SpackError(
f"The following {len(failed)} errors occurred while pushing specs to the buildcache",
"\n".join(
elide_list(
[
f" {_format_spec(spec)}: {e.__class__.__name__}: {e}"
for spec, e in failed
],
5,
)
elide_list([f" {spec}: {e.__class__.__name__}: {e}" for spec, e in failed], 5)
),
)
# Update the OCI index if requested
# Update the index if requested
# TODO: remove update index logic out of bindist; should be once after all specs are pushed
# not once per spec.
if target_image and len(skipped) < len(specs) and args.update_index:
with tempfile.TemporaryDirectory(
dir=spack.stage.get_stage_root()
) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor:
bindist._oci_update_index(target_image, tmpdir, executor)
) as tmpdir, _make_pool() as pool:
_update_index_oci(target_image, tmpdir, pool)
def _get_spack_binary_blob(image_ref: ImageReference) -> Optional[spack.oci.oci.Blob]:
"""Get the spack tarball layer digests and size if it exists"""
try:
manifest, config = get_manifest_and_config_with_retry(image_ref)
return spack.oci.oci.Blob(
compressed_digest=Digest.from_string(manifest["layers"][-1]["digest"]),
uncompressed_digest=Digest.from_string(config["rootfs"]["diff_ids"][-1]),
size=manifest["layers"][-1]["size"],
)
except Exception:
return None
def _push_single_spack_binary_blob(image_ref: ImageReference, spec: spack.spec.Spec, tmpdir: str):
filename = os.path.join(tmpdir, f"{spec.dag_hash()}.tar.gz")
# Create an oci.image.layer aka tarball of the package
compressed_tarfile_checksum, tarfile_checksum = spack.oci.oci.create_tarball(spec, filename)
blob = spack.oci.oci.Blob(
Digest.from_sha256(compressed_tarfile_checksum),
Digest.from_sha256(tarfile_checksum),
os.path.getsize(filename),
)
# Upload the blob
upload_blob_with_retry(image_ref, file=filename, digest=blob.compressed_digest)
# delete the file
os.unlink(filename)
return blob
def _retrieve_env_dict_from_config(config: dict) -> dict:
"""Retrieve the environment variables from the image config file.
Sets a default value for PATH if it is not present.
Args:
config (dict): The image config file.
Returns:
dict: The environment variables.
"""
env = {"PATH": "/bin:/usr/bin"}
if "Env" in config.get("config", {}):
for entry in config["config"]["Env"]:
key, value = entry.split("=", 1)
env[key] = value
return env
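# [Editorial sketch, not part of the original diff] A hypothetical image config:
# entries under config["config"]["Env"] override the "/bin:/usr/bin" PATH default
# seeded by the helper above, and any other variables are carried through.
_editorial_example_cfg = {"config": {"Env": ["PATH=/opt/view/bin", "LANG=C.UTF-8"]}}
assert _retrieve_env_dict_from_config(_editorial_example_cfg) == {
    "PATH": "/opt/view/bin",
    "LANG": "C.UTF-8",
}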
def _archspec_to_gooarch(spec: spack.spec.Spec) -> str:
name = spec.target.family.name
name_map = {"aarch64": "arm64", "x86_64": "amd64"}
return name_map.get(name, name)
def _put_manifest(
base_images: Dict[str, Tuple[dict, dict]],
checksums: Dict[str, spack.oci.oci.Blob],
image_ref: ImageReference,
tmpdir: str,
extra_config: Optional[dict],
annotations: Optional[dict],
*specs: spack.spec.Spec,
):
architecture = _archspec_to_gooarch(specs[0])
dependencies = list(
reversed(
list(
s
for s in traverse.traverse_nodes(
specs, order="topo", deptype=("link", "run"), root=True
)
if not s.external
)
)
)
base_manifest, base_config = base_images[architecture]
env = _retrieve_env_dict_from_config(base_config)
# If the base image uses `vnd.docker.distribution.manifest.v2+json`, then we use that too.
# This is because Singularity / Apptainer is very strict about not mixing them.
base_manifest_mediaType = base_manifest.get(
"mediaType", "application/vnd.oci.image.manifest.v1+json"
)
use_docker_format = (
base_manifest_mediaType == "application/vnd.docker.distribution.manifest.v2+json"
)
spack.user_environment.environment_modifications_for_specs(*specs).apply_modifications(env)
# Create an oci.image.config file
config = copy.deepcopy(base_config)
# Add the diff ids of the dependencies
for s in dependencies:
config["rootfs"]["diff_ids"].append(str(checksums[s.dag_hash()].uncompressed_digest))
# Set the environment variables
config["config"]["Env"] = [f"{k}={v}" for k, v in env.items()]
if extra_config:
# From the OCI v1.0 spec:
# > Any extra fields in the Image JSON struct are considered implementation
# > specific and MUST be ignored by any implementations which are unable to
# > interpret them.
config.update(extra_config)
config_file = os.path.join(tmpdir, f"{specs[0].dag_hash()}.config.json")
with open(config_file, "w") as f:
json.dump(config, f, separators=(",", ":"))
config_file_checksum = Digest.from_sha256(
spack.util.crypto.checksum(hashlib.sha256, config_file)
)
# Upload the config file
upload_blob_with_retry(image_ref, file=config_file, digest=config_file_checksum)
manifest = {
"mediaType": base_manifest_mediaType,
"schemaVersion": 2,
"config": {
"mediaType": base_manifest["config"]["mediaType"],
"digest": str(config_file_checksum),
"size": os.path.getsize(config_file),
},
"layers": [
*(layer for layer in base_manifest["layers"]),
*(
{
"mediaType": (
"application/vnd.docker.image.rootfs.diff.tar.gzip"
if use_docker_format
else "application/vnd.oci.image.layer.v1.tar+gzip"
),
"digest": str(checksums[s.dag_hash()].compressed_digest),
"size": checksums[s.dag_hash()].size,
}
for s in dependencies
),
],
}
if not use_docker_format and annotations:
manifest["annotations"] = annotations
# Finally upload the manifest
upload_manifest_with_retry(image_ref, manifest=manifest)
# delete the config file
os.unlink(config_file)
def _update_base_images(
*,
base_image: Optional[ImageReference],
target_image: ImageReference,
spec: spack.spec.Spec,
base_image_cache: Dict[str, Tuple[dict, dict]],
):
"""For a given spec and base image, copy the missing layers of the base image with matching
arch to the registry of the target image. If no base image is specified, create a dummy
manifest and config file."""
architecture = _archspec_to_gooarch(spec)
if architecture in base_image_cache:
return
if base_image is None:
base_image_cache[architecture] = (
default_manifest(),
default_config(architecture, "linux"),
)
else:
base_image_cache[architecture] = copy_missing_layers_with_retry(
base_image, target_image, architecture
)
def _push_oci(
*,
target_image: ImageReference,
base_image: Optional[ImageReference],
installed_specs_with_deps: List[Spec],
tmpdir: str,
pool: MaybePool,
force: bool = False,
) -> Tuple[List[str], Dict[str, Tuple[dict, dict]], Dict[str, spack.oci.oci.Blob]]:
"""Push specs to an OCI registry
Args:
image_ref: The target OCI image
base_image: Optional base image, which will be copied to the target registry.
installed_specs_with_deps: The installed specs to push, excluding externals,
including deps, ordered from roots to leaves.
force: Whether to overwrite existing layers and manifests in the buildcache.
Returns:
A tuple consisting of the list of skipped specs already in the build cache,
a dictionary mapping architectures to base image manifests and configs,
and a dictionary mapping each spec's dag hash to a blob.
"""
# Reverse the order
installed_specs_with_deps = list(reversed(installed_specs_with_deps))
# Spec dag hash -> blob
checksums: Dict[str, spack.oci.oci.Blob] = {}
# arch -> (manifest, config)
base_images: Dict[str, Tuple[dict, dict]] = {}
# Specs not uploaded because they already exist
skipped = []
if not force:
tty.info("Checking for existing specs in the buildcache")
to_be_uploaded = []
tags_to_check = (target_image.with_tag(default_tag(s)) for s in installed_specs_with_deps)
available_blobs = pool.map(_get_spack_binary_blob, tags_to_check)
for spec, maybe_blob in zip(installed_specs_with_deps, available_blobs):
if maybe_blob is not None:
checksums[spec.dag_hash()] = maybe_blob
skipped.append(_format_spec(spec))
else:
to_be_uploaded.append(spec)
else:
to_be_uploaded = installed_specs_with_deps
if not to_be_uploaded:
return skipped, base_images, checksums
tty.info(
f"{len(to_be_uploaded)} specs need to be pushed to "
f"{target_image.domain}/{target_image.name}"
)
# Upload blobs
new_blobs = pool.starmap(
_push_single_spack_binary_blob, ((target_image, spec, tmpdir) for spec in to_be_uploaded)
)
# And update the spec to blob mapping
for spec, blob in zip(to_be_uploaded, new_blobs):
checksums[spec.dag_hash()] = blob
# Copy base images if necessary
for spec in to_be_uploaded:
_update_base_images(
base_image=base_image,
target_image=target_image,
spec=spec,
base_image_cache=base_images,
)
def extra_config(spec: Spec):
spec_dict = spec.to_dict(hash=ht.dag_hash)
spec_dict["buildcache_layout_version"] = 1
spec_dict["binary_cache_checksum"] = {
"hash_algorithm": "sha256",
"hash": checksums[spec.dag_hash()].compressed_digest.digest,
}
return spec_dict
# Upload manifests
tty.info("Uploading manifests")
pool.starmap(
_put_manifest,
(
(
base_images,
checksums,
target_image.with_tag(default_tag(spec)),
tmpdir,
extra_config(spec),
{"org.opencontainers.image.description": spec.format()},
spec,
)
for spec in to_be_uploaded
),
)
# Print the image names of the top-level specs
for spec in to_be_uploaded:
tty.info(f"Pushed {_format_spec(spec)} to {target_image.with_tag(default_tag(spec))}")
return skipped, base_images, checksums
def _config_from_tag(image_ref: ImageReference, tag: str) -> Optional[dict]:
# Don't allow recursion here, since Spack itself always uploads
# vnd.oci.image.manifest.v1+json, not vnd.oci.image.index.v1+json
_, config = get_manifest_and_config_with_retry(image_ref.with_tag(tag), tag, recurse=0)
# Do very basic validation: if "spec" is a key in the config, it
# must be a Spec object too.
return config if "spec" in config else None
def _update_index_oci(image_ref: ImageReference, tmpdir: str, pool: MaybePool) -> None:
tags = list_tags(image_ref)
# Fetch all image config files in parallel
spec_dicts = pool.starmap(
_config_from_tag, ((image_ref, tag) for tag in tags if tag_is_spec(tag))
)
# Populate the database
db_root_dir = os.path.join(tmpdir, "db_root")
db = bindist.BuildCacheDatabase(db_root_dir)
for spec_dict in spec_dicts:
spec = Spec.from_dict(spec_dict)
db.add(spec, directory_layout=None)
db.mark(spec, "in_buildcache", True)
# Create the index.json file
index_json_path = os.path.join(tmpdir, "index.json")
with open(index_json_path, "w") as f:
db._write_to_file(f)
# Create an empty config.json file
empty_config_json_path = os.path.join(tmpdir, "config.json")
with open(empty_config_json_path, "wb") as f:
f.write(b"{}")
# Upload the index.json file
index_shasum = Digest.from_sha256(spack.util.crypto.checksum(hashlib.sha256, index_json_path))
upload_blob_with_retry(image_ref, file=index_json_path, digest=index_shasum)
# Upload the config.json file
empty_config_digest = Digest.from_sha256(
spack.util.crypto.checksum(hashlib.sha256, empty_config_json_path)
)
upload_blob_with_retry(image_ref, file=empty_config_json_path, digest=empty_config_digest)
# Push a manifest file that references the index.json file as a layer
# Notice that we push this as if it is an image, which it of course is not.
# When the ORAS spec becomes official, we can use that instead of a fake image.
# For now we just use the OCI image spec, so that we don't run into issues with
# automatic garbage collection of blobs that are not referenced by any image manifest.
oci_manifest = {
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"schemaVersion": 2,
# Config is just an empty {} file for now, and irrelevant
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"digest": str(empty_config_digest),
"size": os.path.getsize(empty_config_json_path),
},
# The buildcache index is the only layer, and is not a tarball, we lie here.
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"digest": str(index_shasum),
"size": os.path.getsize(index_json_path),
}
],
}
upload_manifest_with_retry(image_ref.with_tag(default_index_tag), oci_manifest)
def install_fn(args):
@@ -806,15 +1182,14 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
if image_ref:
with tempfile.TemporaryDirectory(
dir=spack.stage.get_stage_root()
) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor:
bindist._oci_update_index(image_ref, tmpdir, executor)
) as tmpdir, _make_pool() as pool:
_update_index_oci(image_ref, tmpdir, pool)
return
# Otherwise, assume a normal mirror.
url = mirror.push_url
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
bindist.generate_package_index(url, tmpdir)
bindist.generate_package_index(url_util.join(url, bindist.build_cache_relative_path()))
if update_keys:
keys_url = url_util.join(
@@ -822,8 +1197,7 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
)
try:
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
bindist.generate_key_index(keys_url, tmpdir)
bindist.generate_key_index(keys_url)
except bindist.CannotListKeys as e:
# Do not error out if listing keys went wrong. This usually means that the _gpg path
# does not exist. TODO: distinguish between this and other errors.

View File

@@ -11,6 +11,7 @@
from argparse import ArgumentParser, Namespace
from typing import IO, Any, Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.argparsewriter import ArgparseRstWriter, ArgparseWriter, Command
from llnl.util.tty.colify import colify
@@ -866,6 +867,9 @@ def _commands(parser: ArgumentParser, args: Namespace) -> None:
prepend_header(args, f)
formatter(args, f)
if args.update_completion:
fs.set_executable(args.update)
else:
prepend_header(args, sys.stdout)
formatter(args, sys.stdout)

View File

@@ -6,7 +6,6 @@
import os
import platform
import re
import sys
from datetime import datetime
from glob import glob
@@ -63,10 +62,9 @@ def create_db_tarball(args):
base = os.path.basename(str(spack.store.STORE.root))
transform_args = []
# Currently --transform and -s are not supported by Windows native tar
if "GNU" in tar("--version", output=str):
transform_args = ["--transform", "s/^%s/%s/" % (base, tarball_name)]
elif sys.platform != "win32":
else:
transform_args = ["-s", "/^%s/%s/" % (base, tarball_name)]
wd = os.path.dirname(str(spack.store.STORE.root))
@@ -92,6 +90,7 @@ def report(args):
print("* **Spack:**", get_version())
print("* **Python:**", platform.python_version())
print("* **Platform:**", architecture)
print("* **Concretizer:**", spack.config.get("config:concretizer"))
def debug(parser, args):

View File

@@ -468,30 +468,32 @@ def env_remove(args):
This removes an environment managed by Spack. Directory environments
and manifests embedded in repositories should be removed manually.
"""
remove_envs = []
read_envs = []
valid_envs = []
bad_envs = []
invalid_envs = []
for env_name in ev.all_environment_names():
try:
env = ev.read(env_name)
valid_envs.append(env)
valid_envs.append(env_name)
if env_name in args.rm_env:
remove_envs.append(env)
read_envs.append(env)
except (spack.config.ConfigFormatError, ev.SpackEnvironmentConfigError):
invalid_envs.append(env_name)
if env_name in args.rm_env:
bad_envs.append(env_name)
# Check if remove_env is included from another env before trying to remove
for env in valid_envs:
for remove_env in remove_envs:
# Check if env is linked to another before trying to remove
for name in valid_envs:
# don't check if environment is included to itself
if env.name == remove_env.name:
if name == env_name:
continue
if remove_env.path in env.included_concrete_envs:
msg = f'Environment "{remove_env.name}" is being used by environment "{env.name}"'
environ = ev.Environment(ev.root(name))
if ev.root(env_name) in environ.included_concrete_envs:
msg = f'Environment "{env_name}" is being used by environment "{name}"'
if args.force:
tty.warn(msg)
else:
@@ -504,7 +506,7 @@ def env_remove(args):
if not answer:
tty.die("Will not remove any environments")
for env in remove_envs:
for env in read_envs:
name = env.name
if env.active:
tty.die(f"Environment {name} can't be removed while activated.")

View File

@@ -135,7 +135,9 @@ def external_find(args):
candidate_packages = packages_to_search_for(
names=args.packages, tags=args.tags, exclude=args.exclude
)
detected_packages = spack.detection.by_path(candidate_packages, path_hints=args.path)
detected_packages = spack.detection.by_path(
candidate_packages, path_hints=args.path, max_workers=args.jobs
)
new_specs = spack.detection.update_configuration(
detected_packages, scope=args.scope, buildable=not args.not_buildable

View File

@@ -5,12 +5,10 @@
import argparse
import os
import tempfile
import spack.binary_distribution
import spack.mirror
import spack.paths
import spack.stage
import spack.util.gpg
import spack.util.url
from spack.cmd.common import arguments
@@ -117,7 +115,6 @@ def setup_parser(subparser):
help="URL of the mirror where keys will be published",
)
publish.add_argument(
"--update-index",
"--rebuild-index",
action="store_true",
default=False,
@@ -223,10 +220,9 @@ def gpg_publish(args):
elif args.mirror_url:
mirror = spack.mirror.Mirror(args.mirror_url, args.mirror_url)
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
spack.binary_distribution.push_keys(
mirror, keys=args.keys, tmpdir=tmpdir, update_index=args.update_index
)
spack.binary_distribution.push_keys(
mirror, keys=args.keys, regenerate_index=args.rebuild_index
)
def gpg(parser, args):

View File

@@ -502,7 +502,7 @@ def print_licenses(pkg, args):
def info(parser, args):
spec = spack.spec.Spec(args.package)
pkg_cls = spack.repo.PATH.get_pkg_class(spec.fullname)
pkg_cls = spack.repo.PATH.get_pkg_class(spec.name)
pkg = pkg_cls(spec)
# Output core package information

View File

@@ -3,13 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import filecmp
import os
import pathlib
import sys
import tempfile
import zipfile
from typing import List, Optional, Tuple
import llnl.util.tty as tty
@@ -17,7 +12,6 @@
import spack.repo
import spack.util.path
from spack.cmd.common import arguments
from spack.util.archive import reproducible_zipfile_from_prefix
description = "manage package source repositories"
section = "config"
@@ -73,12 +67,6 @@ def setup_parser(subparser):
help="configuration scope to modify",
)
# Zip
zip_parser = sp.add_parser("zip", help=repo_zip.__doc__)
zip_parser.add_argument(
"namespace_or_path", help="namespace or path of a Spack package repository"
)
def repo_create(args):
"""create a new package repository"""
@@ -121,18 +109,31 @@ def repo_add(args):
def repo_remove(args):
"""remove a repository from Spack's configuration"""
repos = spack.config.get("repos", scope=args.scope)
namespace_or_path = args.namespace_or_path
key, repo = _get_repo(repos, args.namespace_or_path)
# If the argument is a path, remove that repository from config.
canon_path = spack.util.path.canonicalize_path(namespace_or_path)
for repo_path in repos:
repo_canon_path = spack.util.path.canonicalize_path(repo_path)
if canon_path == repo_canon_path:
repos.remove(repo_path)
spack.config.set("repos", repos, args.scope)
tty.msg("Removed repository %s" % repo_path)
return
if not key:
tty.die(f"No repository with path or namespace: {args.namespace_or_path}")
# If it is a namespace, remove corresponding repo
for path in repos:
try:
repo = spack.repo.from_path(path)
if repo.namespace == namespace_or_path:
repos.remove(path)
spack.config.set("repos", repos, args.scope)
tty.msg("Removed repository %s with namespace '%s'." % (repo.root, repo.namespace))
return
except spack.repo.RepoError:
continue
repos.remove(key)
spack.config.set("repos", repos, args.scope)
if repo:
tty.msg(f"Removed repository {repo.root} with namespace '{repo.namespace}'")
else:
tty.msg(f"Removed repository {key}")
tty.die("No repository with path or namespace: %s" % namespace_or_path)
def repo_list(args):
@@ -146,77 +147,17 @@ def repo_list(args):
continue
if sys.stdout.isatty():
tty.msg(f"{len(repos)} package repositor{'y.' if len(repos) == 1 else 'ies.'}")
msg = "%d package repositor" % len(repos)
msg += "y." if len(repos) == 1 else "ies."
tty.msg(msg)
if not repos:
return
max_ns_len = max(len(r.namespace) for r in repos)
for repo in repos:
print(f"{repo.namespace:<{max_ns_len}} {repo.root}")
def repo_zip(args):
"""zip a package repository to make it immutable and faster to load"""
key, _ = _get_repo(spack.config.get("repos"), args.namespace_or_path)
if not key:
tty.die(f"No repository with path or namespace: {args.namespace_or_path}")
try:
repo = spack.repo.from_path(key)
except spack.repo.RepoError:
tty.die(f"No repository at path: {key}")
def _zip_repo_skip(entry: os.DirEntry):
return entry.name == "__pycache__"
def _zip_repo_path_to_name(path: str) -> str:
# use spack/pkg/<repo>/* prefix and rename `package.py` as `__init__.py`
rel_path = pathlib.PurePath(path).relative_to(repo.packages_path)
if rel_path.name == "package.py":
rel_path = rel_path.with_name("__init__.py")
return str(rel_path)
# Create a zipfile in a temporary file
with tempfile.NamedTemporaryFile(delete=False, mode="wb", dir=repo.root) as f, zipfile.ZipFile(
f, "w", compression=zipfile.ZIP_DEFLATED
) as zip:
reproducible_zipfile_from_prefix(
zip, repo.packages_path, skip=_zip_repo_skip, path_to_name=_zip_repo_path_to_name
)
packages_zip = os.path.join(repo.root, "packages.zip")
try:
# Inform the user whether or not the repo was modified since it was last zipped
if os.path.exists(packages_zip) and filecmp.cmp(f.name, packages_zip):
tty.msg(f"{repo.namespace}: {packages_zip} is up to date")
return
else:
os.rename(f.name, packages_zip)
tty.msg(f"{repo.namespace} was zipped: {packages_zip}")
finally:
try:
os.unlink(f.name)
except OSError:
pass
def _get_repo(repos: List[str], path_or_name) -> Tuple[Optional[str], Optional[spack.repo.Repo]]:
"""Find repo by path or namespace"""
canon_path = spack.util.path.canonicalize_path(path_or_name)
for path in repos:
if canon_path == spack.util.path.canonicalize_path(path):
return path, None
for path in repos:
try:
repo = spack.repo.from_path(path)
except spack.repo.RepoError:
continue
if repo.namespace == path_or_name:
return path, repo
return None, None
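A quick illustrative call (hypothetical paths and namespaces, not from this changeset) showing the two return shapes of the helper above: a path match returns the configured path with no Repo object, while a namespace match also returns the loaded Repo.
configured = ["~/my-spack-repo", "$spack/var/spack/repos/builtin"]
key, repo = _get_repo(configured, "~/my-spack-repo")   # path match
# -> key == "~/my-spack-repo", repo is None
key, repo = _get_repo(configured, "builtin")            # namespace match
# -> key is the configured path, repo is the loaded spack.repo.Repo
key, repo = _get_repo(configured, "does-not-exist")
# -> (None, None)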
fmt = "%%-%ds%%s" % (max_ns_len + 4)
print(fmt % (repo.namespace, repo.root))
def repo(parser, args):
@@ -226,6 +167,5 @@ def repo(parser, args):
"add": repo_add,
"remove": repo_remove,
"rm": repo_remove,
"zip": repo_zip,
}
action[args.repo_command](args)

View File

@@ -339,7 +339,7 @@ def add(self, pkg_name, fetcher):
for pkg_cls in spack.repo.PATH.all_package_classes():
npkgs += 1
for v in list(pkg_cls.versions):
for v in pkg_cls.versions:
try:
pkg = pkg_cls(spack.spec.Spec(pkg_cls.name))
fetcher = fs.for_package_version(pkg, v)

View File

@@ -23,6 +23,11 @@ def setup_parser(subparser):
output.add_argument(
"-s", "--safe", action="store_true", help="only list safe versions of the package"
)
output.add_argument(
"--safe-only",
action="store_true",
help="[deprecated] only list safe versions of the package",
)
output.add_argument(
"-r", "--remote", action="store_true", help="only list remote versions of the package"
)
@@ -42,13 +47,17 @@ def versions(parser, args):
safe_versions = pkg.versions
if args.safe_only:
tty.warn('"--safe-only" is deprecated. Use "--safe" instead.')
args.safe = args.safe_only
if not (args.remote or args.new):
if sys.stdout.isatty():
tty.msg("Safe versions (already checksummed):")
if not safe_versions:
if sys.stdout.isatty():
tty.warn(f"Found no versions for {pkg.name}")
tty.warn("Found no versions for {0}".format(pkg.name))
tty.debug("Manually add versions to the package.")
else:
colify(sorted(safe_versions, reverse=True), indent=2)
@@ -74,12 +83,12 @@ def versions(parser, args):
if not remote_versions:
if sys.stdout.isatty():
if not fetched_versions:
tty.warn(f"Found no versions for {pkg.name}")
tty.warn("Found no versions for {0}".format(pkg.name))
tty.debug(
"Check the list_url and list_depth attributes of "
"the package to help Spack find versions."
)
else:
tty.warn(f"Found no unchecksummed versions for {pkg.name}")
tty.warn("Found no unchecksummed versions for {0}".format(pkg.name))
else:
colify(sorted(remote_versions, reverse=True), indent=2)

View File

@@ -278,6 +278,11 @@ def debug_flags(self):
def opt_flags(self):
return ["-O", "-O0", "-O1", "-O2", "-O3"]
# Cray PrgEnv name that can be used to load this compiler
PrgEnv: Optional[str] = None
# Name of module used to switch versions of this compiler
PrgEnv_compiler: Optional[str] = None
def __init__(
self,
cspec,

View File

@@ -25,6 +25,9 @@ class Aocc(Compiler):
# Subclasses use possible names of Fortran 90 compiler
fc_names = ["flang"]
PrgEnv = "PrgEnv-aocc"
PrgEnv_compiler = "aocc"
version_argument = "--version"
@property

View File

@@ -34,9 +34,12 @@ def __init__(self, *args, **kwargs):
# MacPorts builds gcc versions with prefixes and -mp-X.Y suffixes.
suffixes = [r"-mp-\d\.\d"]
PrgEnv = "PrgEnv-cray"
PrgEnv_compiler = "cce"
@property
def link_paths(self):
if any("PrgEnv-cray" in m for m in self.modules):
if any(self.PrgEnv in m for m in self.modules):
# Old module-based interface to cray compilers
return {
"cc": os.path.join("cce", "cc"),

View File

@@ -40,6 +40,9 @@ class Gcc(spack.compiler.Compiler):
"fc": os.path.join("gcc", "gfortran"),
}
PrgEnv = "PrgEnv-gnu"
PrgEnv_compiler = "gcc"
@property
def verbose_flag(self):
return "-v"

View File

@@ -31,6 +31,9 @@ class Intel(Compiler):
"fc": os.path.join("intel", "ifort"),
}
PrgEnv = "PrgEnv-intel"
PrgEnv_compiler = "intel"
if sys.platform == "win32":
version_argument = "/QV"
else:
@@ -123,14 +126,3 @@ def fc_pic_flag(self):
@property
def stdcxx_libs(self):
return ("-cxxlib",)
def setup_custom_environment(self, pkg, env):
# Edge cases for Intel's oneAPI compilers when using the legacy classic compilers:
# Always pass flags to disable deprecation warnings, since these warnings can
# confuse tools that parse the output of compiler commands (e.g. version checks).
if self.cc and self.cc.endswith("icc") and self.real_version >= Version("2021"):
env.append_flags("SPACK_ALWAYS_CFLAGS", "-diag-disable=10441")
if self.cxx and self.cxx.endswith("icpc") and self.real_version >= Version("2021"):
env.append_flags("SPACK_ALWAYS_CXXFLAGS", "-diag-disable=10441")
if self.fc and self.fc.endswith("ifort") and self.real_version >= Version("2021"):
env.append_flags("SPACK_ALWAYS_FFLAGS", "-diag-disable=10448")

View File

@@ -231,55 +231,24 @@ def msvc_version(self):
@property
def short_msvc_version(self):
"""This is the shorthand VCToolset version of form
MSVC<short-ver>
"""
return "MSVC" + self.vc_toolset_ver
@property
def vc_toolset_ver(self):
"""
The toolset version is the version of the combined set of cl and link
This typically relates directly to VS version i.e. VS 2022 is v143
VS 19 is v142, etc.
This value is defined by the first three digits of the major + minor
version of the VS toolset (143 for 14.3x.bbbbb). Traditionally the
minor version has remained a static two digit number for a VS release
series, however, as of VS22, this is no longer true, both
14.4x.bbbbb and 14.3x.bbbbb are considered valid VS22 VC toolset
versions due to a change in toolset minor version sentiment.
This is *NOT* the full version, for that see
This is the shorthand VCToolset version of form
MSVC<short-ver> *NOT* the full version, for that see
Msvc.msvc_version or MSVC.platform_toolset_ver for the
raw platform toolset version
"""
ver = self.msvc_version[:2].joined.string[:3]
return ver
ver = self.platform_toolset_ver
return "MSVC" + ver
@property
def platform_toolset_ver(self):
"""
This is the platform toolset version of current MSVC compiler
i.e. 142. The platform toolset is the targeted MSVC library/compiler
versions by compilation (this is different from the VC Toolset)
i.e. 142.
This is different from the VC toolset version as established
by `short_msvc_version`, but typically are represented by the same
three digit value
by `short_msvc_version`
"""
# Typically VS toolset version and platform toolset versions match
# VS22 introduces the first divergence of VS toolset version
# (144 for "recent" releases) and platform toolset version (143)
# so it needs additional handling until MS releases v144
# (assuming v144 is also for VS22)
# or adds better support for detection
# TODO: (johnwparent) Update this logic for the next platform toolset
# or VC toolset version update
toolset_ver = self.vc_toolset_ver
vs22_toolset = Version(toolset_ver) > Version("142")
return toolset_ver if not vs22_toolset else "143"
return self.msvc_version[:2].joined.string[:3]
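As a rough illustration of the version mapping described in these docstrings (a standalone sketch with made-up version strings, not Spack's actual implementation): the VC toolset version is the first three digits of the major+minor version, and any toolset newer than 142 is currently reported as platform toolset 143.
def platform_toolset(msvc_version: str) -> str:
    # simplification: take the first three digits, e.g. "14.29.30133" -> "142"
    vc_toolset = msvc_version.replace(".", "")[:3]
    # VS22 divergence: VC toolsets above 142 still target platform toolset 143
    return "143" if int(vc_toolset) > 142 else vc_toolset

assert platform_toolset("14.29.30133") == "142"
assert platform_toolset("14.40.33807") == "143"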
def _compiler_version(self, compiler):
"""Returns version object for given compiler"""

View File

@@ -29,6 +29,9 @@ class Nvhpc(Compiler):
"fc": os.path.join("nvhpc", "nvfortran"),
}
PrgEnv = "PrgEnv-nvhpc"
PrgEnv_compiler = "nvhpc"
version_argument = "--version"
version_regex = r"nv[^ ]* (?:[^ ]+ Dev-r)?([0-9.]+)(?:-[0-9]+)?"

View File

@@ -9,7 +9,6 @@
from llnl.util import tty
from spack.compiler import Compiler
from spack.version import Version
class Oneapi(Compiler):
@@ -33,6 +32,9 @@ class Oneapi(Compiler):
"fc": os.path.join("oneapi", "ifx"),
}
PrgEnv = "PrgEnv-oneapi"
PrgEnv_compiler = "oneapi"
version_argument = "--version"
version_regex = r"(?:(?:oneAPI DPC\+\+(?:\/C\+\+)? Compiler)|(?:\(IFORT\))|(?:\(IFX\))) (\S+)"
@@ -140,16 +142,6 @@ def setup_custom_environment(self, pkg, env):
env.prepend_path("PATH", dirname(self.cxx))
env.prepend_path("LD_LIBRARY_PATH", join(dirname(dirname(self.cxx)), "lib"))
# Edge cases for Intel's oneAPI compilers when using the legacy classic compilers:
# Always pass flags to disable deprecation warnings, since these warnings can
# confuse tools that parse the output of compiler commands (e.g. version checks).
if self.cc and self.cc.endswith("icc") and self.real_version >= Version("2021"):
env.append_flags("SPACK_ALWAYS_CFLAGS", "-diag-disable=10441")
if self.cxx and self.cxx.endswith("icpc") and self.real_version >= Version("2021"):
env.append_flags("SPACK_ALWAYS_CXXFLAGS", "-diag-disable=10441")
if self.fc and self.fc.endswith("ifort") and self.real_version >= Version("2021"):
env.append_flags("SPACK_ALWAYS_FFLAGS", "-diag-disable=10448")
# 2024 release bumped the libsycl version because of an ABI
# change, 2024 compilers are required. You will see this
# error:

View File

@@ -30,6 +30,9 @@ class Pgi(Compiler):
"fc": os.path.join("pgi", "pgfortran"),
}
PrgEnv = "PrgEnv-pgi"
PrgEnv_compiler = "pgi"
version_argument = "-V"
ignore_version_errors = [2] # `pgcc -V` on PowerPC annoyingly returns 2
version_regex = r"pg[^ ]* ([0-9.]+)-[0-9]+ (LLVM )?[^ ]+ target on "

View File

@@ -23,6 +23,9 @@ class Rocmcc(spack.compilers.clang.Clang):
# Subclasses use possible names of Fortran 90 compiler
fc_names = ["amdflang"]
PrgEnv = "PrgEnv-amd"
PrgEnv_compiler = "amd"
@property
def link_paths(self):
link_paths = {

View File

@@ -2,11 +2,29 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
(DEPRECATED) Used to contain the code for the original concretizer
Functions here are used to take abstract specs and make them concrete.
For example, if a spec asks for a version between 1.8 and 1.9, these
functions will take the most recent 1.9 version of the
package available. Or, if the user didn't specify a compiler for a
spec, then this will assign a compiler to the spec based on defaults
or user preferences.
TODO: make this customizable and allow users to configure
concretization policies.
"""
import functools
import platform
import tempfile
from contextlib import contextmanager
from itertools import chain
from typing import Union
import archspec.cpu
import llnl.util.lang
import llnl.util.tty as tty
import spack.abi
import spack.compilers
@@ -19,20 +37,639 @@
import spack.target
import spack.tengine
import spack.util.path
import spack.variant as vt
from spack.package_prefs import PackagePrefs, is_spec_buildable, spec_externals
from spack.version import ClosedOpenRange, VersionList, ver
#: implements rudimentary logic for ABI compatibility
_abi: Union[spack.abi.ABI, llnl.util.lang.Singleton] = llnl.util.lang.Singleton(
lambda: spack.abi.ABI()
)
@functools.total_ordering
class reverse_order:
"""Helper for creating key functions.
This is a wrapper that inverts the sense of the natural
comparisons on the object.
"""
def __init__(self, value):
self.value = value
def __eq__(self, other):
return other.value == self.value
def __lt__(self, other):
return other.value < self.value
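A small usage sketch (illustrative values only): wrapping one component of a sort key in reverse_order sorts that component descending while the others stay ascending.
names = ["abc", "abd", "xy"]
# primary key: length ascending; secondary key: value descending
print(sorted(names, key=lambda s: (len(s), reverse_order(s))))
# -> ['xy', 'abd', 'abc']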
class Concretizer:
"""(DEPRECATED) Only contains logic to enable/disable compiler existence checks."""
"""You can subclass this class to override some of the default
concretization strategies, or you can override all of them.
"""
#: Controls whether we check that compiler versions actually exist
#: during concretization. Used for testing and for mirror creation
check_for_compiler_existence = None
def __init__(self):
#: Packages that the old concretizer cannot deal with correctly, and cannot build anyway.
#: Those will not be considered as providers for virtuals.
non_buildable_packages = {"glibc", "musl"}
def __init__(self, abstract_spec=None):
if Concretizer.check_for_compiler_existence is None:
Concretizer.check_for_compiler_existence = not spack.config.get(
"config:install_missing_compilers", False
)
self.abstract_spec = abstract_spec
self._adjust_target_answer_generator = None
def concretize_develop(self, spec):
"""
Add ``dev_path=*`` variant to packages built from local source.
"""
env = spack.environment.active_environment()
dev_info = env.dev_specs.get(spec.name, {}) if env else {}
if not dev_info:
return False
path = spack.util.path.canonicalize_path(dev_info["path"], default_wd=env.path)
if "dev_path" in spec.variants:
assert spec.variants["dev_path"].value == path
changed = False
else:
spec.variants.setdefault("dev_path", vt.SingleValuedVariant("dev_path", path))
changed = True
changed |= spec.constrain(dev_info["spec"])
return changed
def _valid_virtuals_and_externals(self, spec):
"""Returns a list of candidate virtual dep providers and external
packages that could be used to concretize a spec.
Preferred specs come first in the list.
"""
# First construct a list of concrete candidates to replace spec with.
candidates = [spec]
pref_key = lambda spec: 0 # no-op pref key
if spec.virtual:
candidates = [
s
for s in spack.repo.PATH.providers_for(spec)
if s.name not in self.non_buildable_packages
]
if not candidates:
raise spack.error.UnsatisfiableProviderSpecError(candidates[0], spec)
# Find nearest spec in the DAG (up then down) that has prefs.
spec_w_prefs = find_spec(
spec, lambda p: PackagePrefs.has_preferred_providers(p.name, spec.name), spec
) # default to spec itself.
# Create a key to sort candidates by the prefs we found
pref_key = PackagePrefs(spec_w_prefs.name, "providers", spec.name)
# For each candidate package, if it has externals, add those
# to the usable list. if it's not buildable, then *only* add
# the externals.
usable = []
for cspec in candidates:
if is_spec_buildable(cspec):
usable.append(cspec)
externals = spec_externals(cspec)
for ext in externals:
if ext.intersects(spec):
usable.append(ext)
# If nothing is in the usable list now, it's because we aren't
# allowed to build anything.
if not usable:
raise NoBuildError(spec)
# Use a sort key to order the results
return sorted(
usable,
key=lambda spec: (
not spec.external, # prefer externals
pref_key(spec), # respect prefs
spec.name, # group by name
reverse_order(spec.versions), # latest version
spec, # natural order
),
)
def choose_virtual_or_external(self, spec: spack.spec.Spec):
"""Given a list of candidate virtual and external packages, try to
find one that is most ABI compatible.
"""
candidates = self._valid_virtuals_and_externals(spec)
if not candidates:
return candidates
# Find the nearest spec in the dag that has a compiler. We'll
# use that spec to calibrate compiler compatibility.
abi_exemplar = find_spec(spec, lambda x: x.compiler)
if abi_exemplar is None:
abi_exemplar = spec.root
# Sort candidates from most to least compatibility.
# We reverse because True > False.
# Sort is stable, so candidates keep their order.
return sorted(
candidates,
reverse=True,
key=lambda spec: (
_abi.compatible(spec, abi_exemplar, loose=True),
_abi.compatible(spec, abi_exemplar),
),
)
def concretize_version(self, spec):
"""If the spec is already concrete, return. Otherwise take
the preferred version from spackconfig, and default to the package's
version if there are no available versions.
TODO: In many cases we probably want to look for installed
versions of each package and use an installed version
if we can link to it. The policy implemented here will
tend to rebuild a lot of stuff because it will prefer
a compiler in the spec to any compiler already-
installed things were built with. There is likely
some better policy that finds some middle ground
between these two extremes.
"""
# return if already concrete.
if spec.versions.concrete:
return False
# List of versions we could consider, in sorted order
pkg_versions = spec.package_class.versions
usable = [v for v in pkg_versions if any(v.intersects(sv) for sv in spec.versions)]
yaml_prefs = PackagePrefs(spec.name, "version")
# The keys below show the order of precedence of factors used
# to select a version when concretizing. The item with
# the "largest" key will be selected.
#
# NOTE: When COMPARING VERSIONS, the '@develop' version is always
# larger than other versions. BUT when CONCRETIZING,
# the largest NON-develop version is selected by default.
keyfn = lambda v: (
# ------- Special direction from the user
# Respect order listed in packages.yaml
-yaml_prefs(v),
# The preferred=True flag (packages or packages.yaml or both?)
pkg_versions.get(v).get("preferred", False),
# ------- Regular case: use latest non-develop version by default.
# Avoid @develop version, which would otherwise be the "largest"
# in straight version comparisons
not v.isdevelop(),
# Compare the version itself
# This includes the logic:
# a) develop > everything (disabled by "not v.isdevelop()" above)
# b) numeric > non-numeric
# c) Numeric or string comparison
v,
)
usable.sort(key=keyfn, reverse=True)
if usable:
spec.versions = ver([usable[0]])
else:
# We don't know of any SAFE versions that match the given
# spec. Grab the spec's versions and grab the highest
# *non-open* part of the range of versions it specifies.
# Someone else can raise an error if this happens,
# e.g. when we go to fetch it and don't know how. But it
# *might* work.
if not spec.versions or spec.versions == VersionList([":"]):
raise NoValidVersionError(spec)
else:
last = spec.versions[-1]
if isinstance(last, ClosedOpenRange):
range_as_version = VersionList([last]).concrete_range_as_version
if range_as_version:
spec.versions = ver([range_as_version])
else:
raise NoValidVersionError(spec)
else:
spec.versions = ver([last])
return True # Things changed
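A toy illustration of the precedence encoded in keyfn above (plain strings stand in for Spack versions, and no package preferences are involved): develop sorts below every numbered release, and otherwise the highest version wins.
candidates = ["1.9", "develop", "2.1"]
toy_key = lambda v: (v != "develop", v)   # non-develop first, then version string
print(max(candidates, key=toy_key))       # -> 2.1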
def concretize_architecture(self, spec):
"""If the spec is empty provide the defaults of the platform. If the
architecture is not a string type, then check if either the platform,
target or operating system are concretized. If any of the fields are
changed then return True. If everything is concretized (i.e the
architecture attribute is a namedtuple of classes) then return False.
If the target is a string type, then convert the string into a
concretized architecture. If it has no architecture and the root of the
DAG has an architecture, then use the root otherwise use the defaults
on the platform.
"""
# ensure type safety for the architecture
if spec.architecture is None:
spec.architecture = spack.spec.ArchSpec()
if spec.architecture.concrete:
return False
# Get platform of nearest spec with a platform, including spec
# If spec has a platform, easy
if spec.architecture.platform:
new_plat = spack.platforms.by_name(spec.architecture.platform)
else:
# Else if anyone else has a platform, take the closest one
# Search up, then down, along build/link deps first
# Then any nearest. Algorithm from compilerspec search
platform_spec = find_spec(spec, lambda x: x.architecture and x.architecture.platform)
if platform_spec:
new_plat = spack.platforms.by_name(platform_spec.architecture.platform)
else:
# If no platform anywhere in this spec, grab the default
new_plat = spack.platforms.host()
# Get nearest spec with relevant platform and an os
# Generally, same algorithm as finding platform, except we only
# consider specs that have a platform
if spec.architecture.os:
new_os = spec.architecture.os
else:
new_os_spec = find_spec(
spec,
lambda x: (
x.architecture
and x.architecture.platform == str(new_plat)
and x.architecture.os
),
)
if new_os_spec:
new_os = new_os_spec.architecture.os
else:
new_os = new_plat.operating_system("default_os")
# Get the nearest spec with relevant platform and a target
# Generally, same algorithm as finding os
curr_target = None
if spec.architecture.target:
curr_target = spec.architecture.target
if spec.architecture.target and spec.architecture.target_concrete:
new_target = spec.architecture.target
else:
new_target_spec = find_spec(
spec,
lambda x: (
x.architecture
and x.architecture.platform == str(new_plat)
and x.architecture.target
and x.architecture.target != curr_target
),
)
if new_target_spec:
if curr_target:
# constrain one target by the other
new_target_arch = spack.spec.ArchSpec(
(None, None, new_target_spec.architecture.target)
)
curr_target_arch = spack.spec.ArchSpec((None, None, curr_target))
curr_target_arch.constrain(new_target_arch)
new_target = curr_target_arch.target
else:
new_target = new_target_spec.architecture.target
else:
# To get default platform, consider package prefs
if PackagePrefs.has_preferred_targets(spec.name):
new_target = self.target_from_package_preferences(spec)
else:
new_target = new_plat.target("default_target")
if curr_target:
# convert to ArchSpec to compare satisfaction
new_target_arch = spack.spec.ArchSpec((None, None, str(new_target)))
curr_target_arch = spack.spec.ArchSpec((None, None, str(curr_target)))
if not new_target_arch.intersects(curr_target_arch):
# new_target is an incorrect guess based on preferences
# and/or default
valid_target_ranges = str(curr_target).split(",")
for target_range in valid_target_ranges:
t_min, t_sep, t_max = target_range.partition(":")
if not t_sep:
new_target = t_min
break
elif t_max:
new_target = t_max
break
elif t_min:
# TODO: something better than picking first
new_target = t_min
break
# Construct new architecture, compute whether spec changed
arch_spec = (str(new_plat), str(new_os), str(new_target))
new_arch = spack.spec.ArchSpec(arch_spec)
spec_changed = new_arch != spec.architecture
spec.architecture = new_arch
return spec_changed
def target_from_package_preferences(self, spec):
"""Returns the preferred target from the package preferences if
there's any.
Args:
spec: abstract spec to be concretized
"""
target_prefs = PackagePrefs(spec.name, "target")
target_specs = [spack.spec.Spec("target=%s" % tname) for tname in archspec.cpu.TARGETS]
def tspec_filter(s):
# Filter target specs by whether the architecture
# family is the current machine type. This ensures
# we only consider x86_64 targets when on an
# x86_64 machine, etc. This may need to change to
# enable setting cross compiling as a default
target = archspec.cpu.TARGETS[str(s.architecture.target)]
arch_family_name = target.family.name
return arch_family_name == platform.machine()
# Sort filtered targets by package prefs
target_specs = list(filter(tspec_filter, target_specs))
target_specs.sort(key=target_prefs)
new_target = target_specs[0].architecture.target
return new_target
def concretize_variants(self, spec):
"""If the spec already has variants filled in, return. Otherwise, add
the user preferences from packages.yaml or the default variants from
the package specification.
"""
changed = False
preferred_variants = PackagePrefs.preferred_variants(spec.name)
pkg_cls = spec.package_class
for name, entry in pkg_cls.variants.items():
variant, when = entry
var = spec.variants.get(name, None)
if var and "*" in var:
# remove variant wildcard before concretizing
# wildcard cannot be combined with other variables in a
# multivalue variant, a concrete variant cannot have the value
# wildcard, and a wildcard does not constrain a variant
spec.variants.pop(name)
if name not in spec.variants and any(spec.satisfies(w) for w in when):
changed = True
if name in preferred_variants:
spec.variants[name] = preferred_variants.get(name)
else:
spec.variants[name] = variant.make_default()
if name in spec.variants and not any(spec.satisfies(w) for w in when):
raise vt.InvalidVariantForSpecError(name, when, spec)
return changed
def concretize_compiler(self, spec):
"""If the spec already has a compiler, we're done. If not, then take
the compiler used for the nearest ancestor with a compiler
spec and use that. If the ancestor's compiler is not
concrete, then use the preferred compiler as specified in
spackconfig.
Intuition: Use the spackconfig default if no package that depends on
this one has a strict compiler requirement. Otherwise, try to
build with the compiler that will be used by libraries that
link to this one, to maximize compatibility.
"""
# Pass on concretizing the compiler if the target or operating system
# is not yet determined
if not spec.architecture.concrete:
# We haven't changed, but other changes need to happen before we
# continue. `return True` here to force concretization to keep
# running.
return True
# Only use a matching compiler if it is of the proper style
# Takes advantage of the proper logic already existing in
# compiler_for_spec. Should think whether this can be more
# efficient
def _proper_compiler_style(cspec, aspec):
compilers = spack.compilers.compilers_for_spec(cspec, arch_spec=aspec)
# If the spec passed as argument is concrete we want to check
# the versions match exactly
if (
cspec.concrete
and compilers
and cspec.version not in [c.version for c in compilers]
):
return []
return compilers
if spec.compiler and spec.compiler.concrete:
if self.check_for_compiler_existence and not _proper_compiler_style(
spec.compiler, spec.architecture
):
_compiler_concretization_failure(spec.compiler, spec.architecture)
return False
# Find another spec that has a compiler, or the root if none do
other_spec = spec if spec.compiler else find_spec(spec, lambda x: x.compiler, spec.root)
other_compiler = other_spec.compiler
assert other_spec
# Check if the compiler is already fully specified
if other_compiler and other_compiler.concrete:
if self.check_for_compiler_existence and not _proper_compiler_style(
other_compiler, spec.architecture
):
_compiler_concretization_failure(other_compiler, spec.architecture)
spec.compiler = other_compiler
return True
if other_compiler: # Another node has abstract compiler information
compiler_list = spack.compilers.find_specs_by_arch(other_compiler, spec.architecture)
if not compiler_list:
# We don't have a matching compiler installed
if not self.check_for_compiler_existence:
# Concretize compiler spec versions as a package to build
cpkg_spec = spack.compilers.pkg_spec_for_compiler(other_compiler)
self.concretize_version(cpkg_spec)
spec.compiler = spack.spec.CompilerSpec(
other_compiler.name, cpkg_spec.versions
)
return True
else:
# No compiler with a satisfactory spec was found
raise UnavailableCompilerVersionError(other_compiler, spec.architecture)
else:
# We have no hints to go by, grab any compiler
compiler_list = spack.compilers.all_compiler_specs()
if not compiler_list:
# Spack has no compilers.
raise spack.compilers.NoCompilersError()
# By default, prefer later versions of compilers
compiler_list = sorted(compiler_list, key=lambda x: (x.name, x.version), reverse=True)
ppk = PackagePrefs(other_spec.name, "compiler")
matches = sorted(compiler_list, key=ppk)
# copy concrete version into other_compiler
try:
spec.compiler = next(
c for c in matches if _proper_compiler_style(c, spec.architecture)
).copy()
except StopIteration:
# No compiler with a satisfactory spec has a suitable arch
_compiler_concretization_failure(other_compiler, spec.architecture)
assert spec.compiler.concrete
return True # things changed.
def concretize_compiler_flags(self, spec):
"""
The compiler flags are updated to match those of the spec whose
compiler is used, defaulting to no compiler flags in the spec.
Default specs set at the compiler level will still be added later.
"""
# Pass on concretizing the compiler flags if the target or operating
# system is not set.
if not spec.architecture.concrete:
# We haven't changed, but other changes need to happen before we
# continue. `return True` here to force concretization to keep
# running.
return True
compiler_match = lambda other: (
spec.compiler == other.compiler and spec.architecture == other.architecture
)
ret = False
for flag in spack.spec.FlagMap.valid_compiler_flags():
if flag not in spec.compiler_flags:
spec.compiler_flags[flag] = list()
try:
nearest = next(
p
for p in spec.traverse(direction="parents")
if (compiler_match(p) and (p is not spec) and flag in p.compiler_flags)
)
nearest_flags = nearest.compiler_flags.get(flag, [])
flags = spec.compiler_flags.get(flag, [])
if set(nearest_flags) - set(flags):
spec.compiler_flags[flag] = list(llnl.util.lang.dedupe(nearest_flags + flags))
ret = True
except StopIteration:
pass
# Include the compiler flag defaults from the config files
# This ensures that spack will detect conflicts that stem from a change
# in default compiler flags.
try:
compiler = spack.compilers.compiler_for_spec(spec.compiler, spec.architecture)
except spack.compilers.NoCompilerForSpecError:
if self.check_for_compiler_existence:
raise
return ret
for flag in compiler.flags:
config_flags = compiler.flags.get(flag, [])
flags = spec.compiler_flags.get(flag, [])
spec.compiler_flags[flag] = list(llnl.util.lang.dedupe(config_flags + flags))
if set(config_flags) - set(flags):
ret = True
return ret
def adjust_target(self, spec):
"""Adjusts the target microarchitecture if the compiler is too old
to support the default one.
Args:
spec: spec to be concretized
Returns:
True if spec was modified, False otherwise
"""
# To minimize the impact on performance this function will attempt
# to adjust the target only at the very first call once necessary
# information is set. It will just return False on subsequent calls.
# The way this is achieved is by initializing a generator and making
# this function return the next answer.
if not (spec.architecture and spec.architecture.concrete):
# Not ready, but keep going because we have work to do later
return True
def _make_only_one_call(spec):
yield self._adjust_target(spec)
while True:
yield False
if self._adjust_target_answer_generator is None:
self._adjust_target_answer_generator = _make_only_one_call(spec)
return next(self._adjust_target_answer_generator)
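The one-shot generator trick used here can be sketched in isolation (illustrative names, not Spack API): the first next() performs the real work, and every later next() cheaply returns False.
def one_shot(action):
    yield action()          # do the real work exactly once
    while True:
        yield False         # all later calls are no-ops

gen = one_shot(lambda: "adjusted")
print(next(gen))  # adjusted
print(next(gen))  # False
print(next(gen))  # False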
def _adjust_target(self, spec):
"""Assumes that the architecture and the compiler have been
set already and checks if the current target microarchitecture
is the default and can be optimized by the compiler.
If not, downgrades the microarchitecture until a suitable one
is found. If none can be found raise an error.
Args:
spec: spec to be concretized
Returns:
True if any modification happened, False otherwise
"""
import archspec.cpu
# Try to adjust the target only if it is the default
# target for this platform
current_target = spec.architecture.target
current_platform = spack.platforms.by_name(spec.architecture.platform)
default_target = current_platform.target("default_target")
if PackagePrefs.has_preferred_targets(spec.name):
default_target = self.target_from_package_preferences(spec)
if current_target != default_target or (
self.abstract_spec
and self.abstract_spec.architecture
and self.abstract_spec.architecture.concrete
):
return False
try:
current_target.optimization_flags(spec.compiler)
except archspec.cpu.UnsupportedMicroarchitecture:
microarchitecture = current_target.microarchitecture
for ancestor in microarchitecture.ancestors:
candidate = None
try:
candidate = spack.target.Target(ancestor)
candidate.optimization_flags(spec.compiler)
except archspec.cpu.UnsupportedMicroarchitecture:
continue
if candidate is not None:
msg = (
"{0.name}@{0.version} cannot build optimized "
'binaries for "{1}". Using best target possible: '
'"{2}"'
)
msg = msg.format(spec.compiler, current_target, candidate)
tty.warn(msg)
spec.architecture.target = candidate
return True
else:
raise
return False
@contextmanager
@@ -82,6 +719,19 @@ def find_spec(spec, condition, default=None):
return default # Nothing matched the condition; return default.
def _compiler_concretization_failure(compiler_spec, arch):
# Distinguish between the case that there are compilers for
# the arch but not with the given compiler spec and the case that
# there are no compilers for the arch at all
if not spack.compilers.compilers_for_arch(arch):
available_os_targets = set(
(c.operating_system, c.target) for c in spack.compilers.all_compilers()
)
raise NoCompilersForArchError(arch, available_os_targets)
else:
raise UnavailableCompilerVersionError(compiler_spec, arch)
def concretize_specs_together(*abstract_specs, **kwargs):
"""Given a number of specs as input, tries to concretize them together.
@@ -94,6 +744,12 @@ def concretize_specs_together(*abstract_specs, **kwargs):
Returns:
List of concretized specs
"""
if spack.config.get("config:concretizer", "clingo") == "original":
return _concretize_specs_together_original(*abstract_specs, **kwargs)
return _concretize_specs_together_new(*abstract_specs, **kwargs)
def _concretize_specs_together_new(*abstract_specs, **kwargs):
import spack.solver.asp
allow_deprecated = spack.config.get("config:deprecated", False)
@@ -104,6 +760,51 @@ def concretize_specs_together(*abstract_specs, **kwargs):
return [s.copy() for s in result.specs]
def _concretize_specs_together_original(*abstract_specs, **kwargs):
abstract_specs = [spack.spec.Spec(s) for s in abstract_specs]
tmpdir = tempfile.mkdtemp()
builder = spack.repo.MockRepositoryBuilder(tmpdir)
# Split recursive specs, as it seems the concretizer has issue
# respecting conditions on dependents expressed like
# depends_on('foo ^bar@1.0'), see issue #11160
split_specs = [
dep.copy(deps=False) for spec1 in abstract_specs for dep in spec1.traverse(root=True)
]
builder.add_package(
"concretizationroot", dependencies=[(str(x), None, None) for x in split_specs]
)
with spack.repo.use_repositories(builder.root, override=False):
# Spec from a helper package that depends on all the abstract_specs
concretization_root = spack.spec.Spec("concretizationroot")
concretization_root.concretize(tests=kwargs.get("tests", False))
# Retrieve the direct dependencies
concrete_specs = [concretization_root[spec.name].copy() for spec in abstract_specs]
return concrete_specs
class NoCompilersForArchError(spack.error.SpackError):
def __init__(self, arch, available_os_targets):
err_msg = (
"No compilers found"
" for operating system %s and target %s."
"\nIf previous installations have succeeded, the"
" operating system may have been updated." % (arch.os, arch.target)
)
available_os_target_strs = list()
for operating_system, t in available_os_targets:
os_target_str = "%s-%s" % (operating_system, t) if t else operating_system
available_os_target_strs.append(os_target_str)
err_msg += (
"\nCompilers are defined for the following"
" operating systems and targets:\n\t" + "\n\t".join(available_os_target_strs)
)
super().__init__(err_msg, "Run 'spack compiler find' to add compilers.")
class UnavailableCompilerVersionError(spack.error.SpackError):
"""Raised when there is no available compiler that satisfies a
compiler spec."""
@@ -119,3 +820,37 @@ def __init__(self, compiler_spec, arch=None):
"'spack compilers' to see which compilers are already recognized"
" by spack.",
)
class NoValidVersionError(spack.error.SpackError):
"""Raised when there is no way to have a concrete version for a
particular spec."""
def __init__(self, spec):
super().__init__(
"There are no valid versions for %s that match '%s'" % (spec.name, spec.versions)
)
class InsufficientArchitectureInfoError(spack.error.SpackError):
"""Raised when details on architecture cannot be collected from the
system"""
def __init__(self, spec, archs):
super().__init__(
"Cannot determine necessary architecture information for '%s': %s"
% (spec.name, str(archs))
)
class NoBuildError(spack.error.SpecError):
"""Raised when a package is configured with the buildable option False, but
no satisfactory external versions can be found
"""
def __init__(self, spec):
msg = (
"The spec\n '%s'\n is configured as not buildable, "
"and no matching external installs were found"
)
super().__init__(msg % spec)

View File

@@ -99,6 +99,7 @@
"dirty": False,
"build_jobs": min(16, cpus_available()),
"build_stage": "$tempdir/spack-stage",
"concretizer": "clingo",
"license_dir": spack.paths.default_license_dir,
}
}

View File

@@ -24,7 +24,6 @@
import llnl.util.tty
import spack.config
import spack.error
import spack.operating_systems.windows_os as winOs
import spack.spec
import spack.util.spack_yaml
@@ -137,10 +136,10 @@ def path_to_dict(search_paths: List[str]):
# entry overrides later entries
for search_path in reversed(search_paths):
try:
with os.scandir(search_path) as entries:
path_to_lib.update(
{entry.path: entry.name for entry in entries if entry.is_file()}
)
for lib in os.listdir(search_path):
lib_path = os.path.join(search_path, lib)
if llnl.util.filesystem.is_readable_file(lib_path):
path_to_lib[lib_path] = lib
except OSError as e:
msg = f"cannot scan '{search_path}' for external software: {str(e)}"
llnl.util.tty.debug(msg)

View File

@@ -12,19 +12,16 @@
import re
import sys
import warnings
from typing import Dict, Iterable, List, Optional, Set, Tuple, Type
from typing import Dict, List, Optional, Set, Tuple, Type
import llnl.util.filesystem
import llnl.util.lang
import llnl.util.tty
import spack.package_base
import spack.repo
import spack.util.elf as elf_utils
import spack.util.environment
import spack.util.environment as environment
import spack.util.ld_so_conf
import spack.util.parallel
from .common import (
DetectedPackage,
@@ -82,27 +79,26 @@ def executables_in_path(path_hints: List[str]) -> Dict[str, str]:
path_hints: list of paths to be searched. If None the list will be
constructed based on the PATH environment variable.
"""
return path_to_dict(llnl.util.filesystem.search_paths_for_executables(*path_hints))
search_paths = llnl.util.filesystem.search_paths_for_executables(*path_hints)
return path_to_dict(search_paths)
def accept_elf(entry: os.DirEntry, host_compat: Tuple[bool, bool, int]):
def accept_elf(path, host_compat):
"""Accept an ELF file if the header matches the given compat triplet. In case it's not an ELF
(e.g. a static library, or some arbitrary file), fall back to is_readable_file."""
# Fast path: assume libraries at least have .so in their basename.
# Note: don't replace with splitext, because of libsmth.so.1.2.3 file names.
if ".so" not in entry.name:
return is_readable_file(entry)
if ".so" not in os.path.basename(path):
return llnl.util.filesystem.is_readable_file(path)
try:
return host_compat == elf_utils.get_elf_compat(entry.path)
return host_compat == elf_utils.get_elf_compat(path)
except (OSError, elf_utils.ElfParsingError):
return is_readable_file(entry)
return llnl.util.filesystem.is_readable_file(path)
def is_readable_file(entry: os.DirEntry) -> bool:
return entry.is_file() and os.access(entry.path, os.R_OK)
def system_library_paths() -> List[str]:
def libraries_in_ld_and_system_library_path(
path_hints: Optional[List[str]] = None,
) -> Dict[str, str]:
"""Get the paths of all libraries available from ``path_hints`` or the
following defaults:
@@ -116,57 +112,82 @@ def system_library_paths() -> List[str]:
(i.e. the basename of the library path).
There may be multiple paths with the same basename. In this case it is
assumed there are two different instances of the library."""
assumed there are two different instances of the library.
search_paths: List[str] = []
Args:
path_hints: list of paths to be searched. If None the list will be
constructed based on the set of LD_LIBRARY_PATH, LIBRARY_PATH,
DYLD_LIBRARY_PATH, and DYLD_FALLBACK_LIBRARY_PATH environment
variables as well as the standard system library paths.
path_hints (list): list of paths to be searched. If ``None``, the default
system paths are used.
"""
if path_hints:
search_paths = llnl.util.filesystem.search_paths_for_libraries(*path_hints)
else:
search_paths = []
if sys.platform == "win32":
search_hints = spack.util.environment.get_path("PATH")
search_paths.extend(llnl.util.filesystem.search_paths_for_libraries(*search_hints))
# on Windows, some libraries (.dlls) are found in the bin directory or sometimes
# at the search root. Add both of those options to the search scheme
search_paths.extend(llnl.util.filesystem.search_paths_for_executables(*search_hints))
# if no user provided path was given, add defaults to the search
search_paths.extend(WindowsKitExternalPaths.find_windows_kit_lib_paths())
# SDK and WGL should be handled by above, however on occasion the WDK is in an atypical
# location, so we handle that case specifically.
search_paths.extend(WindowsKitExternalPaths.find_windows_driver_development_kit_paths())
elif sys.platform == "darwin":
search_paths.extend(environment.get_path("DYLD_LIBRARY_PATH"))
search_paths.extend(environment.get_path("DYLD_FALLBACK_LIBRARY_PATH"))
search_paths.extend(spack.util.ld_so_conf.host_dynamic_linker_search_paths())
elif sys.platform.startswith("linux"):
search_paths.extend(environment.get_path("LD_LIBRARY_PATH"))
# Environment variables
if sys.platform == "darwin":
search_paths.extend(environment.get_path("DYLD_LIBRARY_PATH"))
search_paths.extend(environment.get_path("DYLD_FALLBACK_LIBRARY_PATH"))
elif sys.platform.startswith("linux"):
search_paths.extend(environment.get_path("LD_LIBRARY_PATH"))
# Dynamic linker paths
search_paths.extend(spack.util.ld_so_conf.host_dynamic_linker_search_paths())
# Drop redundant paths
search_paths = list(filter(os.path.isdir, search_paths))
# Drop redundant paths
search_paths = list(filter(os.path.isdir, search_paths))
# Make sure we don't doubly list /usr/lib and /lib etc
search_paths = list(llnl.util.lang.dedupe(search_paths, key=file_identifier))
return search_paths
def libraries_in_path(search_paths: List[str]) -> Dict[str, str]:
try:
host_compat = elf_utils.get_elf_compat(sys.executable)
accept = lambda entry: accept_elf(entry, host_compat)
accept = lambda path: accept_elf(path, host_compat)
except (OSError, elf_utils.ElfParsingError):
accept = is_readable_file
accept = llnl.util.filesystem.is_readable_file
path_to_lib = {}
# Reverse order of search directories so that a lib in the first
# search path entry overrides later entries
for search_path in reversed(search_paths):
with os.scandir(search_path) as it:
for entry in it:
if accept(entry):
path_to_lib[entry.path] = entry.name
for lib in os.listdir(search_path):
lib_path = os.path.join(search_path, lib)
if accept(lib_path):
path_to_lib[lib_path] = lib
return path_to_lib
def _group_by_prefix(paths: List[str]) -> Dict[str, Set[str]]:
def libraries_in_windows_paths(path_hints: Optional[List[str]] = None) -> Dict[str, str]:
"""Get the paths of all libraries available from the system PATH paths.
For more details, see `libraries_in_ld_and_system_library_path` regarding
return type and contents.
Args:
path_hints: list of paths to be searched. If None the list will be
constructed based on the set of PATH environment
variables as well as the standard system library paths.
"""
search_hints = (
path_hints if path_hints is not None else spack.util.environment.get_path("PATH")
)
search_paths = llnl.util.filesystem.search_paths_for_libraries(*search_hints)
# on Windows, some libraries (.dlls) are found in the bin directory or sometimes
# at the search root. Add both of those options to the search scheme
search_paths.extend(llnl.util.filesystem.search_paths_for_executables(*search_hints))
if path_hints is None:
# if no user provided path was given, add defaults to the search
search_paths.extend(WindowsKitExternalPaths.find_windows_kit_lib_paths())
# SDK and WGL should be handled by above, however on occasion the WDK is in an atypical
# location, so we handle that case specifically.
search_paths.extend(WindowsKitExternalPaths.find_windows_driver_development_kit_paths())
return path_to_dict(search_paths)
def _group_by_prefix(paths: Set[str]) -> Dict[str, Set[str]]:
groups = collections.defaultdict(set)
for p in paths:
groups[os.path.dirname(p)].add(p)
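For reference, a tiny example of what _group_by_prefix returns (made-up paths): candidate files are bucketed by their containing directory.
paths = {"/usr/bin/gcc", "/usr/bin/g++", "/opt/compiler/bin/gcc"}
print(dict(_group_by_prefix(paths)))
# -> {"/usr/bin": {"/usr/bin/gcc", "/usr/bin/g++"},
#     "/opt/compiler/bin": {"/opt/compiler/bin/gcc"}}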
@@ -176,13 +197,10 @@ def _group_by_prefix(paths: List[str]) -> Dict[str, Set[str]]:
class Finder:
"""Inspects the file-system looking for packages. Guesses places where to look using PATH."""
def __init__(self, paths: Dict[str, str]):
self.paths = paths
def default_path_hints(self) -> List[str]:
return []
def search_patterns(self, *, pkg: Type[spack.package_base.PackageBase]) -> Optional[List[str]]:
def search_patterns(self, *, pkg: Type["spack.package_base.PackageBase"]) -> List[str]:
"""Returns the list of patterns used to match candidate files.
Args:
@@ -190,6 +208,15 @@ def search_patterns(self, *, pkg: Type[spack.package_base.PackageBase]) -> Optio
"""
raise NotImplementedError("must be implemented by derived classes")
def candidate_files(self, *, patterns: List[str], paths: List[str]) -> List[str]:
"""Returns a list of candidate files found on the system.
Args:
patterns: search patterns to be used for matching files
paths: paths where to search for files
"""
raise NotImplementedError("must be implemented by derived classes")
def prefix_from_path(self, *, path: str) -> str:
"""Given a path where a file was found, returns the corresponding prefix.
@@ -199,7 +226,7 @@ def prefix_from_path(self, *, path: str) -> str:
raise NotImplementedError("must be implemented by derived classes")
def detect_specs(
self, *, pkg: Type[spack.package_base.PackageBase], paths: List[str]
self, *, pkg: Type["spack.package_base.PackageBase"], paths: List[str]
) -> List[DetectedPackage]:
"""Given a list of files matching the search patterns, returns a list of detected specs.
@@ -216,9 +243,7 @@ def detect_specs(
return []
result = []
for candidate_path, items_in_prefix in _group_by_prefix(
llnl.util.lang.dedupe(paths)
).items():
for candidate_path, items_in_prefix in sorted(_group_by_prefix(set(paths)).items()):
# TODO: multiple instances of a package can live in the same
# prefix, and a package implementation can return multiple specs
# for one prefix, but without additional details (e.g. about the
@@ -273,36 +298,50 @@ def detect_specs(
return result
def find(self, *, pkg_name: str, repository: spack.repo.Repo) -> List[DetectedPackage]:
def find(
self, *, pkg_name: str, initial_guess: Optional[List[str]] = None
) -> List[DetectedPackage]:
"""For a given package, returns a list of detected specs.
Args:
pkg_name: package being detected
repository: repository to retrieve the package
initial_guess: initial list of paths to search from the caller
if None, default paths are searched. If this
is an empty list, nothing will be searched.
"""
pkg_cls = repository.get_pkg_class(pkg_name)
import spack.repo
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
patterns = self.search_patterns(pkg=pkg_cls)
if not patterns:
return []
regex = re.compile("|".join(patterns))
paths = [path for path, file in self.paths.items() if regex.search(file)]
paths.sort()
return self.detect_specs(pkg=pkg_cls, paths=paths)
if initial_guess is None:
initial_guess = self.default_path_hints()
initial_guess.extend(common_windows_package_paths(pkg_cls))
candidates = self.candidate_files(patterns=patterns, paths=initial_guess)
result = self.detect_specs(pkg=pkg_cls, paths=candidates)
return result
class ExecutablesFinder(Finder):
@classmethod
def in_search_paths(cls, paths: List[str]):
return cls(executables_in_path(paths))
def default_path_hints(self) -> List[str]:
return spack.util.environment.get_path("PATH")
@classmethod
def in_default_paths(cls):
return cls.in_search_paths(spack.util.environment.get_path("PATH"))
def search_patterns(self, *, pkg: Type[spack.package_base.PackageBase]) -> Optional[List[str]]:
def search_patterns(self, *, pkg: Type["spack.package_base.PackageBase"]) -> List[str]:
result = []
if hasattr(pkg, "executables") and hasattr(pkg, "platform_executables"):
return pkg.platform_executables()
return None
result = pkg.platform_executables()
return result
def candidate_files(self, *, patterns: List[str], paths: List[str]) -> List[str]:
executables_by_path = executables_in_path(path_hints=paths)
patterns = [re.compile(x) for x in patterns]
result = []
for compiled_re in patterns:
for path, exe in executables_by_path.items():
if compiled_re.search(exe):
result.append(path)
return list(sorted(set(result)))
def prefix_from_path(self, *, path: str) -> str:
result = executable_prefix(path)
@@ -313,18 +352,29 @@ def prefix_from_path(self, *, path: str) -> str:
class LibrariesFinder(Finder):
"""Finds libraries in the provided paths matching package search patterns."""
"""Finds libraries on the system, searching by LD_LIBRARY_PATH, LIBRARY_PATH,
DYLD_LIBRARY_PATH, DYLD_FALLBACK_LIBRARY_PATH, and standard system library paths
"""
@classmethod
def in_search_paths(cls, paths: List[str]):
return cls(libraries_in_path(paths))
def search_patterns(self, *, pkg: Type["spack.package_base.PackageBase"]) -> List[str]:
result = []
if hasattr(pkg, "libraries"):
result = pkg.libraries
return result
@classmethod
def in_default_paths(cls):
return cls.in_search_paths(system_library_paths())
def search_patterns(self, *, pkg: Type[spack.package_base.PackageBase]) -> Optional[List[str]]:
return getattr(pkg, "libraries", None)
def candidate_files(self, *, patterns: List[str], paths: List[str]) -> List[str]:
libraries_by_path = (
libraries_in_ld_and_system_library_path(path_hints=paths)
if sys.platform != "win32"
else libraries_in_windows_paths(path_hints=paths)
)
patterns = [re.compile(x) for x in patterns]
result = []
for compiled_re in patterns:
for path, exe in libraries_by_path.items():
if compiled_re.search(exe):
result.append(path)
return result
def prefix_from_path(self, *, path: str) -> str:
result = library_prefix(path)
@@ -335,7 +385,10 @@ def prefix_from_path(self, *, path: str) -> str:
def by_path(
packages_to_search: Iterable[str], *, path_hints: Optional[List[str]] = None
packages_to_search: List[str],
*,
path_hints: Optional[List[str]] = None,
max_workers: Optional[int] = None,
) -> Dict[str, List[DetectedPackage]]:
"""Return the list of packages that have been detected on the system, keyed by
unqualified package name.
@@ -344,26 +397,22 @@ def by_path(
packages_to_search: list of packages to be detected. Each package can be either unqualified
of fully qualified
path_hints: initial list of paths to be searched
max_workers: maximum number of workers to search for packages in parallel
"""
# TODO: Packages should be able to define both .libraries and .executables in the future
# TODO: determine_spec_details should get all relevant libraries and executables in one call
if path_hints is None:
exe_finder = ExecutablesFinder.in_default_paths()
lib_finder = LibrariesFinder.in_default_paths()
else:
exe_finder = ExecutablesFinder.in_search_paths(path_hints)
lib_finder = LibrariesFinder.in_search_paths(path_hints)
executables_finder, libraries_finder = ExecutablesFinder(), LibrariesFinder()
detected_specs_by_package: Dict[str, Tuple[concurrent.futures.Future, ...]] = {}
result = collections.defaultdict(list)
repository = spack.repo.PATH.ensure_unwrapped()
with spack.util.parallel.make_concurrent_executor() as executor:
with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
for pkg in packages_to_search:
executable_future = executor.submit(
exe_finder.find, pkg_name=pkg, repository=repository
executables_finder.find, pkg_name=pkg, initial_guess=path_hints
)
library_future = executor.submit(
libraries_finder.find, pkg_name=pkg, initial_guess=path_hints
)
library_future = executor.submit(lib_finder.find, pkg_name=pkg, repository=repository)
detected_specs_by_package[pkg] = executable_future, library_future
for pkg_name, futures in detected_specs_by_package.items():
@@ -379,7 +428,7 @@ def by_path(
)
except Exception as e:
llnl.util.tty.debug(
f"[EXTERNAL DETECTION] Skipping {pkg_name} due to: {e.__class__}: {e}"
f"[EXTERNAL DETECTION] Skipping {pkg_name}: exception occured {e}"
)
return result
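For orientation, here is a minimal usage sketch of the detection entry point shown above, assuming the module-level spack.detection.by_path and the DetectedPackage(spec, prefix) results it returns; package names and search paths are illustrative.
import spack.detection

# Look for external cmake and perl installations, restricting the search to two
# directories; omitting path_hints falls back to the default search paths.
detected = spack.detection.by_path(["cmake", "perl"], path_hints=["/usr/bin", "/usr/local/bin"])

for pkg_name, entries in detected.items():
    for entry in entries:
        # Each entry pairs a detected Spec (e.g. cmake@3.27.4) with its install prefix.
        print(pkg_name, entry.spec, entry.prefix)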

View File

@@ -81,17 +81,7 @@ class OpenMpi(Package):
]
#: These are variant names used by Spack internally; packages can't use them
reserved_names = [
"arch",
"architecture",
"dev_path",
"namespace",
"operating_system",
"os",
"patches",
"platform",
"target",
]
reserved_names = ["patches", "dev_path"]
#: Names of possible directives. This list is mostly populated using the @directive decorator.
#: Some directives leverage others and in that case are not automatically added.

View File

@@ -1214,6 +1214,7 @@ def scope_name(self):
def include_concrete_envs(self):
"""Copy and save the included envs' specs internally"""
lockfile_meta = None
root_hash_seen = set()
concrete_hash_seen = set()
self.included_concrete_spec_data = {}
@@ -1224,26 +1225,37 @@ def include_concrete_envs(self):
raise SpackEnvironmentError(f"Unable to find env at {env_path}")
env = Environment(env_path)
self.included_concrete_spec_data[env_path] = {"roots": [], "concrete_specs": {}}
with open(env.lock_path) as f:
lockfile_as_dict = env._read_lockfile(f)
# Lockfile_meta must match each env and use at least format version 5
if lockfile_meta is None:
lockfile_meta = lockfile_as_dict["_meta"]
elif lockfile_meta != lockfile_as_dict["_meta"]:
raise SpackEnvironmentError("All lockfile _meta values must match")
elif lockfile_meta["lockfile-version"] < 5:
raise SpackEnvironmentError("The lockfile format must be at version 5 or higher")
# Copy unique root specs from env
for root_dict in env._concrete_roots_dict():
self.included_concrete_spec_data[env_path] = {"roots": []}
for root_dict in lockfile_as_dict["roots"]:
if root_dict["hash"] not in root_hash_seen:
self.included_concrete_spec_data[env_path]["roots"].append(root_dict)
root_hash_seen.add(root_dict["hash"])
# Copy unique concrete specs from env
for dag_hash, spec_details in env._concrete_specs_dict().items():
if dag_hash not in concrete_hash_seen:
self.included_concrete_spec_data[env_path]["concrete_specs"].update(
{dag_hash: spec_details}
for concrete_spec in lockfile_as_dict["concrete_specs"]:
if concrete_spec not in concrete_hash_seen:
self.included_concrete_spec_data[env_path].update(
{"concrete_specs": lockfile_as_dict["concrete_specs"]}
)
concrete_hash_seen.add(dag_hash)
concrete_hash_seen.add(concrete_spec)
# Copy transitive include data
transitive = env.included_concrete_spec_data
if transitive:
self.included_concrete_spec_data[env_path]["include_concrete"] = transitive
if "include_concrete" in lockfile_as_dict.keys():
self.included_concrete_spec_data[env_path]["include_concrete"] = lockfile_as_dict[
"include_concrete"
]
self._read_lockfile_dict(self._to_lockfile_dict())
self.write()
@@ -1632,8 +1644,9 @@ def _concretize_separately(self, tests=False):
i += 1
# Ensure we don't try to bootstrap clingo in parallel
with spack.bootstrap.ensure_bootstrap_configuration():
spack.bootstrap.ensure_clingo_importable_or_raise()
if spack.config.get("config:concretizer", "clingo") == "clingo":
with spack.bootstrap.ensure_bootstrap_configuration():
spack.bootstrap.ensure_clingo_importable_or_raise()
# Ensure all the indexes have been built or updated, since
# otherwise the processes in the pool may timeout on waiting
@@ -2161,23 +2174,16 @@ def _get_environment_specs(self, recurse_dependencies=True):
return specs
def _concrete_specs_dict(self):
def _to_lockfile_dict(self):
"""Create a dictionary to store a lockfile for this environment."""
concrete_specs = {}
for s in traverse.traverse_nodes(self.specs_by_hash.values(), key=traverse.by_dag_hash):
spec_dict = s.node_dict_with_hashes(hash=ht.dag_hash)
# Assumes no legacy formats, since this was just created.
spec_dict[ht.dag_hash.name] = s.dag_hash()
concrete_specs[s.dag_hash()] = spec_dict
return concrete_specs
def _concrete_roots_dict(self):
hash_spec_list = zip(self.concretized_order, self.concretized_user_specs)
return [{"hash": h, "spec": str(s)} for h, s in hash_spec_list]
def _to_lockfile_dict(self):
"""Create a dictionary to store a lockfile for this environment."""
concrete_specs = self._concrete_specs_dict()
root_specs = self._concrete_roots_dict()
spack_dict = {"version": spack.spack_version}
spack_commit = spack.main.get_spack_commit()
@@ -2198,7 +2204,7 @@ def _to_lockfile_dict(self):
# spack version information
"spack": spack_dict,
# users specs + hashes are the 'roots' of the environment
"roots": root_specs,
"roots": [{"hash": h, "spec": str(s)} for h, s in hash_spec_list],
# Concrete specs by hash, including dependencies
"concrete_specs": concrete_specs,
}
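As a rough sketch of the lockfile layout this code produces and consumes (the "_meta" block is assumed from the version check in include_concrete_envs above; the file name is the conventional spack.lock):
import json

with open("spack.lock") as f:
    lockfile = json.load(f)

# Included environments are rejected unless their lockfile format is new enough.
if lockfile["_meta"]["lockfile-version"] < 5:
    raise RuntimeError("The lockfile format must be at version 5 or higher")

# "roots" holds {"hash": ..., "spec": ...} entries; "concrete_specs" is keyed by DAG hash,
# which is what the deduplication via root_hash_seen / concrete_hash_seen relies on.
root_hashes = {root["hash"] for root in lockfile["roots"]}
missing = root_hashes - set(lockfile["concrete_specs"])
assert not missing, f"roots without concrete specs: {missing}"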

View File

@@ -30,7 +30,6 @@
import shutil
import urllib.error
import urllib.parse
import urllib.request
from pathlib import PurePath
from typing import List, Optional
@@ -54,7 +53,7 @@
import spack.version
import spack.version.git_ref_lookup
from spack.util.compression import decompressor_for
from spack.util.executable import CommandNotFoundError, Executable, which
from spack.util.executable import CommandNotFoundError, which
#: List of all fetch strategies, created by FetchStrategy metaclass.
all_strategies = []
@@ -246,30 +245,38 @@ class URLFetchStrategy(FetchStrategy):
# these are checksum types. The generic 'checksum' is deprecated for
# specific hash names, but we need it for backward compatibility
optional_attrs = [*crypto.hashes.keys(), "checksum"]
optional_attrs = list(crypto.hashes.keys()) + ["checksum"]
def __init__(self, *, url: str, checksum: Optional[str] = None, **kwargs) -> None:
def __init__(self, url=None, checksum=None, **kwargs):
super().__init__(**kwargs)
self.url = url
# Prefer values in kwargs to the positionals.
self.url = kwargs.get("url", url)
self.mirrors = kwargs.get("mirrors", [])
# digest can be set as the first argument, or from an explicit
# kwarg by the hash name.
self.digest: Optional[str] = checksum
self.digest = kwargs.get("checksum", checksum)
for h in self.optional_attrs:
if h in kwargs:
self.digest = kwargs[h]
self.expand_archive: bool = kwargs.get("expand", True)
self.extra_options: dict = kwargs.get("fetch_options", {})
self._curl: Optional[Executable] = None
self.extension: Optional[str] = kwargs.get("extension", None)
self.expand_archive = kwargs.get("expand", True)
self.extra_options = kwargs.get("fetch_options", {})
self._curl = None
self.extension = kwargs.get("extension", None)
if not self.url:
raise ValueError("URLFetchStrategy requires a url for fetching.")
@property
def curl(self) -> Executable:
def curl(self):
if not self._curl:
self._curl = web_util.require_curl()
try:
self._curl = which("curl", required=True)
except CommandNotFoundError as exc:
tty.error(str(exc))
return self._curl
def source_id(self):
@@ -290,23 +297,27 @@ def candidate_urls(self):
@_needs_stage
def fetch(self):
if self.archive_file:
tty.debug(f"Already downloaded {self.archive_file}")
tty.debug("Already downloaded {0}".format(self.archive_file))
return
errors: List[Exception] = []
url = None
errors = []
for url in self.candidate_urls:
if not web_util.url_exists(url):
tty.debug("URL does not exist: " + url)
continue
try:
self._fetch_from_url(url)
break
except FailedDownloadError as e:
errors.extend(e.exceptions)
else:
raise FailedDownloadError(*errors)
errors.append(str(e))
for msg in errors:
tty.debug(msg)
if not self.archive_file:
raise FailedDownloadError(
RuntimeError(f"Missing archive {self.archive_file} after fetching")
)
raise FailedDownloadError(url)
def _fetch_from_url(self, url):
if spack.config.get("config:url_fetch_method") == "curl":
@@ -325,28 +336,27 @@ def _check_headers(self, headers):
@_needs_stage
def _fetch_urllib(self, url):
save_file = self.stage.save_filename
tty.msg("Fetching {0}".format(url))
request = urllib.request.Request(url, headers={"User-Agent": web_util.SPACK_USER_AGENT})
# Run urllib but grab the mime type from the http headers
try:
response = web_util.urlopen(request)
except (TimeoutError, urllib.error.URLError) as e:
url, headers, response = web_util.read_from_url(url)
except web_util.SpackWebError as e:
# clean up archive on failure.
if self.archive_file:
os.remove(self.archive_file)
if os.path.lexists(save_file):
os.remove(save_file)
raise FailedDownloadError(e) from e
tty.msg(f"Fetching {url}")
msg = "urllib failed to fetch with error {0}".format(e)
raise FailedDownloadError(url, msg)
if os.path.lexists(save_file):
os.remove(save_file)
with open(save_file, "wb") as f:
shutil.copyfileobj(response, f)
with open(save_file, "wb") as _open_file:
shutil.copyfileobj(response, _open_file)
self._check_headers(str(response.headers))
self._check_headers(str(headers))
@_needs_stage
def _fetch_curl(self, url):
@@ -355,7 +365,7 @@ def _fetch_curl(self, url):
if self.stage.save_filename:
save_file = self.stage.save_filename
partial_file = self.stage.save_filename + ".part"
tty.msg(f"Fetching {url}")
tty.msg("Fetching {0}".format(url))
if partial_file:
save_args = [
"-C",
@@ -395,8 +405,8 @@ def _fetch_curl(self, url):
try:
web_util.check_curl_code(curl.returncode)
except spack.error.FetchError as e:
raise FailedDownloadError(e) from e
except spack.error.FetchError as err:
raise spack.fetch_strategy.FailedDownloadError(url, str(err))
self._check_headers(headers)
@@ -463,7 +473,7 @@ def check(self):
"""Check the downloaded archive against a checksum digest.
No-op if this stage checks code out of a repository."""
if not self.digest:
raise NoDigestError(f"Attempt to check {self.__class__.__name__} with no digest.")
raise NoDigestError("Attempt to check URLFetchStrategy with no digest.")
verify_checksum(self.archive_file, self.digest)
@@ -474,8 +484,8 @@ def reset(self):
"""
if not self.archive_file:
raise NoArchiveFileError(
f"Tried to reset {self.__class__.__name__} before fetching",
f"Failed on reset() for URL{self.url}",
"Tried to reset URLFetchStrategy before fetching",
"Failed on reset() for URL %s" % self.url,
)
# Remove everything but the archive from the stage
@@ -488,10 +498,14 @@ def reset(self):
self.expand()
def __repr__(self):
return f"{self.__class__.__name__}<{self.url}>"
url = self.url if self.url else "no url"
return "%s<%s>" % (self.__class__.__name__, url)
def __str__(self):
return self.url
if self.url:
return self.url
else:
return "[no url]"
@fetcher
@@ -504,7 +518,7 @@ def fetch(self):
# check whether the cache file exists.
if not os.path.isfile(path):
raise NoCacheError(f"No cache of {path}")
raise NoCacheError("No cache of %s" % path)
# remove old symlink if one is there.
filename = self.stage.save_filename
@@ -514,8 +528,8 @@ def fetch(self):
# Symlink to local cached archive.
symlink(path, filename)
# Remove link if checksum fails, or subsequent fetchers will assume they don't need to
# download.
# Remove link if checksum fails, or subsequent fetchers
# will assume they don't need to download.
if self.digest:
try:
self.check()
@@ -524,12 +538,12 @@ def fetch(self):
raise
# Notify the user how we fetched.
tty.msg(f"Using cached archive: {path}")
tty.msg("Using cached archive: {0}".format(path))
class OCIRegistryFetchStrategy(URLFetchStrategy):
def __init__(self, *, url: str, checksum: Optional[str] = None, **kwargs):
super().__init__(url=url, checksum=checksum, **kwargs)
def __init__(self, url=None, checksum=None, **kwargs):
super().__init__(url, checksum, **kwargs)
self._urlopen = kwargs.get("_urlopen", spack.oci.opener.urlopen)
@@ -540,13 +554,13 @@ def fetch(self):
try:
response = self._urlopen(self.url)
except (TimeoutError, urllib.error.URLError) as e:
except urllib.error.URLError as e:
# clean up archive on failure.
if self.archive_file:
os.remove(self.archive_file)
if os.path.lexists(file):
os.remove(file)
raise FailedDownloadError(e) from e
raise FailedDownloadError(self.url, f"Failed to fetch {self.url}: {e}") from e
if os.path.lexists(file):
os.remove(file)
@@ -711,7 +725,6 @@ class GitFetchStrategy(VCSFetchStrategy):
"submodules",
"get_full_repo",
"submodules_delete",
"git_sparse_paths",
]
git_version_re = r"git version (\S+)"
@@ -727,7 +740,6 @@ def __init__(self, **kwargs):
self.submodules = kwargs.get("submodules", False)
self.submodules_delete = kwargs.get("submodules_delete", False)
self.get_full_repo = kwargs.get("get_full_repo", False)
self.git_sparse_paths = kwargs.get("git_sparse_paths", None)
@property
def git_version(self):
@@ -795,50 +807,38 @@ def fetch(self):
tty.debug("Already fetched {0}".format(self.stage.source_path))
return
if self.git_sparse_paths:
self._sparse_clone_src(commit=self.commit, branch=self.branch, tag=self.tag)
else:
self._clone_src(commit=self.commit, branch=self.branch, tag=self.tag)
self.submodule_operations()
self.clone(commit=self.commit, branch=self.branch, tag=self.tag)
def bare_clone(self, dest):
def clone(self, dest=None, commit=None, branch=None, tag=None, bare=False):
"""
Execute a bare clone for metadata only
Clone a repository to a path.
Requires a destination since bare cloning does not provide source
and shouldn't be used for staging.
"""
# Default to spack source path
tty.debug("Cloning git repository: {0}".format(self._repo_info()))
git = self.git
debug = spack.config.get("config:debug")
# We don't need to worry about which commit/branch/tag is checked out
clone_args = ["clone", "--bare"]
if not debug:
clone_args.append("--quiet")
clone_args.extend([self.url, dest])
git(*clone_args)
def _clone_src(self, commit=None, branch=None, tag=None):
"""
Clone a repository to a path using git.
This method handles cloning from git, but does not require a stage.
Arguments:
dest (str or None): The path into which the code is cloned. If None,
requires a stage and uses the stage's source path.
commit (str or None): A commit to fetch from the remote. Only one of
commit, branch, and tag may be non-None.
branch (str or None): A branch to fetch from the remote.
tag (str or None): A tag to fetch from the remote.
bare (bool): Execute a "bare" git clone (--bare option to git)
"""
# Default to spack source path
dest = self.stage.source_path
dest = dest or self.stage.source_path
tty.debug("Cloning git repository: {0}".format(self._repo_info()))
git = self.git
debug = spack.config.get("config:debug")
if commit:
if bare:
# We don't need to worry about which commit/branch/tag is checked out
clone_args = ["clone", "--bare"]
if not debug:
clone_args.append("--quiet")
clone_args.extend([self.url, dest])
git(*clone_args)
elif commit:
# Need to do a regular clone and check out everything if
# they asked for a particular commit.
clone_args = ["clone", self.url]
@@ -917,85 +917,6 @@ def _clone_src(self, commit=None, branch=None, tag=None):
git(*pull_args, ignore_errors=1)
git(*co_args)
def _sparse_clone_src(self, commit=None, branch=None, tag=None, **kwargs):
"""
Use git's sparse checkout feature to clone portions of a git repository
Arguments:
commit (str or None): A commit to fetch from the remote. Only one of
commit, branch, and tag may be non-None.
branch (str or None): A branch to fetch from the remote.
tag (str or None): A tag to fetch from the remote.
"""
dest = self.stage.source_path
git = self.git
if self.git_version < spack.version.Version("2.25.0.0"):
# code paths exist where the package is not set. Ensure some identifier for the
# package that was configured for sparse checkout exists in the error message
identifier = str(self.url)
if self.package:
identifier += f" ({self.package.name})"
tty.warn(
(
f"{identifier} is configured for git sparse-checkout "
"but the git version is too old to support sparse cloning. "
"Cloning the full repository instead."
)
)
self._clone_src(commit, branch, tag)
else:
# default to depth=2 to allow for retention of some git properties
depth = kwargs.get("depth", 2)
needs_fetch = branch or tag
git_ref = branch or tag or commit
assert git_ref
clone_args = ["clone"]
if needs_fetch:
clone_args.extend(["--branch", git_ref])
if self.get_full_repo:
clone_args.append("--no-single-branch")
else:
clone_args.append("--single-branch")
clone_args.extend(
[f"--depth={depth}", "--no-checkout", "--filter=blob:none", self.url]
)
sparse_args = ["sparse-checkout", "set"]
if callable(self.git_sparse_paths):
sparse_args.extend(self.git_sparse_paths())
else:
sparse_args.extend([p for p in self.git_sparse_paths])
sparse_args.append("--cone")
checkout_args = ["checkout", git_ref]
if not spack.config.get("config:debug"):
clone_args.insert(1, "--quiet")
checkout_args.insert(1, "--quiet")
with temp_cwd():
git(*clone_args)
repo_name = get_single_file(".")
if self.stage:
self.stage.srcdir = repo_name
shutil.move(repo_name, dest)
with working_dir(dest):
git(*sparse_args)
git(*checkout_args)
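To illustrate how the sparse-checkout path above is reached, here is a hypothetical package sketch that opts in through the git_sparse_paths attribute (repository URL, paths, and version are made up):
from spack.package import *


class Bigrepo(Package):
    """Hypothetical package that only needs two subdirectories of a large monorepo."""

    git = "https://example.com/org/bigrepo.git"

    # With git >= 2.25 only these paths are materialized; older git falls back to a
    # full clone, as the warning above explains.
    git_sparse_paths = ["tools/bigrepo", "cmake"]

    version("1.2.0", tag="v1.2.0")

    def install(self, spec, prefix):
        install_tree("tools/bigrepo", prefix)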
def submodule_operations(self):
dest = self.stage.source_path
git = self.git
if self.submodules_delete:
with working_dir(dest):
for submodule_to_delete in self.submodules_delete:
@@ -1372,7 +1293,7 @@ def reset(self):
shutil.move(scrubbed, source_path)
def __str__(self):
return f"[hg] {self.url}"
return "[hg] %s" % self.url
@fetcher
@@ -1381,21 +1302,46 @@ class S3FetchStrategy(URLFetchStrategy):
url_attr = "s3"
def __init__(self, *args, **kwargs):
try:
super().__init__(*args, **kwargs)
except ValueError:
if not kwargs.get("url"):
raise ValueError("S3FetchStrategy requires a url for fetching.")
@_needs_stage
def fetch(self):
if not self.url.startswith("s3://"):
raise spack.error.FetchError(
f"{self.__class__.__name__} can only fetch from s3:// urls."
)
if self.archive_file:
tty.debug(f"Already downloaded {self.archive_file}")
tty.debug("Already downloaded {0}".format(self.archive_file))
return
self._fetch_urllib(self.url)
if not self.archive_file:
raise FailedDownloadError(
RuntimeError(f"Missing archive {self.archive_file} after fetching")
parsed_url = urllib.parse.urlparse(self.url)
if parsed_url.scheme != "s3":
raise spack.error.FetchError("S3FetchStrategy can only fetch from s3:// urls.")
tty.debug("Fetching {0}".format(self.url))
basename = os.path.basename(parsed_url.path)
with working_dir(self.stage.path):
_, headers, stream = web_util.read_from_url(self.url)
with open(basename, "wb") as f:
shutil.copyfileobj(stream, f)
content_type = web_util.get_header(headers, "Content-type")
if content_type == "text/html":
warn_content_type_mismatch(self.archive_file or "the archive")
if self.stage.save_filename:
llnl.util.filesystem.rename(
os.path.join(self.stage.path, basename), self.stage.save_filename
)
if not self.archive_file:
raise FailedDownloadError(self.url)
@fetcher
class GCSFetchStrategy(URLFetchStrategy):
@@ -1403,22 +1349,43 @@ class GCSFetchStrategy(URLFetchStrategy):
url_attr = "gs"
def __init__(self, *args, **kwargs):
try:
super().__init__(*args, **kwargs)
except ValueError:
if not kwargs.get("url"):
raise ValueError("GCSFetchStrategy requires a url for fetching.")
@_needs_stage
def fetch(self):
if not self.url.startswith("gs"):
raise spack.error.FetchError(
f"{self.__class__.__name__} can only fetch from gs:// urls."
)
if self.archive_file:
tty.debug(f"Already downloaded {self.archive_file}")
tty.debug("Already downloaded {0}".format(self.archive_file))
return
self._fetch_urllib(self.url)
parsed_url = urllib.parse.urlparse(self.url)
if parsed_url.scheme != "gs":
raise spack.error.FetchError("GCSFetchStrategy can only fetch from gs:// urls.")
tty.debug("Fetching {0}".format(self.url))
basename = os.path.basename(parsed_url.path)
with working_dir(self.stage.path):
_, headers, stream = web_util.read_from_url(self.url)
with open(basename, "wb") as f:
shutil.copyfileobj(stream, f)
content_type = web_util.get_header(headers, "Content-type")
if content_type == "text/html":
warn_content_type_mismatch(self.archive_file or "the archive")
if self.stage.save_filename:
os.rename(os.path.join(self.stage.path, basename), self.stage.save_filename)
if not self.archive_file:
raise FailedDownloadError(
RuntimeError(f"Missing archive {self.archive_file} after fetching")
)
raise FailedDownloadError(self.url)
@fetcher
@@ -1427,7 +1394,7 @@ class FetchAndVerifyExpandedFile(URLFetchStrategy):
as well as after expanding it."""
def __init__(self, url, archive_sha256: str, expanded_sha256: str):
super().__init__(url=url, checksum=archive_sha256)
super().__init__(url, archive_sha256)
self.expanded_sha256 = expanded_sha256
def expand(self):
@@ -1469,14 +1436,14 @@ def stable_target(fetcher):
return False
def from_url(url: str) -> URLFetchStrategy:
def from_url(url):
"""Given a URL, find an appropriate fetch strategy for it.
Currently just gives you a URLFetchStrategy that uses curl.
TODO: make this return appropriate fetch strategies for other
types of URLs.
"""
return URLFetchStrategy(url=url)
return URLFetchStrategy(url)
def from_kwargs(**kwargs):
@@ -1545,12 +1512,10 @@ def _check_version_attributes(fetcher, pkg, version):
def _extrapolate(pkg, version):
"""Create a fetcher from an extrapolated URL for this version."""
try:
return URLFetchStrategy(url=pkg.url_for_version(version), fetch_options=pkg.fetch_options)
return URLFetchStrategy(pkg.url_for_version(version), fetch_options=pkg.fetch_options)
except spack.package_base.NoURLError:
raise ExtrapolationError(
f"Can't extrapolate a URL for version {version} because "
f"package {pkg.name} defines no URLs"
)
msg = "Can't extrapolate a URL for version %s " "because package %s defines no URLs"
raise ExtrapolationError(msg % (version, pkg.name))
def _from_merged_attrs(fetcher, pkg, version):
@@ -1567,11 +1532,8 @@ def _from_merged_attrs(fetcher, pkg, version):
attrs["fetch_options"] = pkg.fetch_options
attrs.update(pkg.versions[version])
if fetcher.url_attr == "git":
pkg_attr_list = ["submodules", "git_sparse_paths"]
for pkg_attr in pkg_attr_list:
if hasattr(pkg, pkg_attr):
attrs.setdefault(pkg_attr, getattr(pkg, pkg_attr))
if fetcher.url_attr == "git" and hasattr(pkg, "submodules"):
attrs.setdefault("submodules", pkg.submodules)
return fetcher(**attrs)
@@ -1666,9 +1628,11 @@ def for_package_version(pkg, version=None):
raise InvalidArgsError(pkg, version, **args)
def from_url_scheme(url: str, **kwargs):
def from_url_scheme(url, *args, **kwargs):
"""Finds a suitable FetchStrategy by matching its url_attr with the scheme
in the given url."""
url = kwargs.get("url", url)
parsed_url = urllib.parse.urlparse(url, scheme="file")
scheme_mapping = kwargs.get("scheme_mapping") or {
@@ -1685,9 +1649,11 @@ def from_url_scheme(url: str, **kwargs):
for fetcher in all_strategies:
url_attr = getattr(fetcher, "url_attr", None)
if url_attr and url_attr == scheme:
return fetcher(url=url, **kwargs)
return fetcher(url, *args, **kwargs)
raise ValueError(f'No FetchStrategy found for url with scheme: "{parsed_url.scheme}"')
raise ValueError(
'No FetchStrategy found for url with scheme: "{SCHEME}"'.format(SCHEME=parsed_url.scheme)
)
def from_list_url(pkg):
@@ -1712,9 +1678,7 @@ def from_list_url(pkg):
)
# construct a fetcher
return URLFetchStrategy(
url=url_from_list, checksum=checksum, fetch_options=pkg.fetch_options
)
return URLFetchStrategy(url_from_list, checksum, fetch_options=pkg.fetch_options)
except KeyError as e:
tty.debug(e)
tty.msg("Cannot find version %s in url_list" % pkg.version)
@@ -1742,10 +1706,10 @@ def store(self, fetcher, relative_dest):
mkdirp(os.path.dirname(dst))
fetcher.archive(dst)
def fetcher(self, target_path: str, digest: Optional[str], **kwargs) -> CacheURLFetchStrategy:
def fetcher(self, target_path, digest, **kwargs):
path = os.path.join(self.root, target_path)
url = url_util.path_to_file_url(path)
return CacheURLFetchStrategy(url=url, checksum=digest, **kwargs)
return CacheURLFetchStrategy(url, digest, **kwargs)
def destroy(self):
shutil.rmtree(self.root, ignore_errors=True)
@@ -1758,9 +1722,9 @@ class NoCacheError(spack.error.FetchError):
class FailedDownloadError(spack.error.FetchError):
"""Raised when a download fails."""
def __init__(self, *exceptions: Exception):
super().__init__("Failed to download")
self.exceptions = exceptions
def __init__(self, url, msg=""):
super().__init__("Failed to fetch file from URL: %s" % url, msg)
self.url = url
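A small construction sketch for the URL fetcher discussed throughout this file, using the keyword-only signature from one side of this diff; the URL and checksum are placeholders.
import spack.fetch_strategy as fs

# The digest can be passed either as checksum=... or under its hash name (sha256=...).
fetcher = fs.URLFetchStrategy(
    url="https://example.com/downloads/mylib-1.0.tar.gz", sha256="0" * 64
)

print(fetcher)        # __str__ returns the URL itself
print(repr(fetcher))  # URLFetchStrategy<https://example.com/downloads/mylib-1.0.tar.gz>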
class NoArchiveFileError(spack.error.FetchError):

View File

@@ -37,12 +37,6 @@ def __call__(self, spec):
"""Run this hash on the provided spec."""
return spec.spec_hash(self)
def __repr__(self):
return (
f"SpecHashDescriptor(depflag={self.depflag!r}, "
f"package_hash={self.package_hash!r}, name={self.name!r}, override={self.override!r})"
)
#: Spack's deployment hash. Includes all inputs that can affect how a package is built.
dag_hash = SpecHashDescriptor(depflag=dt.BUILD | dt.LINK | dt.RUN, package_hash=True, name="hash")

View File

@@ -23,6 +23,9 @@ def post_install(spec, explicit):
# Push the package to all autopush mirrors
for mirror in spack.mirror.MirrorCollection(binary=True, autopush=True).values():
signing_key = bindist.select_signing_key() if mirror.signed else None
bindist.push_or_raise([spec], out_url=mirror.push_url, signing_key=signing_key, force=True)
bindist.push_or_raise(
spec,
mirror.push_url,
bindist.PushOptions(force=True, regenerate_index=False, unsigned=not mirror.signed),
)
tty.msg(f"{spec.name}: Pushed to build cache: '{mirror.name}'")

View File

@@ -757,10 +757,6 @@ def test_process(pkg: Pb, kwargs):
pkg.tester.status(pkg.spec.name, TestStatus.SKIPPED)
return
# Make sure properly named build-time test methods actually run as
# stand-alone tests.
pkg.run_tests = True
# run test methods from the package and all virtuals it provides
v_names = virtuals(pkg)
test_specs = [pkg.spec] + [spack.spec.Spec(v_name) for v_name in sorted(v_names)]

View File

@@ -426,36 +426,48 @@ def _determine_extension(fetcher):
return ext
class MirrorLayout:
"""A ``MirrorLayout`` stores the relative locations of files in a mirror directory. The main
storage location is ``storage_path``. An additional, human-readable path may be obtained as the
second entry when iterating this object."""
class MirrorReference:
"""A ``MirrorReference`` stores the relative paths where you can store a
package/resource in a mirror directory.
def __init__(self, storage_path: str) -> None:
self.storage_path = storage_path
The appropriate storage location is given by ``storage_path``. The
``cosmetic_path`` property provides a reference that a human could generate
themselves based on reading the details of the package.
def __iter__(self):
yield self.storage_path
A user can iterate over a ``MirrorReference`` object to get all the
possible names that might be used to refer to the resource in a mirror;
this includes names generated by previous naming schemes that are no longer
reported by ``storage_path`` or ``cosmetic_path``.
"""
class DefaultLayout(MirrorLayout):
def __init__(self, cosmetic_path: str, global_path: Optional[str] = None) -> None:
super().__init__(global_path or cosmetic_path)
def __init__(self, cosmetic_path, global_path=None):
self.global_path = global_path
self.cosmetic_path = cosmetic_path
@property
def storage_path(self):
if self.global_path:
return self.global_path
else:
return self.cosmetic_path
def __iter__(self):
if self.global_path:
yield self.global_path
yield self.cosmetic_path
class OCILayout(MirrorLayout):
"""Follow the OCI Image Layout Specification to archive blobs where paths are of the form
``blobs/<algorithm>/<digest>``"""
class OCIImageLayout:
"""Follow the OCI Image Layout Specification to archive blobs
Paths are of the form `blobs/<algorithm>/<digest>`
"""
def __init__(self, digest: spack.oci.image.Digest) -> None:
super().__init__(os.path.join("blobs", digest.algorithm, digest.digest))
self.storage_path = os.path.join("blobs", digest.algorithm, digest.digest)
def __iter__(self):
yield self.storage_path
def mirror_archive_paths(fetcher, per_package_ref, spec=None):
@@ -482,7 +494,7 @@ def mirror_archive_paths(fetcher, per_package_ref, spec=None):
if global_ref and ext:
global_ref += ".%s" % ext
return DefaultLayout(per_package_ref, global_ref)
return MirrorReference(per_package_ref, global_ref)
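A behavioral sketch of the layout objects described above, using the DefaultLayout naming from one side of this diff (MirrorReference on the other); the paths are made up.
import spack.mirror

# A human-readable per-package path, plus an optional content-addressed "global"
# path that is shared across packages and preferred as the storage location.
layout = spack.mirror.DefaultLayout(
    cosmetic_path="zlib/zlib-1.3.tar.gz",
    global_path="_source-cache/archive/ab/abcdef.tar.gz",
)

print(layout.storage_path)  # the global path, since one was given
for name in layout:         # every name that may refer to this resource in a mirror
    print(name)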
def get_all_versions(specs):

View File

@@ -6,6 +6,7 @@
import hashlib
import json
import os
import time
import urllib.error
import urllib.parse
import urllib.request
@@ -42,6 +43,11 @@ def create_tarball(spec: spack.spec.Spec, tarfile_path):
return spack.binary_distribution._do_create_tarball(tarfile_path, spec.prefix, buildinfo)
def _log_upload_progress(digest: Digest, size: int, elapsed: float):
elapsed = max(elapsed, 0.001) # guard against division by zero
tty.info(f"Uploaded {digest} ({elapsed:.2f}s, {size / elapsed / 1024 / 1024:.2f} MB/s)")
def with_query_param(url: str, param: str, value: str) -> str:
"""Add a query parameter to a URL
@@ -135,6 +141,8 @@ def upload_blob(
if not force and blob_exists(ref, digest, _urlopen):
return False
start = time.time()
with open(file, "rb") as f:
file_size = os.fstat(f.fileno()).st_size
@@ -159,6 +167,7 @@ def upload_blob(
# Created the blob in one go.
if response.status == 201:
_log_upload_progress(digest, file_size, time.time() - start)
return True
# Otherwise, do another PUT request.
@@ -182,6 +191,8 @@ def upload_blob(
spack.oci.opener.ensure_status(request, response, 201)
# print elapsed time and # MB/s
_log_upload_progress(digest, file_size, time.time() - start)
return True
@@ -390,12 +401,15 @@ def make_stage(
) -> spack.stage.Stage:
_urlopen = _urlopen or spack.oci.opener.urlopen
fetch_strategy = spack.fetch_strategy.OCIRegistryFetchStrategy(
url=url, checksum=digest.digest, _urlopen=_urlopen
url, checksum=digest.digest, _urlopen=_urlopen
)
# Use blobs/<alg>/<encoded> as the cache path, which follows
# the OCI Image Layout Specification. What's missing though,
# is the `oci-layout` and `index.json` files, which are
# required by the spec.
return spack.stage.Stage(
fetch_strategy, mirror_paths=spack.mirror.OCILayout(digest), name=digest.digest, keep=keep
fetch_strategy,
mirror_paths=spack.mirror.OCIImageLayout(digest),
name=digest.digest,
keep=keep,
)
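The additions above time each blob upload and report throughput; a self-contained sketch of that pattern (digest string and payload size are made up):
import time


def log_upload_progress(digest: str, size: int, elapsed: float) -> None:
    elapsed = max(elapsed, 0.001)  # guard against division by zero, as above
    print(f"Uploaded {digest} ({elapsed:.2f}s, {size / elapsed / 1024 / 1024:.2f} MB/s)")


start = time.time()
payload = b"\0" * (8 * 1024 * 1024)  # stand-in for the blob that would be PUT to the registry
# ... the actual upload request would happen here ...
log_upload_progress("sha256:" + "0" * 64, len(payload), time.time() - start)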

View File

@@ -197,12 +197,13 @@ def __init__(cls, name, bases, attr_dict):
# that "foo" was a possible executable.
# If a package has the executables or libraries attribute then it's
# assumed to be detectable. Add a tag, so finding them is faster
# assumed to be detectable
if hasattr(cls, "executables") or hasattr(cls, "libraries"):
# To add the tag, we need to copy the tags attribute, and attach it to
# the current class. We don't use append, since it might modify base classes,
# if "tags" is retrieved following the MRO.
cls.tags = getattr(cls, "tags", []) + [DetectablePackageMeta.TAG]
# Append a tag to each detectable package, so that finding them is faster
if not hasattr(cls, "tags"):
setattr(cls, "tags", [DetectablePackageMeta.TAG])
elif DetectablePackageMeta.TAG not in cls.tags:
cls.tags.append(DetectablePackageMeta.TAG)
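The copy-instead-of-append comment above is the whole point of this hunk; a minimal standalone illustration with made-up class names:
class Base:
    tags = ["build-tools"]


class GoodChild(Base):
    pass


class BadChild(Base):
    pass


# Copying, as the metaclass does on one side of this diff, leaves the base class alone...
GoodChild.tags = getattr(GoodChild, "tags", []) + ["detectable"]
# ...while appending to the inherited list mutates Base.tags for every subclass.
BadChild.tags.append("oops")

print(Base.tags)       # ['build-tools', 'oops']
print(GoodChild.tags)  # ['build-tools', 'detectable']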
@classmethod
def platform_executables(cls):
@@ -1101,7 +1102,6 @@ def _make_resource_stage(self, root_stage, resource):
mirror_paths=spack.mirror.mirror_archive_paths(
resource.fetcher, os.path.join(self.name, pretty_resource_name)
),
mirrors=spack.mirror.MirrorCollection(source=True).values(),
path=self.path,
)
@@ -1122,7 +1122,6 @@ def _make_root_stage(self, fetcher):
stage = Stage(
fetcher,
mirror_paths=mirror_paths,
mirrors=spack.mirror.MirrorCollection(source=True).values(),
name=stage_name,
path=self.path,
search_fn=self._download_search,

View File

@@ -328,26 +328,19 @@ def next_spec(
if not self.ctx.next_token:
return initial_spec
def add_dependency(dep, **edge_properties):
"""wrapper around root_spec._add_dependency"""
try:
root_spec._add_dependency(dep, **edge_properties)
except spack.error.SpecError as e:
raise SpecParsingError(str(e), self.ctx.current_token, self.literal_str) from e
initial_spec = initial_spec or spack.spec.Spec()
root_spec = SpecNodeParser(self.ctx, self.literal_str).parse(initial_spec)
root_spec = SpecNodeParser(self.ctx).parse(initial_spec)
while True:
if self.ctx.accept(TokenType.START_EDGE_PROPERTIES):
edge_properties = EdgeAttributeParser(self.ctx, self.literal_str).parse()
edge_properties.setdefault("depflag", 0)
edge_properties.setdefault("virtuals", ())
dependency = self._parse_node(root_spec)
add_dependency(dependency, **edge_properties)
root_spec._add_dependency(dependency, **edge_properties)
elif self.ctx.accept(TokenType.DEPENDENCY):
dependency = self._parse_node(root_spec)
add_dependency(dependency, depflag=0, virtuals=())
root_spec._add_dependency(dependency, depflag=0, virtuals=())
else:
break
@@ -355,7 +348,7 @@ def add_dependency(dep, **edge_properties):
return root_spec
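A quick sketch of what the wrapped error handling above means for callers; only spack.spec.Spec is used, and the malformed string is deliberately given two versions.
import spack.spec

# A well-formed spec string yields an abstract Spec with its dependency edges attached.
spec = spack.spec.Spec("hdf5 +mpi ^mpich@4.1")
print(spec.name, [dep.name for dep in spec.dependencies()])

# A node with two versions is rejected by the parser; with the wrapper above, the error
# also carries the offending token and the original spec string.
try:
    spack.spec.Spec("hdf5 @1.12 @1.14")
except Exception as e:
    print(type(e).__name__, e)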
def _parse_node(self, root_spec):
dependency = SpecNodeParser(self.ctx, self.literal_str).parse()
dependency = SpecNodeParser(self.ctx).parse()
if dependency is None:
msg = (
"the dependency sigil and any optional edge attributes must be followed by a "
@@ -374,11 +367,10 @@ def all_specs(self) -> List["spack.spec.Spec"]:
class SpecNodeParser:
"""Parse a single spec node from a stream of tokens"""
__slots__ = "ctx", "has_compiler", "has_version", "literal_str"
__slots__ = "ctx", "has_compiler", "has_version"
def __init__(self, ctx, literal_str):
def __init__(self, ctx):
self.ctx = ctx
self.literal_str = literal_str
self.has_compiler = False
self.has_version = False
@@ -396,8 +388,7 @@ def parse(
if not self.ctx.next_token or self.ctx.expect(TokenType.DEPENDENCY):
return initial_spec
if initial_spec is None:
initial_spec = spack.spec.Spec()
initial_spec = initial_spec or spack.spec.Spec()
# If we start with a package name we have a named spec, we cannot
# accept another package name afterwards in a node
@@ -414,21 +405,12 @@ def parse(
elif self.ctx.accept(TokenType.FILENAME):
return FileParser(self.ctx).parse(initial_spec)
def raise_parsing_error(string: str, cause: Optional[Exception] = None):
"""Raise a spec parsing error with token context."""
raise SpecParsingError(string, self.ctx.current_token, self.literal_str) from cause
def add_flag(name: str, value: str, propagate: bool):
"""Wrapper around ``Spec._add_flag()`` that adds parser context to errors raised."""
try:
initial_spec._add_flag(name, value, propagate)
except Exception as e:
raise_parsing_error(str(e), e)
while True:
if self.ctx.accept(TokenType.COMPILER):
if self.has_compiler:
raise_parsing_error("Spec cannot have multiple compilers")
raise spack.spec.DuplicateCompilerSpecError(
f"{initial_spec} cannot have multiple compilers"
)
compiler_name = self.ctx.current_token.value[1:]
initial_spec.compiler = spack.spec.CompilerSpec(compiler_name.strip(), ":")
@@ -436,7 +418,9 @@ def add_flag(name: str, value: str, propagate: bool):
elif self.ctx.accept(TokenType.COMPILER_AND_VERSION):
if self.has_compiler:
raise_parsing_error("Spec cannot have multiple compilers")
raise spack.spec.DuplicateCompilerSpecError(
f"{initial_spec} cannot have multiple compilers"
)
compiler_name, compiler_version = self.ctx.current_token.value[1:].split("@")
initial_spec.compiler = spack.spec.CompilerSpec(
@@ -450,8 +434,9 @@ def add_flag(name: str, value: str, propagate: bool):
or self.ctx.accept(TokenType.VERSION)
):
if self.has_version:
raise_parsing_error("Spec cannot have multiple versions")
raise spack.spec.MultipleVersionError(
f"{initial_spec} cannot have multiple versions"
)
initial_spec.versions = spack.version.VersionList(
[spack.version.from_string(self.ctx.current_token.value[1:])]
)
@@ -460,25 +445,29 @@ def add_flag(name: str, value: str, propagate: bool):
elif self.ctx.accept(TokenType.BOOL_VARIANT):
variant_value = self.ctx.current_token.value[0] == "+"
add_flag(self.ctx.current_token.value[1:].strip(), variant_value, propagate=False)
initial_spec._add_flag(
self.ctx.current_token.value[1:].strip(), variant_value, propagate=False
)
elif self.ctx.accept(TokenType.PROPAGATED_BOOL_VARIANT):
variant_value = self.ctx.current_token.value[0:2] == "++"
add_flag(self.ctx.current_token.value[2:].strip(), variant_value, propagate=True)
initial_spec._add_flag(
self.ctx.current_token.value[2:].strip(), variant_value, propagate=True
)
elif self.ctx.accept(TokenType.KEY_VALUE_PAIR):
match = SPLIT_KVP.match(self.ctx.current_token.value)
assert match, "SPLIT_KVP and KEY_VALUE_PAIR do not agree."
name, _, value = match.groups()
add_flag(name, strip_quotes_and_unescape(value), propagate=False)
name, delim, value = match.groups()
initial_spec._add_flag(name, strip_quotes_and_unescape(value), propagate=False)
elif self.ctx.accept(TokenType.PROPAGATED_KEY_VALUE_PAIR):
match = SPLIT_KVP.match(self.ctx.current_token.value)
assert match, "SPLIT_KVP and PROPAGATED_KEY_VALUE_PAIR do not agree."
name, _, value = match.groups()
add_flag(name, strip_quotes_and_unescape(value), propagate=True)
name, delim, value = match.groups()
initial_spec._add_flag(name, strip_quotes_and_unescape(value), propagate=True)
elif self.ctx.expect(TokenType.DAG_HASH):
if initial_spec.abstract_hash:

View File

@@ -9,7 +9,6 @@
import os.path
import pathlib
import sys
import zipfile
from typing import Any, Dict, Optional, Tuple, Type, Union
import llnl.util.filesystem
@@ -22,7 +21,7 @@
import spack.repo
import spack.stage
import spack.util.spack_json as sjson
from spack.util.crypto import Checker, checksum_stream
from spack.util.crypto import Checker, checksum
from spack.util.executable import which, which_string
@@ -156,9 +155,6 @@ def __hash__(self) -> int:
return hash(self.sha256)
zipfilecache = {}
class FilePatch(Patch):
"""Describes a patch that is retrieved from a file in the repository."""
@@ -198,27 +194,9 @@ def __init__(
# Cannot use pkg.package_dir because it's a property and we have
# classes, not instances.
pkg_dir = os.path.abspath(os.path.dirname(cls.module.__file__))
path = pathlib.Path(os.path.join(pkg_dir, self.relative_path))
if "packages.zip" in path.parts:
# check if it exists in the zip file.
idx = path.parts.index("packages.zip")
zip_path, entry_path = pathlib.PurePath(*path.parts[: idx + 1]), pathlib.PurePath(
*path.parts[idx + 1 :]
)
lookup = zipfilecache.get(zip_path)
if lookup is None:
zip = zipfile.ZipFile(zip_path, "r")
namelist = set(zip.namelist())
zipfilecache[zip_path] = (zip, namelist)
else:
zip, namelist = lookup
if str(entry_path) in namelist:
abs_path = str(path)
break
elif path.exists():
abs_path = str(path)
path = os.path.join(pkg_dir, self.relative_path)
if os.path.exists(path):
abs_path = path
break
if abs_path is None:
@@ -238,24 +216,7 @@ def sha256(self) -> str:
The sha256 of the patch file.
"""
if self._sha256 is None and self.path is not None:
path = pathlib.PurePath(self.path)
if "packages.zip" in path.parts:
print("yes")
# split in path to packages.zip and the path within the zip
idx = path.parts.index("packages.zip")
path_to_zip, path_in_zip = pathlib.PurePath(
*path.parts[: idx + 1]
), pathlib.PurePath(*path.parts[idx + 1 :])
zip = zipfilecache.get(path_to_zip)
if not zip:
zip = zipfile.ZipFile(path_to_zip, "r")
zipfilecache[path_to_zip] = zip
f = zip.open(str(path_in_zip), "r")
else:
f = open(self.path, "rb")
self._sha256 = checksum_stream(hashlib.sha256, f)
f.close()
self._sha256 = checksum(hashlib.sha256, self.path)
assert isinstance(self._sha256, str)
return self._sha256
@@ -358,7 +319,7 @@ def stage(self) -> "spack.stage.Stage":
self.url, archive_sha256=self.archive_sha256, expanded_sha256=self.sha256
)
else:
fetcher = fs.URLFetchStrategy(url=self.url, sha256=self.sha256, expand=False)
fetcher = fs.URLFetchStrategy(self.url, sha256=self.sha256, expand=False)
# The same package can have multiple patches with the same name but
# with different contents, therefore apply a subset of the hash.
@@ -370,7 +331,6 @@ def stage(self) -> "spack.stage.Stage":
fetcher,
name=f"{spack.stage.stage_prefix}patch-{fetch_digest}",
mirror_paths=mirror_ref,
mirrors=spack.mirror.MirrorCollection(source=True).values(),
)
return self._stage
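For context on how FilePatch and UrlPatch instances come into existence, a hypothetical package sketch using the patch directive (URLs, file names, and checksums are placeholders):
from spack.package import *


class Mylib(Package):
    """Hypothetical package exercising both patch flavors handled in this file."""

    homepage = "https://example.com"
    url = "https://example.com/mylib-1.0.tar.gz"

    version("1.0", sha256="0" * 64)

    # FilePatch: a file that lives next to package.py in the repository.
    patch("fix-build.patch", when="@1.0")

    # UrlPatch: fetched, checksummed, and staged through the URL fetch machinery.
    patch(
        "https://example.com/patches/0001-fix-includes.patch",
        sha256="1" * 64,
        when="%gcc",
    )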

View File

@@ -26,7 +26,6 @@
import types
import uuid
import warnings
import zipimport
from typing import Any, Dict, Generator, List, Optional, Set, Tuple, Type, Union
import llnl.path
@@ -101,6 +100,32 @@ def get_data(self, path):
return self.prepend.encode() + b"\n" + data
class RepoLoader(_PrependFileLoader):
"""Loads a Python module associated with a package in specific repository"""
#: Code in ``_package_prepend`` is prepended to imported packages.
#:
#: Spack packages are expected to call `from spack.package import *`
#: themselves, but we are allowing a deprecation period before breaking
#: external repos that don't do this yet.
_package_prepend = "from spack.package import *"
def __init__(self, fullname, repo, package_name):
self.repo = repo
self.package_name = package_name
self.package_py = repo.filename_for_package_name(package_name)
self.fullname = fullname
super().__init__(self.fullname, self.package_py, prepend=self._package_prepend)
class SpackNamespaceLoader:
def create_module(self, spec):
return SpackNamespace(spec.name)
def exec_module(self, module):
module.__loader__ = self
class ReposFinder:
"""MetaPathFinder class that loads a Python module corresponding to a Spack package.
@@ -124,12 +149,12 @@ def current_repository(self, value):
@contextlib.contextmanager
def switch_repo(self, substitute: "RepoType"):
"""Switch the current repository list for the duration of the context manager."""
old = self._repo
old = self.current_repository
try:
self._repo = substitute
self.current_repository = substitute
yield
finally:
self._repo = old
self.current_repository = old
def find_spec(self, fullname, python_path, target=None):
# "target" is not None only when calling importlib.reload()
@@ -140,11 +165,10 @@ def find_spec(self, fullname, python_path, target=None):
if not fullname.startswith(ROOT_PYTHON_NAMESPACE):
return None
result = self.compute_loader(fullname)
if result is None:
loader = self.compute_loader(fullname)
if loader is None:
return None
loader, actual_fullname = result
return importlib.util.spec_from_loader(actual_fullname, loader)
return importlib.util.spec_from_loader(fullname, loader)
def compute_loader(self, fullname):
# namespaces are added to repo, and package modules are leaves.
@@ -163,29 +187,16 @@ def compute_loader(self, fullname):
# With 2 nested conditionals we can call "repo.real_name" only once
package_name = repo.real_name(module_name)
if package_name:
# annoyingly there is a many-to-one mapping from pkg module to file; we have
# to figure out how to deal with this properly.
return (
(repo.zipimporter, f"{namespace}.{package_name}")
if repo.zipimporter
else (
_PrependFileLoader(
fullname=fullname,
path=repo.filename_for_package_name(package_name),
prepend="from spack.package import *",
),
fullname,
)
)
return RepoLoader(fullname, repo, package_name)
# We are importing a full namespace like 'spack.pkg.builtin'
if fullname == repo.full_namespace:
return SpackNamespaceLoader(), fullname
return SpackNamespaceLoader()
# No repo provides the namespace, but it is a valid prefix of
# something in the RepoPath.
if is_repo_path and self.current_repository.by_namespace.is_prefix(fullname):
return SpackNamespaceLoader(), fullname
return SpackNamespaceLoader()
return None
@@ -196,7 +207,6 @@ def compute_loader(self, fullname):
repo_config_name = "repo.yaml" # Top-level filename for repo config.
repo_index_name = "index.yaml" # Top-level filename for repository index.
packages_dir_name = "packages" # Top-level repo directory containing pkgs.
packages_zip_name = "packages.zip" # Top-level filename for zipped packages.
package_file_name = "package.py" # Filename for packages in a repository.
#: Guaranteed unused default value for some functions.
@@ -206,9 +216,9 @@ def compute_loader(self, fullname):
def packages_path():
"""Get the test repo if it is active, otherwise the builtin repo."""
try:
return PATH.get_repo("builtin.mock").packages_path
except UnknownNamespaceError:
return PATH.get_repo("builtin").packages_path
return spack.repo.PATH.get_repo("builtin.mock").packages_path
except spack.repo.UnknownNamespaceError:
return spack.repo.PATH.get_repo("builtin").packages_path
class GitExe:
@@ -673,7 +683,7 @@ class RepoPath:
def __init__(
self,
*repos: Union[str, "Repo"],
cache: Optional["spack.caches.FileCacheType"],
cache: "spack.caches.FileCacheType",
overrides: Optional[Dict[str, Any]] = None,
) -> None:
self.repos: List[Repo] = []
@@ -686,7 +696,6 @@ def __init__(
for repo in repos:
try:
if isinstance(repo, str):
assert cache is not None, "cache must hold a value, when repo is a string"
repo = Repo(repo, cache=cache, overrides=overrides)
repo.finder(self)
self.put_last(repo)
@@ -698,10 +707,6 @@ def __init__(
f" spack repo rm {repo}",
)
def ensure_unwrapped(self) -> "RepoPath":
"""Ensure we unwrap this object from any dynamic wrapper (like Singleton)"""
return self
def put_first(self, repo: "Repo") -> None:
"""Add repo first in the search path."""
if isinstance(repo, RepoPath):
@@ -925,16 +930,6 @@ def is_virtual_safe(self, pkg_name: str) -> bool:
def __contains__(self, pkg_name):
return self.exists(pkg_name)
def marshal(self):
return (self.repos,)
@staticmethod
def unmarshal(repos):
return RepoPath(*repos, cache=None)
def __reduce__(self):
return RepoPath.unmarshal, self.marshal()
class Repo:
"""Class representing a package repository in the filesystem.
@@ -999,14 +994,9 @@ def check(condition, msg):
self._names = self.full_namespace.split(".")
packages_dir = config.get("subdirectory", packages_dir_name)
packages_zip = os.path.join(self.root, "packages.zip")
self.zipimporter = (
zipimport.zipimporter(packages_zip) if os.path.exists(packages_zip) else None
)
self.packages_path = os.path.join(self.root, packages_dir)
check(
self.zipimporter or os.path.isdir(self.packages_path),
f"No '{self.packages_path}' or '{packages_zip} found in '{root}'",
os.path.isdir(self.packages_path), f"No directory '{packages_dir}' found in '{root}'"
)
# Class attribute overrides by package name
@@ -1329,20 +1319,6 @@ def __repr__(self) -> str:
def __contains__(self, pkg_name: str) -> bool:
return self.exists(pkg_name)
@staticmethod
def unmarshal(root, cache, overrides):
"""Helper method to unmarshal keyword arguments"""
return Repo(root, cache=cache, overrides=overrides)
def marshal(self):
cache = self._cache
if isinstance(cache, llnl.util.lang.Singleton):
cache = cache.instance
return self.root, cache, self.overrides
def __reduce__(self):
return Repo.unmarshal, self.marshal()
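The marshal/unmarshal/__reduce__ trio above exists so repository objects can be pickled across process boundaries (for example by the parallel external detection); a generic sketch of the same pattern with a made-up class:
import pickle


class Catalog:
    """Toy stand-in for Repo: heavy state is rebuilt from a few constructor arguments."""

    def __init__(self, root, overrides=None):
        self.root = root
        self.overrides = overrides or {}

    @staticmethod
    def unmarshal(root, overrides):
        return Catalog(root, overrides=overrides)

    def marshal(self):
        return self.root, self.overrides

    def __reduce__(self):
        # pickle calls Catalog.unmarshal(*self.marshal()) when loading.
        return Catalog.unmarshal, self.marshal()


clone = pickle.loads(pickle.dumps(Catalog("/repos/builtin", {"zlib": {"version": "1.3"}})))
print(clone.root, clone.overrides)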
RepoType = Union[Repo, RepoPath]
@@ -1502,14 +1478,6 @@ def use_repositories(
PATH = saved
class SpackNamespaceLoader:
def create_module(self, spec):
return SpackNamespace(spec.name)
def exec_module(self, module):
module.__loader__ = self
class MockRepositoryBuilder:
"""Build a mock repository in a directory"""

View File

@@ -11,26 +11,6 @@
import spack.schema.environment
flags: Dict[str, Any] = {
"type": "object",
"additionalProperties": False,
"properties": {
"cflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"cxxflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"fflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"cppflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"ldflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"ldlibs": {"anyOf": [{"type": "string"}, {"type": "null"}]},
},
}
extra_rpaths: Dict[str, Any] = {"type": "array", "default": [], "items": {"type": "string"}}
implicit_rpaths: Dict[str, Any] = {
"anyOf": [{"type": "array", "items": {"type": "string"}}, {"type": "boolean"}]
}
#: Properties for inclusion in other schemas
properties: Dict[str, Any] = {
"compilers": {
@@ -55,7 +35,18 @@
"fc": {"anyOf": [{"type": "string"}, {"type": "null"}]},
},
},
"flags": flags,
"flags": {
"type": "object",
"additionalProperties": False,
"properties": {
"cflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"cxxflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"fflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"cppflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"ldflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"ldlibs": {"anyOf": [{"type": "string"}, {"type": "null"}]},
},
},
"spec": {"type": "string"},
"operating_system": {"type": "string"},
"target": {"type": "string"},
@@ -63,9 +54,18 @@
"modules": {
"anyOf": [{"type": "string"}, {"type": "null"}, {"type": "array"}]
},
"implicit_rpaths": implicit_rpaths,
"implicit_rpaths": {
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"type": "boolean"},
]
},
"environment": spack.schema.environment.definition,
"extra_rpaths": extra_rpaths,
"extra_rpaths": {
"type": "array",
"default": [],
"items": {"type": "string"},
},
},
}
},

View File

@@ -84,6 +84,7 @@
"build_language": {"type": "string"},
"build_jobs": {"type": "integer", "minimum": 1},
"ccache": {"type": "boolean"},
"concretizer": {"type": "string", "enum": ["original", "clingo"]},
"db_lock_timeout": {"type": "integer", "minimum": 1},
"package_lock_timeout": {
"anyOf": [{"type": "integer", "minimum": 1}, {"type": "null"}]
@@ -97,9 +98,9 @@
"aliases": {"type": "object", "patternProperties": {r"\w[\w-]*": {"type": "string"}}},
},
"deprecatedProperties": {
"properties": ["concretizer"],
"message": "Spack supports only clingo as a concretizer from v0.23. "
"The config:concretizer config option is ignored.",
"properties": ["terminal_title"],
"message": "config:terminal_title has been replaced by "
"install_status and is ignored",
"error": False,
},
}

View File

@@ -11,8 +11,6 @@
import spack.schema.environment
from .compilers import extra_rpaths, flags, implicit_rpaths
permissions = {
"type": "object",
"additionalProperties": False,
@@ -186,16 +184,7 @@
"type": "object",
"additionalProperties": True,
"properties": {
"compilers": {
"type": "object",
"patternProperties": {
r"(^\w[\w-]*)": {"type": "string"}
},
},
"environment": spack.schema.environment.definition,
"extra_rpaths": extra_rpaths,
"implicit_rpaths": implicit_rpaths,
"flags": flags,
"environment": spack.schema.environment.definition
},
},
},

View File

@@ -1438,14 +1438,16 @@ def condition(
# caller, we won't emit partial facts.
condition_id = next(self._id_counter)
self.gen.fact(fn.pkg_fact(required_spec.name, fn.condition(condition_id)))
self.gen.fact(fn.condition_reason(condition_id, msg))
trigger_id = self._get_condition_id(
required_spec, cache=self._trigger_cache, body=True, transform=transform_required
)
self.gen.fact(fn.pkg_fact(required_spec.name, fn.condition(condition_id)))
self.gen.fact(fn.condition_reason(condition_id, msg))
self.gen.fact(
fn.pkg_fact(required_spec.name, fn.condition_trigger(condition_id, trigger_id))
)
if not imposed_spec:
return condition_id
@@ -1694,43 +1696,19 @@ def external_packages(self):
spack.spec.parse_with_version_concrete(x["spec"]) for x in externals
]
selected_externals = set()
external_specs = []
if spec_filters:
for current_filter in spec_filters:
current_filter.factory = lambda: candidate_specs
selected_externals.update(current_filter.selected_specs())
# Emit facts for externals specs. Note that "local_idx" is the index of the spec
# in packages:<pkg_name>:externals. This means:
#
# packages:<pkg_name>:externals[local_idx].spec == spec
external_versions = []
for local_idx, spec in enumerate(candidate_specs):
msg = f"{spec.name} available as external when satisfying {spec}"
if spec_filters and spec not in selected_externals:
continue
if not spec.versions.concrete:
warnings.warn(f"cannot use the external spec {spec}: needs a concrete version")
continue
def external_imposition(input_spec, requirements):
return requirements + [
fn.attr("external_conditions_hold", input_spec.name, local_idx)
]
try:
self.condition(spec, spec, msg=msg, transform_imposed=external_imposition)
except (spack.error.SpecError, RuntimeError) as e:
warnings.warn(f"while setting up external spec {spec}: {e}")
continue
external_versions.append((spec.version, local_idx))
self.possible_versions[spec.name].add(spec.version)
self.gen.newline()
external_specs.extend(current_filter.selected_specs())
else:
external_specs.extend(candidate_specs)
# Order the external versions to prefer more recent versions
# even if specs in packages.yaml are not ordered that way
external_versions = [
(x.version, external_id) for external_id, x in enumerate(external_specs)
]
external_versions = [
(v, idx, external_id)
for idx, (v, external_id) in enumerate(sorted(external_versions, reverse=True))
@@ -1740,6 +1718,19 @@ def external_imposition(input_spec, requirements):
DeclaredVersion(version=version, idx=idx, origin=Provenance.EXTERNAL)
)
# Declare external conditions with a local index into packages.yaml
for local_idx, spec in enumerate(external_specs):
msg = "%s available as external when satisfying %s" % (spec.name, spec)
def external_imposition(input_spec, requirements):
return requirements + [
fn.attr("external_conditions_hold", input_spec.name, local_idx)
]
self.condition(spec, spec, msg=msg, transform_imposed=external_imposition)
self.possible_versions[spec.name].add(spec.version)
self.gen.newline()
self.trigger_rules()
self.effect_rules()
@@ -1851,8 +1842,6 @@ def _spec_clauses(
if spec.name:
clauses.append(f.node(spec.name) if not spec.virtual else f.virtual_node(spec.name))
if spec.namespace:
clauses.append(f.namespace(spec.name, spec.namespace))
clauses.extend(self.spec_versions(spec))
@@ -2750,7 +2739,6 @@ class _Head:
"""ASP functions used to express spec clauses in the HEAD of a rule"""
node = fn.attr("node")
namespace = fn.attr("namespace_set")
virtual_node = fn.attr("virtual_node")
node_platform = fn.attr("node_platform_set")
node_os = fn.attr("node_os_set")
@@ -2766,7 +2754,6 @@ class _Body:
"""ASP functions used to express spec clauses in the BODY of a rule"""
node = fn.attr("node")
namespace = fn.attr("namespace")
virtual_node = fn.attr("virtual_node")
node_platform = fn.attr("node_platform")
node_os = fn.attr("node_os")

View File

@@ -18,79 +18,38 @@
{ attr("virtual_node", node(0..X-1, Package)) } :- max_dupes(Package, X), virtual(Package).
% Integrity constraints on DAG nodes
:- attr("root", PackageNode),
not attr("node", PackageNode),
internal_error("Every root must be a node").
:- attr("version", PackageNode, _),
not attr("node", PackageNode),
not attr("virtual_node", PackageNode),
internal_error("Only nodes and virtual_nodes can have versions").
:- attr("node_version_satisfies", PackageNode, _),
not attr("node", PackageNode),
not attr("virtual_node", PackageNode),
internal_error("Only nodes and virtual_nodes can have version satisfaction").
:- attr("hash", PackageNode, _),
not attr("node", PackageNode),
internal_error("Only nodes can have hashes").
:- attr("node_platform", PackageNode, _),
not attr("node", PackageNode),
internal_error("Only nodes can have platforms").
:- attr("node_os", PackageNode, _), not attr("node", PackageNode),
internal_error("Only nodes can have node_os").
:- attr("node_target", PackageNode, _), not attr("node", PackageNode),
internal_error("Only nodes can have node_target").
:- attr("node_compiler_version", PackageNode, _, _), not attr("node", PackageNode),
internal_error("Only nodes can have node_compiler_version").
:- attr("variant_value", PackageNode, _, _), not attr("node", PackageNode),
internal_error("variant_value true for a non-node").
:- attr("node_flag_compiler_default", PackageNode), not attr("node", PackageNode),
internal_error("node_flag_compiler_default true for non-node").
:- attr("node_flag", PackageNode, _, _), not attr("node", PackageNode),
internal_error("node_flag assigned for non-node").
:- attr("external_spec_selected", PackageNode, _), not attr("node", PackageNode),
internal_error("external_spec_selected for non-node").
:- attr("depends_on", ParentNode, _, _), not attr("node", ParentNode),
internal_error("non-node depends on something").
:- attr("depends_on", _, ChildNode, _), not attr("node", ChildNode),
internal_error("something depends_on a non-node").
:- attr("node_flag_source", Node, _, _), not attr("node", Node),
internal_error("node_flag_source assigned for a non-node").
:- attr("node_flag_source", _, _, SourceNode), not attr("node", SourceNode),
internal_error("node_flag_source assigned with a non-node source").
:- attr("virtual_node", VirtualNode), not provider(_, VirtualNode),
internal_error("virtual node with no provider").
:- provider(_, VirtualNode), not attr("virtual_node", VirtualNode),
internal_error("provider with no virtual node").
:- provider(PackageNode, _), not attr("node", PackageNode),
internal_error("provider with no real node").
:- attr("root", PackageNode), not attr("node", PackageNode).
:- attr("version", PackageNode, _), not attr("node", PackageNode), not attr("virtual_node", PackageNode).
:- attr("node_version_satisfies", PackageNode, _), not attr("node", PackageNode), not attr("virtual_node", PackageNode).
:- attr("hash", PackageNode, _), not attr("node", PackageNode).
:- attr("node_platform", PackageNode, _), not attr("node", PackageNode).
:- attr("node_os", PackageNode, _), not attr("node", PackageNode).
:- attr("node_target", PackageNode, _), not attr("node", PackageNode).
:- attr("node_compiler_version", PackageNode, _, _), not attr("node", PackageNode).
:- attr("variant_value", PackageNode, _, _), not attr("node", PackageNode).
:- attr("node_flag_compiler_default", PackageNode), not attr("node", PackageNode).
:- attr("node_flag", PackageNode, _, _), not attr("node", PackageNode).
:- attr("external_spec_selected", PackageNode, _), not attr("node", PackageNode).
:- attr("depends_on", ParentNode, _, _), not attr("node", ParentNode).
:- attr("depends_on", _, ChildNode, _), not attr("node", ChildNode).
:- attr("node_flag_source", ParentNode, _, _), not attr("node", ParentNode).
:- attr("node_flag_source", _, _, ChildNode), not attr("node", ChildNode).
:- attr("virtual_node", VirtualNode), not provider(_, VirtualNode), internal_error("virtual node with no provider").
:- provider(_, VirtualNode), not attr("virtual_node", VirtualNode), internal_error("provider with no virtual node").
:- provider(PackageNode, _), not attr("node", PackageNode), internal_error("provider with no real node").
:- attr("root", node(ID, PackageNode)), ID > min_dupe_id,
internal_error("root with a non-minimal duplicate ID").
:- attr("root", node(ID, PackageNode)), ID > min_dupe_id, internal_error("root with a non-minimal duplicate ID").
% Nodes in the "root" unification set cannot depend on non-root nodes if the dependency is "link" or "run"
:- attr("depends_on", node(min_dupe_id, Package), node(ID, _), "link"), ID != min_dupe_id, unification_set("root", node(min_dupe_id, Package)), internal_error("link dependency out of the root unification set").
:- attr("depends_on", node(min_dupe_id, Package), node(ID, _), "run"), ID != min_dupe_id, unification_set("root", node(min_dupe_id, Package)), internal_error("run dependency out of the root unification set").
% Namespaces are statically assigned by a package fact if not otherwise set
error(100, "{0} does not have a namespace", Package) :- attr("node", node(ID, Package)),
not attr("namespace", node(ID, Package), _),
internal_error("A node must have a namespace").
error(100, "{0} cannot come from both {1} and {2} namespaces", Package, NS1, NS2) :- attr("node", node(ID, Package)),
attr("namespace", node(ID, Package), NS1),
attr("namespace", node(ID, Package), NS2),
NS1 != NS2,
internal_error("A node cannot have two namespaces").
attr("namespace", node(ID, Package), Namespace) :- attr("namespace_set", node(ID, Package), Namespace).
attr("namespace", node(ID, Package), Namespace)
:- attr("node", node(ID, Package)),
not attr("namespace_set", node(ID, Package), _),
pkg_fact(Package, namespace(Namespace)).
% Namespaces are statically assigned by a package fact
attr("namespace", node(ID, Package), Namespace) :- attr("node", node(ID, Package)), pkg_fact(Package, namespace(Namespace)).
% Rules on "unification sets", i.e. on sets of nodes allowing a single configuration of any given package
unify(SetID, PackageName) :- unification_set(SetID, node(_, PackageName)).
:- 2 { unification_set(SetID, node(_, PackageName)) }, unify(SetID, PackageName),
internal_error("Cannot have multiple unification sets IDs for one set").
:- 2 { unification_set(SetID, node(_, PackageName)) }, unify(SetID, PackageName).
unification_set("root", PackageNode) :- attr("root", PackageNode).
unification_set(SetID, ChildNode) :- attr("depends_on", ParentNode, ChildNode, Type), Type != "build", unification_set(SetID, ParentNode).
@@ -116,8 +75,7 @@ unification_set(SetID, VirtualNode)
% as a build dependency.
%
% We'll need to relax the rule before we get to actual cross-compilation
:- depends_on(ParentNode, node(X, Dependency)), depends_on(ParentNode, node(Y, Dependency)), X < Y,
internal_error("Cannot split link/build deptypes for a single edge (yet)").
:- depends_on(ParentNode, node(X, Dependency)), depends_on(ParentNode, node(Y, Dependency)), X < Y.
#defined multiple_unification_sets/1.
@@ -173,8 +131,7 @@ mentioned_in_literal(Root, Mentioned) :- mentioned_in_literal(TriggerID, Root, M
condition_set(node(min_dupe_id, Root), node(min_dupe_id, Root)) :- mentioned_in_literal(Root, Root).
1 { condition_set(node(min_dupe_id, Root), node(0..Y-1, Mentioned)) : max_dupes(Mentioned, Y) } 1 :-
mentioned_in_literal(Root, Mentioned), Mentioned != Root,
internal_error("must have exactly one condition_set for literals").
mentioned_in_literal(Root, Mentioned), Mentioned != Root.
% Discriminate between "roots" that have been explicitly requested, and roots that are deduced from "virtual roots"
explicitly_requested_root(node(min_dupe_id, Package)) :-
@@ -194,8 +151,7 @@ associated_with_root(RootNode, ChildNode) :-
:- attr("root", RootNode),
condition_set(RootNode, node(X, Package)),
not virtual(Package),
not associated_with_root(RootNode, node(X, Package)),
internal_error("nodes in root condition set must be associated with root").
not associated_with_root(RootNode, node(X, Package)).
#defined concretize_everything/0.
#defined literal/1.
@@ -429,10 +385,8 @@ imposed_nodes(ConditionID, PackageNode, node(X, A1))
condition_set(PackageNode, node(X, A1)),
attr("hash", PackageNode, ConditionID).
:- imposed_packages(ID, A1), impose(ID, PackageNode), not condition_set(PackageNode, node(_, A1)),
internal_error("Imposing constraint outside of condition set").
:- imposed_packages(ID, A1), impose(ID, PackageNode), not imposed_nodes(ID, PackageNode, node(_, A1)),
internal_error("Imposing constraint outside of imposed_nodes").
:- imposed_packages(ID, A1), impose(ID, PackageNode), not condition_set(PackageNode, node(_, A1)).
:- imposed_packages(ID, A1), impose(ID, PackageNode), not imposed_nodes(ID, PackageNode, node(_, A1)).
% Conditions that hold impose may impose constraints on other specs
attr(Name, node(X, A1)) :- impose(ID, PackageNode), imposed_constraint(ID, Name, A1), imposed_nodes(ID, PackageNode, node(X, A1)).
@@ -462,8 +416,7 @@ provider(ProviderNode, VirtualNode) :- attr("provider_set", ProviderNode, Virtua
% satisfy the dependency.
1 { attr("depends_on", node(X, A1), node(0..Y-1, A2), A3) : max_dupes(A2, Y) } 1
:- impose(ID, node(X, A1)),
imposed_constraint(ID, "depends_on", A1, A2, A3),
internal_error("Build deps must land in exactly one duplicate").
imposed_constraint(ID, "depends_on", A1, A2, A3).
% Reconstruct virtual dependencies for reused specs
attr("virtual_on_edge", node(X, A1), node(Y, A2), Virtual)

View File

@@ -70,6 +70,7 @@
import spack.compiler
import spack.compilers
import spack.config
import spack.dependency as dp
import spack.deptypes as dt
import spack.error
import spack.hash_types as ht
@@ -98,7 +99,7 @@
"CompilerSpec",
"Spec",
"SpecParseError",
"UnsupportedPropagationError",
"ArchitecturePropagationError",
"DuplicateDependencyError",
"DuplicateCompilerSpecError",
"UnsupportedCompilerError",
@@ -128,7 +129,7 @@
r"|" # or
# OPTION 2: an actual format string
r"{" # non-escaped open brace {
r"([%@/]|[\w ][\w -]*=)?" # optional sigil (or identifier or space) to print sigil in color
r"([%@/]|arch=)?" # optional sigil (to print sigil in color)
r"(?:\^([^}\.]+)\.)?" # optional ^depname. (to get attr from dependency)
# after the sigil or depname, we can have a hash expression or another attribute
r"(?:" # one of
@@ -162,14 +163,14 @@
DEFAULT_FORMAT = (
"{name}{@versions}"
"{%compiler.name}{@compiler.versions}{compiler_flags}"
"{variants}{ namespace=namespace_if_anonymous}{ arch=architecture}{/abstract_hash}"
"{variants}{arch=architecture}{/abstract_hash}"
)
#: Display format, which eliminates extra `@=` in the output, for readability.
DISPLAY_FORMAT = (
"{name}{@version}"
"{%compiler.name}{@compiler.version}{compiler_flags}"
"{variants}{ namespace=namespace_if_anonymous}{ arch=architecture}{/abstract_hash}"
"{variants}{arch=architecture}{/abstract_hash}"
)
#: Regular expression to pull spec contents out of clearsigned signature
@@ -1639,9 +1640,19 @@ def _add_flag(self, name, value, propagate):
Known flags currently include "arch"
"""
if propagate and name in spack.directives.reserved_names:
raise UnsupportedPropagationError(
f"Propagation with '==' is not supported for '{name}'."
# If the == syntax is used to propagate the spec architecture
# This is an error
architecture_names = [
"arch",
"architecture",
"platform",
"os",
"operating_system",
"target",
]
if propagate and name in architecture_names:
raise ArchitecturePropagationError(
"Unable to propagate the architecture failed." " Use a '=' instead."
)
valid_flags = FlagMap.valid_compiler_flags()
@@ -1655,8 +1666,6 @@ def _add_flag(self, name, value, propagate):
self._set_architecture(os=value)
elif name == "target":
self._set_architecture(target=value)
elif name == "namespace":
self.namespace = value
elif name in valid_flags:
assert self.compiler_flags is not None
flags_and_propagation = spack.compiler.tokenize_flags(value, propagate)
@@ -1676,7 +1685,9 @@ def _set_architecture(self, **kwargs):
"""Called by the parser to set the architecture."""
arch_attrs = ["platform", "os", "target"]
if self.architecture and self.architecture.concrete:
raise DuplicateArchitectureError("Spec cannot have two architectures.")
raise DuplicateArchitectureError(
"Spec for '%s' cannot have two architectures." % self.name
)
if not self.architecture:
new_vals = tuple(kwargs.get(arg, None) for arg in arch_attrs)
@@ -1685,7 +1696,10 @@ def _set_architecture(self, **kwargs):
new_attrvals = [(a, v) for a, v in kwargs.items() if a in arch_attrs]
for new_attr, new_value in new_attrvals:
if getattr(self.architecture, new_attr):
raise DuplicateArchitectureError(f"Cannot specify '{new_attr}' twice")
raise DuplicateArchitectureError(
"Spec for '%s' cannot have two '%s' specified "
"for its architecture" % (self.name, new_attr)
)
else:
setattr(self.architecture, new_attr, new_value)
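A quick illustration of the duplicate-architecture guard above; a sketch that assumes Spack's libraries are importable and that the spec string parses through this same code path:

import spack.spec

# Specifying the same architecture component twice on one spec is rejected.
try:
    spack.spec.Spec("zlib os=ubuntu22.04 os=centos8")
except spack.spec.DuplicateArchitectureError as err:
    print(err)  # message wording differs between the two sides of this diff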
@@ -1880,14 +1894,14 @@ def short_spec(self):
"""Returns a version of the spec with the dependencies hashed
instead of completely enumerated."""
spec_format = "{name}{@version}{%compiler.name}{@compiler.version}"
spec_format += "{variants}{ arch=architecture}{/hash:7}"
spec_format += "{variants}{arch=architecture}{/hash:7}"
return self.format(spec_format)
@property
def cshort_spec(self):
"""Returns an auto-colorized version of ``self.short_spec``."""
spec_format = "{name}{@version}{%compiler.name}{@compiler.version}"
spec_format += "{variants}{ arch=architecture}{/hash:7}"
spec_format += "{variants}{arch=architecture}{/hash:7}"
return self.cformat(spec_format)
@property
@@ -2614,6 +2628,294 @@ def validate_detection(self):
validate_fn = getattr(pkg_cls, "validate_detected_spec", lambda x, y: None)
validate_fn(self, self.extra_attributes)
def _concretize_helper(self, concretizer, presets=None, visited=None):
"""Recursive helper function for concretize().
This concretizes everything bottom-up. As things are
concretized, they're added to the presets, and ancestors
will prefer the settings of their children.
"""
if presets is None:
presets = {}
if visited is None:
visited = set()
if self.name in visited:
return False
if self.concrete:
visited.add(self.name)
return False
changed = False
# Concretize deps first -- this is a bottom-up process.
for name in sorted(self._dependencies):
# WARNING: This function is an implementation detail of the
# WARNING: original concretizer. Since with that greedy
# WARNING: algorithm we don't allow multiple nodes from
# WARNING: the same package in a DAG, here we hard-code
# WARNING: using index 0 i.e. we assume that we have only
# WARNING: one edge from package "name"
changed |= self._dependencies[name][0].spec._concretize_helper(
concretizer, presets, visited
)
if self.name in presets:
changed |= self.constrain(presets[self.name])
else:
# Concretize virtual dependencies last. Because they're added
# to presets below, their constraints will all be merged, but we'll
# still need to select a concrete package later.
if not self.virtual:
changed |= any(
(
concretizer.concretize_develop(self), # special variant
concretizer.concretize_architecture(self),
concretizer.concretize_compiler(self),
concretizer.adjust_target(self),
# flags must be concretized after compiler
concretizer.concretize_compiler_flags(self),
concretizer.concretize_version(self),
concretizer.concretize_variants(self),
)
)
presets[self.name] = self
visited.add(self.name)
return changed
def _replace_with(self, concrete):
"""Replace this virtual spec with a concrete spec."""
assert self.virtual
virtuals = (self.name,)
for dep_spec in itertools.chain.from_iterable(self._dependents.values()):
dependent = dep_spec.parent
depflag = dep_spec.depflag
# remove self from all dependents, unless it is already removed
if self.name in dependent._dependencies:
del dependent._dependencies.edges[self.name]
# add the replacement, unless it is already a dep of dependent.
if concrete.name not in dependent._dependencies:
dependent._add_dependency(concrete, depflag=depflag, virtuals=virtuals)
else:
dependent.edges_to_dependencies(name=concrete.name)[0].update_virtuals(
virtuals=virtuals
)
def _expand_virtual_packages(self, concretizer):
"""Find virtual packages in this spec, replace them with providers,
and normalize again to include the provider's (potentially virtual)
dependencies. Repeat until there are no virtual deps.
Precondition: spec is normalized.
.. todo::
If a provider depends on something that conflicts with
other dependencies in the spec being expanded, this can
produce a conflicting spec. For example, if mpich depends
on hwloc@:1.3 but something in the spec needs hwloc@1.4:,
then we should choose an MPI other than mpich. Cases like
this are infrequent, but we should implement this before it is
a problem.
"""
# Make an index of stuff this spec already provides
self_index = spack.provider_index.ProviderIndex(
repository=spack.repo.PATH, specs=self.traverse(), restrict=True
)
changed = False
done = False
while not done:
done = True
for spec in list(self.traverse()):
replacement = None
if spec.external:
continue
if spec.virtual:
replacement = self._find_provider(spec, self_index)
if replacement:
# TODO: may break if in-place on self but
# shouldn't happen if root is traversed first.
spec._replace_with(replacement)
done = False
break
if not replacement:
# Get a list of possible replacements in order of
# preference.
candidates = concretizer.choose_virtual_or_external(spec)
# Try the replacements in order, skipping any that cause
# satisfiability problems.
for replacement in candidates:
if replacement is spec:
break
# Replace spec with the candidate and normalize
copy = self.copy()
copy[spec.name]._dup(replacement, deps=False)
try:
# If there are duplicate providers or duplicate
# provider deps, consolidate them and merge
# constraints.
copy.normalize(force=True)
break
except spack.error.SpecError:
# On error, we'll try the next replacement.
continue
# If replacement is external then trim the dependencies
if replacement.external:
if spec._dependencies:
for dep in spec.dependencies():
del dep._dependents.edges[spec.name]
changed = True
spec.clear_dependencies()
replacement.clear_dependencies()
replacement.architecture = self.architecture
# TODO: could this and the stuff in _dup be cleaned up?
def feq(cfield, sfield):
return (not cfield) or (cfield == sfield)
if replacement is spec or (
feq(replacement.name, spec.name)
and feq(replacement.versions, spec.versions)
and feq(replacement.compiler, spec.compiler)
and feq(replacement.architecture, spec.architecture)
and feq(replacement._dependencies, spec._dependencies)
and feq(replacement.variants, spec.variants)
and feq(replacement.external_path, spec.external_path)
and feq(replacement.external_modules, spec.external_modules)
):
continue
# Refine this spec to the candidate. This uses
# replace_with AND dup so that it can work in
# place. TODO: make this more efficient.
if spec.virtual:
spec._replace_with(replacement)
changed = True
if spec._dup(replacement, deps=False, cleardeps=False):
changed = True
self_index.update(spec)
done = False
break
return changed
def _old_concretize(self, tests=False, deprecation_warning=True):
"""A spec is concrete if it describes one build of a package uniquely.
This will ensure that this spec is concrete.
Args:
tests (list or bool): list of packages that will need test
dependencies, or True/False for test all/none
deprecation_warning (bool): enable or disable the deprecation
warning for the old concretizer
If this spec could describe more than one version, variant, or build
of a package, this will add constraints to make it concrete.
Some rigorous validation and checks are also performed on the spec.
Concretizing ensures that it is self-consistent and that it's
consistent with requirements of its packages. See flatten() and
normalize() for more details on this.
"""
import spack.concretize
# Add a warning message to inform users that the original concretizer
# will be removed
if deprecation_warning:
msg = (
"the original concretizer is currently being used.\n\tUpgrade to "
'"clingo" at your earliest convenience. The original concretizer '
"will be removed from Spack in a future version."
)
warnings.warn(msg)
self.replace_hash()
if not self.name:
raise spack.error.SpecError("Attempting to concretize anonymous spec")
if self._concrete:
return
# take the spec apart once before starting the main concretization loop and resolving
# deps, but don't break dependencies during concretization as the spec is built.
user_spec_deps = self.flat_dependencies(disconnect=True)
changed = True
force = False
concretizer = spack.concretize.Concretizer(self.copy())
while changed:
changes = (
self.normalize(force, tests, user_spec_deps, disconnect=False),
self._expand_virtual_packages(concretizer),
self._concretize_helper(concretizer),
)
changed = any(changes)
force = True
visited_user_specs = set()
for dep in self.traverse():
visited_user_specs.add(dep.name)
pkg_cls = spack.repo.PATH.get_pkg_class(dep.name)
visited_user_specs.update(pkg_cls(dep).provided_virtual_names())
extra = set(user_spec_deps.keys()).difference(visited_user_specs)
if extra:
raise InvalidDependencyError(self.name, extra)
Spec.inject_patches_variant(self)
for s in self.traverse():
# TODO: Refactor this into a common method to build external specs
# TODO: or turn external_path into a lazy property
Spec.ensure_external_path_if_external(s)
# assign hashes and mark concrete
self._finalize_concretization()
# If any spec in the DAG is deprecated, throw an error
Spec.ensure_no_deprecated(self)
# Update externals as needed
for dep in self.traverse():
if dep.external:
dep.package.update_external_dependencies()
# Now that the spec is concrete we should check if
# there are declared conflicts
#
# TODO: this needs rethinking, as currently we can only express
# TODO: internal configuration conflicts within one package.
matches = []
for x in self.traverse():
if x.external:
# external specs are already built, don't worry about whether
# it's possible to build that configuration with Spack
continue
for when_spec, conflict_list in x.package_class.conflicts.items():
if x.satisfies(when_spec):
for conflict_spec, msg in conflict_list:
if x.satisfies(conflict_spec):
when = when_spec.copy()
when.name = x.name
matches.append((x, conflict_spec, when, msg))
if matches:
raise ConflictsInSpecError(self, matches)
# Check if we can produce an optimized binary (will throw if
# there are declared inconsistencies)
self.architecture.target.optimization_flags(self.compiler)
def _patches_assigned(self):
"""Whether patches have been assigned to this spec by the concretizer."""
# FIXME: _patches_in_order_of_appearance is attached after concretization
@@ -2743,13 +3045,7 @@ def ensure_no_deprecated(root):
msg += " For each package listed, choose another spec\n"
raise SpecDeprecatedError(msg)
def concretize(self, tests: Union[bool, List[str]] = False) -> None:
"""Concretize the current spec.
Args:
tests: if False disregard 'test' dependencies, if a list of names activate them for
the packages in the list, if True activate 'test' dependencies for all packages.
"""
def _new_concretize(self, tests=False):
import spack.solver.asp
self.replace_hash()
@@ -2783,6 +3079,19 @@ def concretize(self, tests: Union[bool, List[str]] = False) -> None:
concretized = answer[node]
self._dup(concretized)
def concretize(self, tests=False):
"""Concretize the current spec.
Args:
tests (bool or list): if False disregard 'test' dependencies,
if a list of names activate them for the packages in the list,
if True activate 'test' dependencies for all packages.
"""
if spack.config.get("config:concretizer", "clingo") == "clingo":
self._new_concretize(tests)
else:
self._old_concretize(tests)
def _mark_root_concrete(self, value=True):
"""Mark just this spec (not dependencies) concrete."""
if (not value) and self.concrete and self.installed:
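Roughly, the dispatch above can be exercised like this (a sketch; it assumes an importable Spack with its default package repository configured, and the package name is only an example):

import spack.config
import spack.spec

backend = spack.config.get("config:concretizer", "clingo")  # "clingo" unless overridden
spec = spack.spec.Spec("zlib")
spec.concretize()  # routed to _new_concretize() or _old_concretize() based on the setting
print(backend, spec.format("{name}{@version}{/hash:7}"))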
@@ -2886,6 +3195,34 @@ def concretized(self, tests=False):
clone.concretize(tests=tests)
return clone
def flat_dependencies(self, disconnect: bool = False):
"""Build DependencyMap of all of this spec's dependencies with their constraints merged.
Arguments:
disconnect: if True, disconnect all dependents and dependencies among nodes in this
spec's DAG.
"""
flat_deps = {}
deptree = self.traverse(root=False)
for spec in deptree:
if spec.name not in flat_deps:
flat_deps[spec.name] = spec
else:
try:
flat_deps[spec.name].constrain(spec)
except spack.error.UnsatisfiableSpecError as e:
# DAG contains two instances of the same package with inconsistent constraints.
raise InconsistentSpecError("Invalid Spec DAG: %s" % e.message) from e
if disconnect:
for spec in flat_deps.values():
if not spec.concrete:
spec.clear_edges()
self.clear_dependencies()
return flat_deps
def index(self, deptype="all"):
"""Return a dictionary that points to all the dependencies in this
spec.
@@ -2895,6 +3232,312 @@ def index(self, deptype="all"):
dm[spec.name].append(spec)
return dm
def _evaluate_dependency_conditions(self, name):
"""Evaluate all the conditions on a dependency with this name.
Args:
name (str): name of dependency to evaluate conditions on.
Returns:
(Dependency): new Dependency object combining all constraints.
If the package depends on <name> in the current spec
configuration, return the constrained dependency and
corresponding dependency types.
If no conditions are True (and we don't depend on it), return
``(None, None)``.
"""
vt.substitute_abstract_variants(self)
# evaluate when specs to figure out constraints on the dependency.
dep = None
for when_spec, deps_by_name in self.package_class.dependencies.items():
if not self.satisfies(when_spec):
continue
for dep_name, dependency in deps_by_name.items():
if dep_name != name:
continue
if dep is None:
dep = dp.Dependency(Spec(self.name), Spec(name), depflag=0)
try:
dep.merge(dependency)
except spack.error.UnsatisfiableSpecError as e:
e.message = (
"Conflicting conditional dependencies for spec"
"\n\n\t{0}\n\n"
"Cannot merge constraint"
"\n\n\t{1}\n\n"
"into"
"\n\n\t{2}".format(self, dependency.spec, dep.spec)
)
raise e
return dep
def _find_provider(self, vdep, provider_index):
"""Find provider for a virtual spec in the provider index.
Raise an exception if there is a conflicting virtual
dependency already in this spec.
"""
assert spack.repo.PATH.is_virtual_safe(vdep.name), vdep
# note that this defensively copies.
providers = provider_index.providers_for(vdep)
# If there is a provider for the vpkg, then use that instead of
# the virtual package.
if providers:
# Remove duplicate providers that can concretize to the same
# result.
for provider in providers:
for spec in providers:
if spec is not provider and provider.intersects(spec):
providers.remove(spec)
# Can't have multiple providers for the same thing in one spec.
if len(providers) > 1:
raise MultipleProviderError(vdep, providers)
return providers[0]
else:
# The user might have required something insufficient for
# pkg_dep -- so we'll get a conflict. e.g., user asked for
# mpi@:1.1 but some package required mpi@2.1:.
required = provider_index.providers_for(vdep.name)
if len(required) > 1:
raise MultipleProviderError(vdep, required)
elif required:
raise UnsatisfiableProviderSpecError(required[0], vdep)
def _merge_dependency(self, dependency, visited, spec_deps, provider_index, tests):
"""Merge dependency information from a Package into this Spec.
Args:
dependency (Dependency): dependency metadata from a package;
this is typically the result of merging *all* matching
dependency constraints from the package.
visited (set): set of dependency nodes already visited by
``normalize()``.
spec_deps (dict): ``dict`` of all dependencies from the spec
being normalized.
provider_index (dict): ``provider_index`` of virtual dep
providers in the ``Spec`` as normalized so far.
NOTE: Caller should assume that this routine owns the
``dependency`` parameter, i.e., it needs to be a copy of any
internal structures.
This is the core of ``normalize()``. There are some basic steps:
* If dep is virtual, evaluate whether it corresponds to an
existing concrete dependency, and merge if so.
* If it's real and it provides some virtual dep, see if it provides
what some virtual dependency wants and merge if so.
* Finally, if none of the above, merge dependency and its
constraints into this spec.
This method returns True if the spec was changed, False otherwise.
"""
changed = False
dep = dependency.spec
# If it's a virtual dependency, try to find an existing
# provider in the spec, and merge that.
virtuals = ()
if spack.repo.PATH.is_virtual_safe(dep.name):
virtuals = (dep.name,)
visited.add(dep.name)
provider = self._find_provider(dep, provider_index)
if provider:
dep = provider
else:
index = spack.provider_index.ProviderIndex(
repository=spack.repo.PATH, specs=[dep], restrict=True
)
items = list(spec_deps.items())
for name, vspec in items:
if not spack.repo.PATH.is_virtual_safe(vspec.name):
continue
if index.providers_for(vspec):
vspec._replace_with(dep)
del spec_deps[vspec.name]
changed = True
else:
required = index.providers_for(vspec.name)
if required:
raise UnsatisfiableProviderSpecError(required[0], dep)
provider_index.update(dep)
# If the spec isn't already in the set of dependencies, add it.
# Note: dep is always owned by this method. If it's from the
# caller, it's a copy from _evaluate_dependency_conditions. If it
# comes from a vdep, it's a defensive copy from _find_provider.
if dep.name not in spec_deps:
if self.concrete:
return False
spec_deps[dep.name] = dep
changed = True
else:
# merge package/vdep information into spec
try:
tty.debug("{0} applying constraint {1}".format(self.name, str(dep)))
changed |= spec_deps[dep.name].constrain(dep)
except spack.error.UnsatisfiableSpecError as e:
fmt = "An unsatisfiable {0}".format(e.constraint_type)
fmt += " constraint has been detected for spec:"
fmt += "\n\n{0}\n\n".format(spec_deps[dep.name].tree(indent=4))
fmt += "while trying to concretize the partial spec:"
fmt += "\n\n{0}\n\n".format(self.tree(indent=4))
fmt += "{0} requires {1} {2} {3}, but spec asked for {4}"
e.message = fmt.format(
self.name, dep.name, e.constraint_type, e.required, e.provided
)
raise
# Add merged spec to my deps and recurse
spec_dependency = spec_deps[dep.name]
if dep.name not in self._dependencies:
self._add_dependency(spec_dependency, depflag=dependency.depflag, virtuals=virtuals)
changed |= spec_dependency._normalize_helper(visited, spec_deps, provider_index, tests)
return changed
def _normalize_helper(self, visited, spec_deps, provider_index, tests):
"""Recursive helper function for _normalize."""
if self.name in visited:
return False
visited.add(self.name)
# If we descend into a virtual spec, there's nothing more
# to normalize. Concretize will finish resolving it later.
if self.virtual or self.external:
return False
# Avoid recursively adding constraints for already-installed packages:
# these may include build dependencies which are not needed for this
# install (since this package is already installed).
if self.concrete and self.installed:
return False
# Combine constraints from package deps with constraints from
# the spec, until nothing changes.
any_change = False
changed = True
while changed:
changed = False
for dep_name in self.package_class.dependency_names():
# Do we depend on dep_name? If so pkg_dep is not None.
dep = self._evaluate_dependency_conditions(dep_name)
# If dep is a needed dependency, merge it.
if dep:
merge = (
# caller requested test dependencies
tests is True
or (tests and self.name in tests)
or
# this is not a test-only dependency
(dep.depflag & ~dt.TEST)
)
if merge:
changed |= self._merge_dependency(
dep, visited, spec_deps, provider_index, tests
)
any_change |= changed
return any_change
def normalize(self, force=False, tests=False, user_spec_deps=None, disconnect=True):
"""When specs are parsed, any dependencies specified are hanging off
the root, and ONLY the ones that were explicitly provided are there.
Normalization turns a partial flat spec into a DAG, where:
1. Known dependencies of the root package are in the DAG.
2. Each node's dependencies dict only contains its known direct
deps.
3. There is only ONE unique spec for each package in the DAG.
* This includes virtual packages. If there a non-virtual
package that provides a virtual package that is in the spec,
then we replace the virtual package with the non-virtual one.
TODO: normalize should probably implement some form of cycle
detection, to ensure that the spec is actually a DAG.
"""
if not self.name:
raise spack.error.SpecError("Attempting to normalize anonymous spec")
# Set _normal and _concrete to False when forced
if force and not self._concrete:
self._normal = False
if self._normal:
return False
# Ensure first that all packages & compilers in the DAG exist.
self.validate_or_raise()
# Clear the DAG and collect all dependencies in the DAG, which will be
# reapplied as constraints. All dependencies collected this way will
# have been created by a previous execution of 'normalize'.
# A dependency extracted here will only be reintegrated if it is
# discovered to apply according to _normalize_helper, so
# user-specified dependencies are recorded separately in case they
# refer to specs which take several normalization passes to
# materialize.
all_spec_deps = self.flat_dependencies(disconnect=disconnect)
if user_spec_deps:
for name, spec in user_spec_deps.items():
if not name:
msg = "Attempted to normalize anonymous dependency spec"
msg += " %s" % spec
raise InvalidSpecDetected(msg)
if name not in all_spec_deps:
all_spec_deps[name] = spec
else:
all_spec_deps[name].constrain(spec)
# Initialize index of virtual dependency providers if
# concretize didn't pass us one already
provider_index = spack.provider_index.ProviderIndex(
repository=spack.repo.PATH, specs=[s for s in all_spec_deps.values()], restrict=True
)
# traverse the package DAG and fill out dependencies according
# to package files & their 'when' specs
visited = set()
any_change = self._normalize_helper(visited, all_spec_deps, provider_index, tests)
# remove any leftover dependents outside the spec from, e.g., pruning externals
valid = {id(spec) for spec in all_spec_deps.values()} | {id(self)}
for spec in all_spec_deps.values():
remove = [dep for dep in spec.dependents() if id(dep) not in valid]
for dep in remove:
del spec._dependents.edges[dep.name]
del dep._dependencies.edges[spec.name]
# Mark the spec as normal once done.
self._normal = True
return any_change
def normalized(self):
"""
Return a normalized copy of this spec without modifying this spec.
"""
clone = self.copy()
clone.normalize()
return clone
def validate_or_raise(self):
"""Checks that names and values in this spec are real. If they're not,
it will raise an appropriate exception.
@@ -3743,19 +4386,14 @@ def deps():
yield deps
@property
def namespace_if_anonymous(self):
return self.namespace if not self.name else None
def format(self, format_string: str = DEFAULT_FORMAT, color: Optional[bool] = False) -> str:
r"""Prints out attributes of a spec according to a format string.
r"""Prints out particular pieces of a spec, depending on what is
in the format string.
Using an ``{attribute}`` format specifier, any field of the spec can be
selected. Those attributes can be recursive. For example,
``s.format({compiler.version})`` will print the version of the compiler.
If the attribute in a format specifier evaluates to ``None``, then the format
specifier will evaluate to the empty string, ``""``.
Using the ``{attribute}`` syntax, any field of the spec can be
selected. Those attributes can be recursive. For example,
``s.format({compiler.version})`` will print the version of the
compiler.
Commonly used attributes of the Spec for format strings include::
@@ -3771,7 +4409,6 @@ def format(self, format_string: str = DEFAULT_FORMAT, color: Optional[bool] = Fa
architecture.os
architecture.target
prefix
namespace
Some additional special-case properties can be added::
@@ -3780,51 +4417,40 @@ def format(self, format_string: str = DEFAULT_FORMAT, color: Optional[bool] = Fa
spack_install The spack install directory
The ``^`` sigil can be used to access dependencies by name.
``s.format({^mpi.name})`` will print the name of the MPI implementation in the
spec.
``s.format({^mpi.name})`` will print the name of the MPI
implementation in the spec.
The ``@``, ``%``, and ``/`` sigils can be used to include the sigil with the
printed string. These sigils may only be used with the appropriate attributes,
listed below::
The ``@``, ``%``, ``arch=``, and ``/`` sigils
can be used to include the sigil with the printed
string. These sigils may only be used with the appropriate
attributes, listed below::
@ ``{@version}``, ``{@compiler.version}``
% ``{%compiler}``, ``{%compiler.name}``
arch= ``{arch=architecture}``
/ ``{/hash}``, ``{/hash:7}``, etc
The ``@`` sigil may also be used for any other property named ``version``.
Sigils printed with the attribute string are only printed if the attribute
string is non-empty, and are colored according to the color of the attribute.
The ``@`` sigil may also be used for any other property named
``version``. Sigils printed with the attribute string are only
printed if the attribute string is non-empty, and are colored
according to the color of the attribute.
Variants listed by name naturally print with their sigil. For example,
``spec.format('{variants.debug}')`` prints either ``+debug`` or ``~debug``
depending on the name of the variant. Non-boolean variants print as
``name=value``. To print variant names or values independently, use
Sigils are not used for printing variants. Variants listed by
name naturally print with their sigil. For example,
``spec.format('{variants.debug}')`` would print either
``+debug`` or ``~debug`` depending on the name of the
variant. Non-boolean variants print as ``name=value``. To
print variant names or values independently, use
``spec.format('{variants.<name>.name}')`` or
``spec.format('{variants.<name>.value}')``.
There are a few attributes on specs that can be specified as key-value pairs
that are *not* variants, e.g.: ``os``, ``arch``, ``architecture``, ``target``,
``namespace``, etc. You can format these with an optional ``key=`` prefix, e.g.
``{namespace=namespace}`` or ``{arch=architecture}``, etc. The ``key=`` prefix
will be colorized along with the value.
When formatting specs, key-value pairs are separated from preceding parts of the
spec by whitespace. To avoid printing extra whitespace when the formatted
attribute is not set, you can add whitespace to the key *inside* the braces of
the format string, e.g.:
{ namespace=namespace}
This evaluates to `` namespace=builtin`` if ``namespace`` is set to ``builtin``,
and to ``""`` if ``namespace`` is ``None``.
Spec format strings use ``\`` as the escape character. Use ``\{`` and ``\}`` for
literal braces, and ``\\`` for the literal ``\`` character.
Spec format strings use ``\`` as the escape character. Use
``\{`` and ``\}`` for literal braces, and ``\\`` for the
literal ``\`` character.
Args:
format_string: string containing the format to be expanded
color: True for colorized result; False for no color; None for auto color.
"""
ensure_modern_format_string(format_string)
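A few concrete uses of the rules documented above; a sketch whose outputs are indicative only, since they depend on the spec and on which attributes are set:

import spack.spec

s = spack.spec.Spec("zlib@1.2.13 +shared")

print(s.format("{name}{@versions}"))           # e.g. zlib@1.2.13
print(s.format("{variants.shared}"))           # +shared - variants carry their own sigil
print(s.format("{name}{arch=architecture}"))   # architecture is unset, so nothing is appended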
@@ -3878,6 +4504,10 @@ def format_attribute(match_object: Match) -> str:
raise SpecFormatSigilError(sig, "compilers", attribute)
elif sig == "/" and attribute != "abstract_hash":
raise SpecFormatSigilError(sig, "DAG hashes", attribute)
elif sig == "arch=":
if attribute not in ("architecture", "arch"):
raise SpecFormatSigilError(sig, "the architecture", attribute)
sig = " arch=" # include space as separator
# Iterate over components using getattr to get next element
for idx, part in enumerate(parts):
@@ -3922,19 +4552,15 @@ def format_attribute(match_object: Match) -> str:
# Set color codes for various attributes
color = None
if "architecture" in parts:
color = ARCHITECTURE_COLOR
elif "variants" in parts or sig.endswith("="):
if "variants" in parts:
color = VARIANT_COLOR
elif "architecture" in parts:
color = ARCHITECTURE_COLOR
elif "compiler" in parts or "compiler_flags" in parts:
color = COMPILER_COLOR
elif "version" in parts or "versions" in parts:
color = VERSION_COLOR
# return empty string if the value of the attribute is None.
if current is None:
return ""
# return colored output
return safe_color(sig, str(current), color)
@@ -4764,8 +5390,10 @@ def long_message(self):
)
class UnsupportedPropagationError(spack.error.SpecError):
"""Raised when propagation (==) is used with reserved variant names."""
class ArchitecturePropagationError(spack.error.SpecError):
"""Raised when the double equal symbols are used to assign
the spec's architecture.
"""
class DuplicateDependencyError(spack.error.SpecError):
@@ -4895,7 +5523,7 @@ def __init__(self, spec):
class AmbiguousHashError(spack.error.SpecError):
def __init__(self, msg, *specs):
spec_fmt = "{namespace}.{name}{@version}{%compiler}{compiler_flags}"
spec_fmt += "{variants}{ arch=architecture}{/hash:7}"
spec_fmt += "{variants}{arch=architecture}{/hash:7}"
specs_str = "\n " + "\n ".join(spec.format(spec_fmt) for spec in specs)
super().__init__(msg + specs_str)

View File

@@ -13,7 +13,7 @@
import stat
import sys
import tempfile
from typing import Callable, Dict, Generator, Iterable, List, Optional, Set
from typing import Callable, Dict, Iterable, Optional, Set
import llnl.string
import llnl.util.lang
@@ -40,7 +40,6 @@
import spack.resource
import spack.spec
import spack.stage
import spack.util.crypto
import spack.util.lock
import spack.util.path as sup
import spack.util.pattern as pattern
@@ -352,10 +351,8 @@ class Stage(LockableStagingDir):
def __init__(
self,
url_or_fetch_strategy,
*,
name=None,
mirror_paths: Optional[spack.mirror.MirrorLayout] = None,
mirrors: Optional[Iterable[spack.mirror.Mirror]] = None,
mirror_paths=None,
keep=False,
path=None,
lock=True,
@@ -409,18 +406,12 @@ def __init__(
# self.fetcher can change with mirrors.
self.default_fetcher = self.fetcher
self.search_fn = search_fn
# If we fetch from a mirror, but the original data is from say git, we can currently not
# prove that they are equal (we don't even have a tree hash in package.py). This bool is
# used to skip checksum verification and instead warn the user.
if isinstance(self.default_fetcher, fs.URLFetchStrategy):
self.skip_checksum_for_mirror = not bool(self.default_fetcher.digest)
else:
self.skip_checksum_for_mirror = True
# used for mirrored archives of repositories.
self.skip_checksum_for_mirror = True
self.srcdir = None
self.mirror_paths = mirror_paths
self.mirrors = list(mirrors) if mirrors else []
@property
def expected_archive_files(self):
@@ -475,87 +466,100 @@ def disable_mirrors(self):
"""The Stage will not attempt to look for the associated fetcher
target in any of Spack's mirrors (including the local download cache).
"""
self.mirror_paths = None
self.mirror_paths = []
def _generate_fetchers(self, mirror_only=False) -> Generator[fs.FetchStrategy, None, None]:
def fetch(self, mirror_only=False, err_msg=None):
"""Retrieves the code or archive
Args:
mirror_only (bool): only fetch from a mirror
err_msg (str or None): the error message to display if all fetchers
fail or ``None`` for the default fetch failure message
"""
fetchers = []
if not mirror_only:
fetchers.append(self.default_fetcher)
# If this archive is normally fetched from a URL, then use the same digest.
if isinstance(self.default_fetcher, fs.URLFetchStrategy):
digest = self.default_fetcher.digest
expand = self.default_fetcher.expand_archive
extension = self.default_fetcher.extension
else:
digest = None
expand = True
extension = None
# TODO: move mirror logic out of here and clean it up!
# TODO: Or @alalazo may have some ideas about how to use a
# TODO: CompositeFetchStrategy here.
if self.mirror_paths and self.mirrors:
# Add URL strategies for all the mirrors with the digest
# Insert fetchers in the order that the URLs are provided.
fetchers[:0] = (
fs.from_url_scheme(
url_util.join(mirror.fetch_url, rel_path),
checksum=digest,
expand=expand,
extension=extension,
)
for mirror in self.mirrors
self.skip_checksum_for_mirror = True
if self.mirror_paths:
# Join URLs of mirror roots with mirror paths. Because
# urljoin() will strip everything past the final '/' in
# the root, so we add a '/' if it is not present.
mirror_urls = [
url_util.join(mirror.fetch_url, rel_path)
for mirror in spack.mirror.MirrorCollection(source=True).values()
if not mirror.fetch_url.startswith("oci://")
for rel_path in self.mirror_paths
)
]
if self.mirror_paths and self.default_fetcher.cachable:
fetchers[:0] = (
spack.caches.FETCH_CACHE.fetcher(
rel_path, digest, expand=expand, extension=extension
# If this archive is normally fetched from a tarball URL,
# then use the same digest. `spack mirror` ensures that
# the checksum will be the same.
digest = None
expand = True
extension = None
if isinstance(self.default_fetcher, fs.URLFetchStrategy):
digest = self.default_fetcher.digest
expand = self.default_fetcher.expand_archive
extension = self.default_fetcher.extension
# Have to skip the checksum for things archived from
# repositories. How can this be made safer?
self.skip_checksum_for_mirror = not bool(digest)
# Add URL strategies for all the mirrors with the digest
# Insert fetchers in the order that the URLs are provided.
for url in reversed(mirror_urls):
fetchers.insert(
0, fs.from_url_scheme(url, digest, expand=expand, extension=extension)
)
for rel_path in self.mirror_paths
)
yield from fetchers
if self.default_fetcher.cachable:
for rel_path in reversed(list(self.mirror_paths)):
cache_fetcher = spack.caches.FETCH_CACHE.fetcher(
rel_path, digest, expand=expand, extension=extension
)
fetchers.insert(0, cache_fetcher)
# The search function may be expensive, so wait until now to call it so the user can stop
# if a prior fetcher succeeded
if self.search_fn and not mirror_only:
yield from self.search_fn()
def generate_fetchers():
for fetcher in fetchers:
yield fetcher
# The search function may be expensive, so wait until now to
# call it so the user can stop if a prior fetcher succeeded
if self.search_fn and not mirror_only:
dynamic_fetchers = self.search_fn()
for fetcher in dynamic_fetchers:
yield fetcher
def fetch(self, mirror_only: bool = False, err_msg: Optional[str] = None) -> None:
"""Retrieves the code or archive
def print_errors(errors):
for msg in errors:
tty.debug(msg)
Args:
mirror_only: only fetch from a mirror
err_msg: the error message to display if all fetchers fail or ``None`` for the default
fetch failure message
"""
errors: List[str] = []
for fetcher in self._generate_fetchers(mirror_only):
errors = []
for fetcher in generate_fetchers():
try:
fetcher.stage = self
self.fetcher = fetcher
self.fetcher.fetch()
break
except fs.NoCacheError:
except spack.fetch_strategy.NoCacheError:
# Don't bother reporting when something is not cached.
continue
except fs.FailedDownloadError as f:
errors.extend(f"{fetcher}: {e.__class__.__name__}: {e}" for e in f.exceptions)
continue
except spack.error.SpackError as e:
errors.append(f"{fetcher}: {e.__class__.__name__}: {e}")
errors.append("Fetching from {0} failed.".format(fetcher))
tty.debug(e)
continue
else:
print_errors(errors)
self.fetcher = self.default_fetcher
if err_msg:
raise spack.error.FetchError(err_msg)
raise spack.error.FetchError(
f"All fetchers failed for {self.name}", "\n".join(f" {e}" for e in errors)
)
default_msg = "All fetchers failed for {0}".format(self.name)
raise spack.error.FetchError(err_msg or default_msg, None)
print_errors(errors)
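For orientation, a minimal way to drive a Stage by hand, mirroring the fetch() flow above; the URL is a placeholder, so this is illustrative rather than directly runnable:

import spack.fetch_strategy as fs
import spack.stage

fetcher = fs.URLFetchStrategy(url="https://example.com/pkg-1.0.tar.gz")
with spack.stage.Stage(fetcher, keep=False) as stage:
    stage.fetch()            # walks the candidate fetchers until one succeeds
    stage.expand_archive()   # unpacks the archive into stage.source_path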
def steal_source(self, dest):
"""Copy the source_path directory in its entirety to directory dest
@@ -593,19 +597,16 @@ def steal_source(self, dest):
self.destroy()
def check(self):
"""Check the downloaded archive against a checksum digest."""
"""Check the downloaded archive against a checksum digest.
No-op if this stage checks code out of a repository."""
if self.fetcher is not self.default_fetcher and self.skip_checksum_for_mirror:
cache = isinstance(self.fetcher, fs.CacheURLFetchStrategy)
if cache:
secure_msg = "your download cache is in a secure location"
else:
secure_msg = "you trust this mirror and have a secure connection"
tty.warn(
f"Using {'download cache' if cache else 'a mirror'} instead of version control",
"The required sources are normally checked out from a version control system, "
f"but have been archived {'in download cache' if cache else 'on a mirror'}: "
f"{self.fetcher}. Spack lacks a tree hash to verify the integrity of this "
f"archive. Make sure {secure_msg}.",
"Fetching from mirror without a checksum!",
"This package is normally checked out from a version "
"control system, but it has been archived on a spack "
"mirror. This means we cannot know a checksum for the "
"tarball in advance. Be sure that your connection to "
"this mirror is secure!",
)
elif spack.config.get("config:checksum"):
self.fetcher.check()
@@ -927,7 +928,7 @@ def interactive_version_filter(
orig_url_dict = url_dict # only copy when using editor to modify
print_header = True
VERSION_COLOR = spack.spec.VERSION_COLOR
while sys.stdin.isatty():
while True:
if print_header:
has_filter = version_filter != VersionList([":"])
header = []
@@ -944,9 +945,7 @@ def interactive_version_filter(
num_new = sum(1 for v in sorted_and_filtered if v not in known_versions)
header.append(f"{llnl.string.plural(num_new, 'new version')}")
if has_filter:
header.append(
colorize(f"Filtered by {VERSION_COLOR}@@{version_filter}@. (clear with c)")
)
header.append(colorize(f"Filtered by {VERSION_COLOR}@@{version_filter}@."))
version_with_url = [
colorize(
@@ -1176,7 +1175,7 @@ def _fetch_and_checksum(url, options, keep_stage, action_fn=None):
try:
url_or_fs = url
if options:
url_or_fs = fs.URLFetchStrategy(url=url, fetch_options=options)
url_or_fs = fs.URLFetchStrategy(url, fetch_options=options)
with Stage(url_or_fs, keep=keep_stage) as stage:
# Fetch the archive
@@ -1189,7 +1188,7 @@ def _fetch_and_checksum(url, options, keep_stage, action_fn=None):
# Checksum the archive and add it to the list
checksum = spack.util.crypto.checksum(hashlib.sha256, stage.archive_file)
return checksum, None
except fs.FailedDownloadError:
except FailedDownloadError:
return None, f"[WORKER] Failed to fetch {url}"
except Exception as e:
return None, f"[WORKER] Something failed on {url}, skipping. ({e})"
@@ -1209,3 +1208,7 @@ class RestageError(StageError):
class VersionFetchError(StageError):
"""Raised when we can't determine a URL to fetch a package."""
# Keep this in namespace for convenience
FailedDownloadError = fs.FailedDownloadError

View File

@@ -208,6 +208,7 @@ def test_satisfy_strict_constraint_when_not_concrete(architecture_tuple, constra
],
)
@pytest.mark.usefixtures("mock_packages", "config")
@pytest.mark.only_clingo("Fixing the parser broke this test for the original concretizer.")
@pytest.mark.skipif(
str(archspec.cpu.host().family) != "x86_64", reason="tests are for x86_64 uarch ranges"
)

View File

@@ -337,7 +337,7 @@ def test_relative_rpaths_install_nondefault(mirror_dir):
buildcache_cmd("install", "-uf", cspec.name)
def test_push_and_fetch_keys(mock_gnupghome, tmp_path):
def test_push_and_fetch_keys(mock_gnupghome):
testpath = str(mock_gnupghome)
mirror = os.path.join(testpath, "mirror")
@@ -357,7 +357,7 @@ def test_push_and_fetch_keys(mock_gnupghome, tmp_path):
assert len(keys) == 1
fpr = keys[0]
bindist.push_keys(mirror, keys=[fpr], tmpdir=str(tmp_path), update_index=True)
bindist.push_keys(mirror, keys=[fpr], regenerate_index=True)
# dir 2: import the key from the mirror, and confirm that its fingerprint
# matches the one created above
@@ -464,7 +464,7 @@ def test_generate_index_missing(monkeypatch, tmpdir, mutable_config):
assert "libelf" not in cache_list
def test_generate_key_index_failure(monkeypatch, tmp_path):
def test_generate_key_index_failure(monkeypatch):
def list_url(url, recursive=False):
if "fails-listing" in url:
raise Exception("Couldn't list the directory")
@@ -477,13 +477,13 @@ def push_to_url(*args, **kwargs):
monkeypatch.setattr(web_util, "push_to_url", push_to_url)
with pytest.raises(CannotListKeys, match="Encountered problem listing keys"):
bindist.generate_key_index("s3://non-existent/fails-listing", str(tmp_path))
bindist.generate_key_index("s3://non-existent/fails-listing")
with pytest.raises(GenerateIndexError, match="problem pushing .* Couldn't upload"):
bindist.generate_key_index("s3://non-existent/fails-uploading", str(tmp_path))
bindist.generate_key_index("s3://non-existent/fails-uploading")
def test_generate_package_index_failure(monkeypatch, tmp_path, capfd):
def test_generate_package_index_failure(monkeypatch, capfd):
def mock_list_url(url, recursive=False):
raise Exception("Some HTTP error")
@@ -492,16 +492,15 @@ def mock_list_url(url, recursive=False):
test_url = "file:///fake/keys/dir"
with pytest.raises(GenerateIndexError, match="Unable to generate package index"):
bindist.generate_package_index(test_url, str(tmp_path))
bindist.generate_package_index(test_url)
assert (
"Warning: Encountered problem listing packages at "
f"{test_url}/{bindist.BUILD_CACHE_RELATIVE_PATH}: Some HTTP error"
f"Warning: Encountered problem listing packages at {test_url}: Some HTTP error"
in capfd.readouterr().err
)
def test_generate_indices_exception(monkeypatch, tmp_path, capfd):
def test_generate_indices_exception(monkeypatch, capfd):
def mock_list_url(url, recursive=False):
raise Exception("Test Exception handling")
@@ -510,10 +509,10 @@ def mock_list_url(url, recursive=False):
url = "file:///fake/keys/dir"
with pytest.raises(GenerateIndexError, match=f"Encountered problem listing keys at {url}"):
bindist.generate_key_index(url, str(tmp_path))
bindist.generate_key_index(url)
with pytest.raises(GenerateIndexError, match="Unable to generate package index"):
bindist.generate_package_index(url, str(tmp_path))
bindist.generate_package_index(url)
assert f"Encountered problem listing packages at {url}" in capfd.readouterr().err

View File

@@ -13,34 +13,34 @@
import spack.spec
import spack.util.url
install = spack.main.SpackCommand("install")
pytestmark = pytest.mark.not_on_windows("does not run on windows")
def test_build_tarball_overwrite(install_mockery, mock_fetch, monkeypatch, tmp_path):
spec = spack.spec.Spec("trivial-install-test-package").concretized()
spec.package.do_install(fake=True)
def test_build_tarball_overwrite(install_mockery, mock_fetch, monkeypatch, tmpdir):
with tmpdir.as_cwd():
spec = spack.spec.Spec("trivial-install-test-package").concretized()
install(str(spec))
specs = [spec]
# Runs fine the first time, throws the second time
out_url = spack.util.url.path_to_file_url(str(tmpdir))
bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))
with pytest.raises(bd.NoOverwriteException):
bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))
# Runs fine the first time, second time it's a no-op
out_url = spack.util.url.path_to_file_url(str(tmp_path))
skipped = bd.push_or_raise(specs, out_url, signing_key=None)
assert not skipped
# Should work fine with force=True
bd.push_or_raise(spec, out_url, bd.PushOptions(force=True, unsigned=True))
skipped = bd.push_or_raise(specs, out_url, signing_key=None)
assert skipped == specs
# Remove the tarball and try again.
# This must *also* throw, because of the existing .spec.json file
os.remove(
os.path.join(
bd.build_cache_prefix("."),
bd.tarball_directory_name(spec),
bd.tarball_name(spec, ".spack"),
)
)
# Should work fine with force=True
skipped = bd.push_or_raise(specs, out_url, signing_key=None, force=True)
assert not skipped
# Remove the tarball, which should cause push to push.
os.remove(
tmp_path
/ bd.BUILD_CACHE_RELATIVE_PATH
/ bd.tarball_directory_name(spec)
/ bd.tarball_name(spec, ".spack")
)
skipped = bd.push_or_raise(specs, out_url, signing_key=None)
assert not skipped
with pytest.raises(bd.NoOverwriteException):
bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))

View File

@@ -6,7 +6,6 @@
import os
import platform
import posixpath
import sys
import pytest
@@ -288,25 +287,6 @@ def platform_pathsep(pathlist):
assert name not in os.environ
def test_compiler_custom_env(config, mock_packages, monkeypatch, working_env):
if sys.platform == "win32":
test_path = r"C:\test\path\element\custom-env" + "\\"
else:
test_path = r"/test/path/element/custom-env/"
def custom_env(pkg, env):
env.prepend_path("PATH", test_path)
env.append_flags("ENV_CUSTOM_CC_FLAGS", "--custom-env-flag1")
pkg = spack.spec.Spec("cmake").concretized().package
monkeypatch.setattr(pkg.compiler, "setup_custom_environment", custom_env)
spack.build_environment.setup_package(pkg, False)
# Note: trailing slash may be stripped by internal logic
assert test_path[:-1] in os.environ["PATH"]
assert "--custom-env-flag1" in os.environ["ENV_CUSTOM_CC_FLAGS"]
def test_external_config_env(mock_packages, mutable_config, working_env):
cmake_config = {
"externals": [

View File

@@ -355,15 +355,6 @@ def test_fc_flags(wrapper_environment, wrapper_flags):
)
def test_always_cflags(wrapper_environment, wrapper_flags):
with set_env(SPACK_ALWAYS_CFLAGS="-always1 -always2"):
check_args(
cc,
["-v", "--cmd-line-v-opt"],
[real_cc] + ["-always1", "-always2"] + ["-v", "--cmd-line-v-opt"],
)
def test_Wl_parsing(wrapper_environment):
check_args(
cc,

View File

@@ -286,7 +286,7 @@ def _fail(self, args):
def test_ci_create_buildcache(tmpdir, working_env, config, mock_packages, monkeypatch):
"""Test that create_buildcache returns a list of objects with the correct
keys and types."""
monkeypatch.setattr(ci, "push_to_build_cache", lambda a, b, c: True)
monkeypatch.setattr(spack.ci, "_push_to_build_cache", lambda a, b, c: True)
results = ci.create_buildcache(
None, destination_mirror_urls=["file:///fake-url-one", "file:///fake-url-two"]

View File

@@ -12,9 +12,7 @@
import spack.binary_distribution
import spack.cmd.buildcache
import spack.deptypes
import spack.environment as ev
import spack.error
import spack.main
import spack.spec
import spack.util.url
@@ -384,14 +382,11 @@ def test_correct_specs_are_pushed(
packages_to_push = []
def fake_push(specs, *args, **kwargs):
assert all(isinstance(s, Spec) for s in specs)
packages_to_push.extend(s.name for s in specs)
skipped = []
errors = []
return skipped, errors
def fake_push(node, push_url, options):
assert isinstance(node, Spec)
packages_to_push.append(node.name)
monkeypatch.setattr(spack.binary_distribution, "_push", fake_push)
monkeypatch.setattr(spack.binary_distribution, "push_or_raise", fake_push)
buildcache_create_args = ["create", "--unsigned"]
@@ -448,54 +443,3 @@ def test_skip_no_redistribute(mock_packages, config):
filtered = spack.cmd.buildcache._skip_no_redistribute_for_public(specs)
assert not any(s.name == "no-redistribute" for s in filtered)
assert any(s.name == "no-redistribute-dependent" for s in filtered)
def test_best_effort_vs_fail_fast_when_dep_not_installed(tmp_path, mutable_database):
"""When --fail-fast is passed, the push command should fail if it immediately finds an
uninstalled dependency. Otherwise, failure to push one dependency shouldn't prevent the
others from being pushed."""
mirror("add", "--unsigned", "my-mirror", str(tmp_path))
# Uninstall mpich so that its dependent mpileaks can't be pushed
for s in mutable_database.query_local("mpich"):
s.package.do_uninstall(force=True)
with pytest.raises(spack.cmd.buildcache.PackagesAreNotInstalledError, match="mpich"):
buildcache("push", "--update-index", "--fail-fast", "my-mirror", "mpileaks^mpich")
# nothing should be pushed due to --fail-fast.
assert not os.listdir(tmp_path)
assert not spack.binary_distribution.update_cache_and_get_specs()
with pytest.raises(spack.cmd.buildcache.PackageNotInstalledError):
buildcache("push", "--update-index", "my-mirror", "mpileaks^mpich")
specs = spack.binary_distribution.update_cache_and_get_specs()
# everything but mpich should be pushed
mpileaks = mutable_database.query_local("mpileaks^mpich")[0]
assert set(specs) == {s for s in mpileaks.traverse() if s.name != "mpich"}
def test_push_without_build_deps(tmp_path, temporary_store, mock_packages, mutable_config):
"""Spack should not error when build deps are uninstalled and --without-build-dependenies is
passed."""
mirror("add", "--unsigned", "my-mirror", str(tmp_path))
s = spack.spec.Spec("dtrun3").concretized()
s.package.do_install(fake=True)
s["dtbuild3"].package.do_uninstall()
# fails when build deps are required
with pytest.raises(spack.error.SpackError, match="package not installed"):
buildcache(
"push", "--update-index", "--with-build-dependencies", "my-mirror", f"/{s.dag_hash()}"
)
# succeeds when build deps are not required
buildcache(
"push", "--update-index", "--without-build-dependencies", "my-mirror", f"/{s.dag_hash()}"
)
assert spack.binary_distribution.update_cache_and_get_specs() == [s]
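Taken together, the tests removed here drove the CLI roughly as follows (a sketch reusing the `mirror` and `buildcache` SpackCommand wrappers from this module; the spec names and the hash variable `s` are illustrative):

mirror("add", "--unsigned", "my-mirror", str(tmp_path))

# --fail-fast aborts the push as soon as an uninstalled dependency is encountered.
buildcache("push", "--update-index", "--fail-fast", "my-mirror", "mpileaks^mpich")

# --without-build-dependencies tolerates uninstalled build-only dependencies.
buildcache("push", "--update-index", "--without-build-dependencies", "my-mirror", f"/{s.dag_hash()}")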

File diff suppressed because it is too large

View File

@@ -6,6 +6,7 @@
import filecmp
import os
import shutil
import subprocess
import pytest
@@ -155,6 +156,22 @@ def test_update_with_header(tmpdir):
commands("--update", str(update_file), "--header", str(filename))
@pytest.mark.xfail
def test_no_pipe_error():
"""Make sure we don't see any pipe errors when piping output."""
proc = subprocess.Popen(
["spack", "commands", "--format=rst"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# Call close() on stdout to cause a broken pipe
proc.stdout.close()
proc.wait()
stderr = proc.stderr.read().decode("utf-8")
assert "Broken pipe" not in stderr
def test_bash_completion():
"""Test the bash completion writer."""
out1 = commands("--format=bash")

View File

@@ -16,6 +16,8 @@
debug = SpackCommand("debug")
pytestmark = pytest.mark.not_on_windows("does not run on windows")
@pytest.mark.db
def test_create_db_tarball(tmpdir, database):
@@ -58,3 +60,4 @@ def test_report():
assert get_version() in out
assert platform.python_version() in out
assert str(architecture) in out
assert spack.config.get("config:concretizer") in out

View File

@@ -1057,6 +1057,7 @@ def test_env_with_included_config_file(mutable_mock_env_path, packages_file):
assert any(x.satisfies("mpileaks@2.2") for x in e._get_environment_specs())
@pytest.mark.only_clingo("original concretizer does not support requirements")
def test_config_change_existing(mutable_mock_env_path, tmp_path, mock_packages, mutable_config):
"""Test ``config change`` with config in the ``spack.yaml`` as well as an
included file scope.
@@ -1132,6 +1133,7 @@ def test_config_change_existing(mutable_mock_env_path, tmp_path, mock_packages,
spack.spec.Spec("bowtie@1.2.2").concretized()
@pytest.mark.only_clingo("original concretizer does not support requirements")
def test_config_change_new(mutable_mock_env_path, tmp_path, mock_packages, mutable_config):
spack_yaml = tmp_path / ev.manifest_name
spack_yaml.write_text(
@@ -1734,17 +1736,6 @@ def test_env_include_concrete_env_yaml(env_name):
assert test.path in combined_yaml["include_concrete"]
@pytest.mark.regression("45766")
@pytest.mark.parametrize("format", ["v1", "v2", "v3"])
def test_env_include_concrete_old_env(format, tmpdir):
lockfile = os.path.join(spack.paths.test_path, "data", "legacy_env", f"{format}.lock")
# create an env from old .lock file -- this does not update the format
env("create", "old-env", lockfile)
env("create", "--include-concrete", "old-env", "test")
assert ev.read("old-env").all_specs() == ev.read("test").all_specs()
def test_env_bad_include_concrete_env():
with pytest.raises(ev.SpackEnvironmentError):
env("create", "--include-concrete", "nonexistant_env", "combined_env")
@@ -2341,6 +2332,8 @@ def test_stack_concretize_extraneous_deps(tmpdir, mock_packages):
# FIXME: constraints for stacks
# FIXME: This now works for statically-determinable invalid deps
# FIXME: But it still does not work for dynamically determined invalid deps
# if spack.config.get('config:concretizer') == 'clingo':
# pytest.skip('Clingo concretizer does not support soft constraints')
filename = str(tmpdir.join("spack.yaml"))
with open(filename, "w") as f:
@@ -3187,7 +3180,9 @@ def test_concretize_user_specs_together():
e.remove("mpich")
e.add("mpich2")
exc_cls = spack.error.UnsatisfiableSpecError
exc_cls = spack.error.SpackError
if spack.config.get("config:concretizer") == "clingo":
exc_cls = spack.error.UnsatisfiableSpecError
# Concretizing without invalidating the concrete spec for mpileaks fails
with pytest.raises(exc_cls):
@@ -3213,8 +3208,10 @@ def test_duplicate_packages_raise_when_concretizing_together():
e.add("mpileaks~opt")
e.add("mpich")
exc_cls = spack.error.UnsatisfiableSpecError
match = r"You could consider setting `concretizer:unify`"
exc_cls, match = spack.error.SpackError, None
if spack.config.get("config:concretizer") == "clingo":
exc_cls = spack.error.UnsatisfiableSpecError
match = r"You could consider setting `concretizer:unify`"
with pytest.raises(exc_cls, match=match):
e.concretize()

View File

@@ -14,7 +14,6 @@
import spack.cmd.external
import spack.detection
import spack.detection.path
import spack.repo
from spack.main import SpackCommand
from spack.spec import Spec
@@ -56,9 +55,7 @@ def test_find_external_two_instances_same_package(mock_executable):
search_paths = [str(cmake1.parent.parent), str(cmake2.parent.parent)]
finder = spack.detection.path.ExecutablesFinder()
detected_specs = finder.find(
pkg_name="cmake", initial_guess=search_paths, repository=spack.repo.PATH
)
detected_specs = finder.find(pkg_name="cmake", initial_guess=search_paths)
assert len(detected_specs) == 2
spec_to_path = {e.spec: e.prefix for e in detected_specs}
@@ -100,7 +97,7 @@ def test_get_executables(working_env, mock_executable):
# TODO: this test should be made to work, but in the meantime it is
# causing intermittent (spurious) CI failures on all PRs
@pytest.mark.not_on_windows("Test fails intermittently on Windows")
@pytest.mark.skipif(sys.platform == "win32", reason="Test fails intermittently on Windows")
def test_find_external_cmd_not_buildable(mutable_config, working_env, mock_executable):
"""When the user invokes 'spack external find --not-buildable', the config
for any package where Spack finds an external version should be marked as
@@ -117,31 +114,11 @@ def test_find_external_cmd_not_buildable(mutable_config, working_env, mock_execu
@pytest.mark.parametrize(
"names,tags,exclude,expected",
[
# find -all
(
None,
["detectable"],
[],
[
"builtin.mock.find-externals1",
"builtin.mock.gcc",
"builtin.mock.llvm",
"builtin.mock.intel-oneapi-compilers",
],
),
# find --all
(None, ["detectable"], [], ["builtin.mock.find-externals1"]),
# find --all --exclude find-externals1
(
None,
["detectable"],
["builtin.mock.find-externals1"],
["builtin.mock.gcc", "builtin.mock.llvm", "builtin.mock.intel-oneapi-compilers"],
),
(
None,
["detectable"],
["find-externals1"],
["builtin.mock.gcc", "builtin.mock.llvm", "builtin.mock.intel-oneapi-compilers"],
),
(None, ["detectable"], ["builtin.mock.find-externals1"], []),
(None, ["detectable"], ["find-externals1"], []),
# find cmake (and cmake is not detectable)
(["cmake"], ["detectable"], [], []),
],
@@ -266,9 +243,7 @@ def _determine_variants(cls, exes, version_str):
monkeypatch.setattr(gcc_cls, "determine_variants", _determine_variants)
finder = spack.detection.path.ExecutablesFinder()
detected_specs = finder.find(
pkg_name="gcc", initial_guess=[str(search_dir)], repository=spack.repo.PATH
)
detected_specs = finder.find(pkg_name="gcc", initial_guess=[str(search_dir)])
assert len(detected_specs) == 1
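Pulled out of the diff, the newer finder call reads roughly like this (a sketch; `search_dir` stands for a directory containing a fake executable created by `mock_executable`, and the keyword-only `repository` argument is the one added in the hunks above):

import spack.repo
import spack.detection.path

finder = spack.detection.path.ExecutablesFinder()
detected = finder.find(
    pkg_name="cmake",
    initial_guess=[str(search_dir)],  # directories to scan for candidate executables
    repository=spack.repo.PATH,       # newer call sites pass the active repository explicitly
)
# Each detection carries the reconstructed spec and the prefix it was found under.
spec_to_path = {entry.spec: entry.prefix for entry in detected}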

View File

@@ -2,13 +2,29 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
from spack.main import SpackCommand
@pytest.mark.xfail
def test_reuse_after_help():
"""Test `spack help` can be called twice with the same SpackCommand."""
help_cmd = SpackCommand("help", subprocess=True)
help_cmd()
# This second invocation will somehow fail because the parser no
# longer works after add_all_commands() is called in
# SpackArgumentParser.format_help_sections().
#
# TODO: figure out why this doesn't work properly and change this
# test to use a single SpackCommand.
#
# It seems that parse_known_args() finds "too few arguments" the
# second time through b/c add_all_commands() ends up leaving extra
# positionals in the parser. But this used to work before we loaded
# commands lazily.
help_cmd()
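For context, `SpackCommand` wrappers such as the one above are callable objects that run a Spack command and return its captured output; `subprocess=True`, as used here, requests that the command run in a separate process. A minimal usage sketch:

from spack.main import SpackCommand

help_cmd = SpackCommand("help")
output = help_cmd()  # captured output as a string, e.g. the top-level usage text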

View File

@@ -30,6 +30,7 @@ def test_spec():
assert "mpich@3.0.4" in output
@pytest.mark.only_clingo("Known failure of the original concretizer")
def test_spec_concretizer_args(mutable_database, do_not_check_runtimes_on_reuse):
"""End-to-end test of CLI concretizer prefs.

View File

@@ -24,12 +24,6 @@
style = spack.main.SpackCommand("style")
ISORT = which("isort")
BLACK = which("black")
FLAKE8 = which("flake8")
MYPY = which("mypy")
@pytest.fixture(autouse=True)
def has_develop_branch(git):
"""spack style requires git and a develop branch to run -- skip if we're missing either."""
@@ -196,8 +190,8 @@ def external_style_root(git, flake8_package_with_errors, tmpdir):
yield tmpdir, py_file
@pytest.mark.skipif(not ISORT, reason="isort is not installed.")
@pytest.mark.skipif(not BLACK, reason="black is not installed.")
@pytest.mark.skipif(not which("isort"), reason="isort is not installed.")
@pytest.mark.skipif(not which("black"), reason="black is not installed.")
def test_fix_style(external_style_root):
"""Make sure spack style --fix works."""
tmpdir, py_file = external_style_root
@@ -215,10 +209,10 @@ def test_fix_style(external_style_root):
assert filecmp.cmp(broken_py, fixed_py)
@pytest.mark.skipif(not FLAKE8, reason="flake8 is not installed.")
@pytest.mark.skipif(not ISORT, reason="isort is not installed.")
@pytest.mark.skipif(not MYPY, reason="mypy is not installed.")
@pytest.mark.skipif(not BLACK, reason="black is not installed.")
@pytest.mark.skipif(not which("flake8"), reason="flake8 is not installed.")
@pytest.mark.skipif(not which("isort"), reason="isort is not installed.")
@pytest.mark.skipif(not which("mypy"), reason="mypy is not installed.")
@pytest.mark.skipif(not which("black"), reason="black is not installed.")
def test_external_root(external_style_root, capfd):
"""Ensure we can run in a separate root directory w/o configuration files."""
tmpdir, py_file = external_style_root
@@ -244,7 +238,7 @@ def test_external_root(external_style_root, capfd):
assert "lib/spack/spack/dummy.py:7: [F401] 'os' imported but unused" in output
@pytest.mark.skipif(not FLAKE8, reason="flake8 is not installed.")
@pytest.mark.skipif(not which("flake8"), reason="flake8 is not installed.")
def test_style(flake8_package, tmpdir):
root_relative = os.path.relpath(flake8_package, spack.paths.prefix)
@@ -270,7 +264,7 @@ def test_style(flake8_package, tmpdir):
assert "spack style checks were clean" in output
@pytest.mark.skipif(not FLAKE8, reason="flake8 is not installed.")
@pytest.mark.skipif(not which("flake8"), reason="flake8 is not installed.")
def test_style_with_errors(flake8_package_with_errors):
root_relative = os.path.relpath(flake8_package_with_errors, spack.paths.prefix)
output = style(
@@ -281,8 +275,8 @@ def test_style_with_errors(flake8_package_with_errors):
assert "spack style found errors" in output
@pytest.mark.skipif(not BLACK, reason="black is not installed.")
@pytest.mark.skipif(not FLAKE8, reason="flake8 is not installed.")
@pytest.mark.skipif(not which("black"), reason="black is not installed.")
@pytest.mark.skipif(not which("flake8"), reason="flake8 is not installed.")
def test_style_with_black(flake8_package_with_errors):
output = style("--tool", "black,flake8", flake8_package_with_errors, fail_on_error=False)
assert "black found errors" in output

View File

@@ -11,6 +11,13 @@
versions = SpackCommand("versions")
def test_safe_only_versions():
"""Only test the safe versions of a package.
(Using the deprecated command line argument)
"""
versions("--safe-only", "zlib")
def test_safe_versions():
"""Only test the safe versions of a package."""

View File

@@ -425,6 +425,9 @@ def test_compiler_flags_differ_identical_compilers(self, mutable_config, clang12
spec.concretize()
assert spec.satisfies("cflags=-O2")
@pytest.mark.only_clingo(
"Optional compiler propagation isn't deprecated for original concretizer"
)
@pytest.mark.parametrize(
"spec_str,expected,not_expected",
[
@@ -469,6 +472,7 @@ def test_compiler_inherited_upwards(self):
for dep in spec.traverse():
assert "%clang" in dep
@pytest.mark.only_clingo("Fixing the parser broke this test for the original concretizer")
def test_architecture_deep_inheritance(self, mock_targets, compiler_factory):
"""Make sure that indirect dependencies receive architecture
information from the root even when partial architecture information
@@ -533,6 +537,9 @@ def test_concretize_two_virtuals_with_dual_provider_and_a_conflict(self):
with pytest.raises(spack.error.SpackError):
s.concretize()
@pytest.mark.only_clingo(
"Optional compiler propagation isn't deprecated for original concretizer"
)
@pytest.mark.parametrize(
"spec_str,expected_propagation",
[
@@ -560,6 +567,9 @@ def test_concretize_propagate_disabled_variant(self, spec_str, expected_propagat
for key, expected_satisfies in expected_propagation:
spec[key].satisfies(expected_satisfies)
@pytest.mark.only_clingo(
"Optional compiler propagation isn't deprecated for original concretizer"
)
def test_concretize_propagated_variant_is_not_passed_to_dependent(self):
"""Test a package variant value was passed from its parent."""
spec = Spec("ascent~~shared +adios2 ^adios2+shared")
@@ -568,6 +578,9 @@ def test_concretize_propagated_variant_is_not_passed_to_dependent(self):
assert spec.satisfies("^adios2+shared")
assert spec.satisfies("^bzip2~shared")
@pytest.mark.only_clingo(
"Optional compiler propagation isn't deprecated for original concretizer"
)
def test_concretize_propagate_specified_variant(self):
"""Test that only the specified variant is propagated to the dependencies"""
spec = Spec("parent-foo-bar ~~foo")
@@ -576,6 +589,7 @@ def test_concretize_propagate_specified_variant(self):
assert spec.satisfies("~foo") and spec.satisfies("^dependency-foo-bar~foo")
assert spec.satisfies("+bar") and not spec.satisfies("^dependency-foo-bar+bar")
@pytest.mark.only_clingo("Original concretizer is allowed to forego variant propagation")
def test_concretize_propagate_multivalue_variant(self):
"""Test that multivalue variants are propagating the specified value(s)
to their dependencies. The dependencies should not have the default value"""
@@ -632,6 +646,11 @@ def test_virtual_is_fully_expanded_for_mpileaks(self):
assert all(not d.dependencies(name="mpi") for d in spec.traverse())
assert all(x in spec for x in ("zmpi", "mpi"))
def test_my_dep_depends_on_provider_of_my_virtual_dep(self):
spec = Spec("indirect-mpich")
spec.normalize()
spec.concretize()
@pytest.mark.parametrize("compiler_str", ["clang", "gcc", "gcc@10.2.1", "clang@:15.0.0"])
def test_compiler_inheritance(self, compiler_str):
spec_str = "mpileaks %{0}".format(compiler_str)
@@ -727,6 +746,7 @@ def test_conflicts_in_spec(self, conflict_spec):
with pytest.raises(spack.error.SpackError):
s.concretize()
@pytest.mark.only_clingo("Testing debug statements specific to new concretizer")
def test_conflicts_show_cores(self, conflict_spec, monkeypatch):
s = Spec(conflict_spec)
with pytest.raises(spack.error.SpackError) as e:
@@ -900,6 +920,7 @@ def test_concretize_anonymous_dep(self, spec_str):
("bowtie@1.2.2 os=redhat6", "%gcc@11.1.0"),
],
)
@pytest.mark.only_clingo("Original concretizer cannot work around conflicts")
def test_compiler_conflicts_in_package_py(
self, spec_str, expected_str, clang12_with_flags, gcc11_with_flags
):
@@ -1015,6 +1036,7 @@ def test_patching_dependencies(self, spec_str, patched_deps):
("quantum-espresso~veritas", ["^libelf@0.8.13"]),
],
)
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_working_around_conflicting_defaults(self, spec_str, expected):
s = Spec(spec_str).concretized()
@@ -1027,6 +1049,7 @@ def test_working_around_conflicting_defaults(self, spec_str, expected):
"spec_str,expected",
[("cmake", ["%clang"]), ("cmake %gcc", ["%gcc"]), ("cmake %clang", ["%clang"])],
)
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_external_package_and_compiler_preferences(self, spec_str, expected, mutable_config):
packages_yaml = {
"all": {"compiler": ["clang", "gcc"]},
@@ -1043,6 +1066,7 @@ def test_external_package_and_compiler_preferences(self, spec_str, expected, mut
assert s.satisfies(condition)
@pytest.mark.regression("5651")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_package_with_constraint_not_met_by_external(self):
"""Check that if we have an external package A at version X.Y in
packages.yaml, but our spec doesn't allow X.Y as a version, then
@@ -1057,6 +1081,7 @@ def test_package_with_constraint_not_met_by_external(self):
assert not s["libelf"].external
@pytest.mark.regression("9744")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_cumulative_version_ranges_with_different_length(self):
s = Spec("cumulative-vrange-root").concretized()
assert s.concrete
@@ -1084,6 +1109,7 @@ def test_dependency_conditional_on_another_dependency_state(self):
@pytest.mark.parametrize(
"spec_str,expected", [("cmake %gcc", "%gcc"), ("cmake %clang", "%clang")]
)
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_compiler_constraint_with_external_package(self, spec_str, expected):
packages_yaml = {
"cmake": {"externals": [{"spec": "cmake@3.4.3", "prefix": "/usr"}], "buildable": False}
@@ -1128,9 +1154,18 @@ def test_compiler_in_nonbuildable_external_package(
spack.config.set("packages", packages_yaml)
s = Spec(spec_str).concretized()
if xfailold and spack.config.get("config:concretizer") == "original":
pytest.xfail("This only works on the ASP-based concretizer")
assert s.satisfies(expected)
assert "external-common-perl" not in [d.name for d in s.dependencies()]
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_external_packages_have_consistent_hash(self):
s, t = Spec("externaltool"), Spec("externaltool")
s._old_concretize(), t._new_concretize()
assert s.dag_hash() == t.dag_hash()
def test_external_that_would_require_a_virtual_dependency(self):
s = Spec("requires-virtual").concretized()
@@ -1147,6 +1182,7 @@ def test_transitive_conditional_virtual_dependency(self, mutable_config):
assert s.satisfies("^[virtuals=stuff] externalvirtual")
@pytest.mark.regression("20040")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_conditional_provides_or_depends_on(self):
# Check that we can concretize correctly a spec that can either
# provide a virtual or depend on it based on the value of a variant
@@ -1185,6 +1221,7 @@ def test_activating_test_dependencies(self, spec_str, tests_arg, with_dep, witho
assert not node.dependencies(deptype="test"), msg.format(pkg_name)
@pytest.mark.regression("20019")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_compiler_match_is_preferred_to_newer_version(self, compiler_factory):
# This spec depends on openblas. Openblas has a conflict
# that doesn't allow newer versions with gcc@4.4.0. Check
@@ -1203,6 +1240,7 @@ def test_target_ranges_in_conflicts(self):
with pytest.raises(spack.error.SpackError):
Spec("impossible-concretization").concretized()
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_target_compatibility(self):
with pytest.raises(spack.error.SpackError):
Spec("libdwarf target=x86_64 ^libelf target=x86_64_v2").concretized()
@@ -1219,6 +1257,7 @@ def test_variant_not_default(self):
assert "+foo+bar+baz" in d
@pytest.mark.regression("20055")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_custom_compiler_version(self, mutable_config, compiler_factory, monkeypatch):
mutable_config.set(
"compilers", [compiler_factory(spec="gcc@10foo", operating_system="redhat6")]
@@ -1319,6 +1358,7 @@ def mock_fn(*args, **kwargs):
{"add_variant": True, "delete_variant": True},
],
)
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_reuse_installed_packages_when_package_def_changes(
self, context, mutable_database, repo_with_changing_recipe
):
@@ -1348,6 +1388,7 @@ def test_reuse_installed_packages_when_package_def_changes(
# Structure and package hash will be different without reuse
assert root.dag_hash() != new_root_without_reuse.dag_hash()
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
@pytest.mark.regression("43663")
def test_no_reuse_when_variant_condition_does_not_hold(self, mutable_database, mock_packages):
spack.config.set("concretizer:reuse", True)
@@ -1363,6 +1404,7 @@ def test_no_reuse_when_variant_condition_does_not_hold(self, mutable_database, m
new2 = Spec("conditional-variant-pkg +two_whens").concretized()
assert new2.satisfies("@2 +two_whens +version_based")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_reuse_with_flags(self, mutable_database, mutable_config):
spack.config.set("concretizer:reuse", True)
spec = Spec("pkg-a cflags=-g cxxflags=-g").concretized()
@@ -1383,6 +1425,7 @@ def test_concretization_of_test_dependencies(self):
@pytest.mark.parametrize(
"spec_str", ["wrong-variant-in-conflicts", "wrong-variant-in-depends-on"]
)
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_error_message_for_inconsistent_variants(self, spec_str):
s = Spec(spec_str)
with pytest.raises(RuntimeError, match="not found in package"):
@@ -1487,6 +1530,7 @@ def test_multivalued_variants_from_cli(self, spec_str, expected_dict):
("deprecated-versions@=1.1.0", "deprecated-versions@1.1.0"),
],
)
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_deprecated_versions_not_selected(self, spec_str, expected):
with spack.config.override("config:deprecated", True):
s = Spec(spec_str).concretized()
@@ -1547,6 +1591,7 @@ def test_non_default_provider_of_multiple_virtuals(self):
"spec_str,expect_installed",
[("mpich", True), ("mpich+debug", False), ("mpich~debug", True)],
)
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_concrete_specs_are_not_modified_on_reuse(
self, mutable_database, spec_str, expect_installed
):
@@ -1560,6 +1605,7 @@ def test_concrete_specs_are_not_modified_on_reuse(
assert s.satisfies(spec_str)
@pytest.mark.regression("26721,19736")
@pytest.mark.only_clingo("Original concretizer cannot use sticky variants")
def test_sticky_variant_in_package(self):
# Here we test that a sticky variant cannot be changed from its default value
# by the ASP solver if not set explicitly. The package used in the test needs
@@ -1575,6 +1621,7 @@ def test_sticky_variant_in_package(self):
assert s.satisfies("%clang") and s.satisfies("~allow-gcc")
@pytest.mark.regression("42172")
@pytest.mark.only_clingo("Original concretizer cannot use sticky variants")
@pytest.mark.parametrize(
"spec,allow_gcc",
[
@@ -1597,6 +1644,7 @@ def test_sticky_variant_in_external(self, spec, allow_gcc):
assert s["sticky-variant"].satisfies("+allow-gcc")
assert s["sticky-variant"].external
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_do_not_invent_new_concrete_versions_unless_necessary(self):
# ensure we select a known satisfying version rather than creating
# a new '2.7' version.
@@ -1618,12 +1666,14 @@ def test_do_not_invent_new_concrete_versions_unless_necessary(self):
("conditional-values-in-variant foo=foo", True),
],
)
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_conditional_values_in_variants(self, spec_str, valid):
s = Spec(spec_str)
raises = pytest.raises((RuntimeError, spack.error.UnsatisfiableSpecError))
with llnl.util.lang.nullcontext() if valid else raises:
s.concretize()
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_conditional_values_in_conditional_variant(self):
"""Test that conditional variants play well with conditional possible values"""
s = Spec("conditional-values-in-variant@1.50.0").concretized()
@@ -1632,6 +1682,7 @@ def test_conditional_values_in_conditional_variant(self):
s = Spec("conditional-values-in-variant@1.60.0").concretized()
assert "cxxstd" in s.variants
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_target_granularity(self):
# The test architecture uses core2 as the default target. Check that when
# we configure Spack for "generic" granularity we concretize for x86_64
@@ -1642,6 +1693,7 @@ def test_target_granularity(self):
with spack.config.override("concretizer:targets", {"granularity": "generic"}):
assert s.concretized().satisfies("target=%s" % generic_target)
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_host_compatible_concretization(self):
# Check that after setting "host_compatible" to false we cannot concretize.
# Here we use "k10" to set a target non-compatible with the current host
@@ -1654,6 +1706,7 @@ def test_host_compatible_concretization(self):
with pytest.raises(spack.error.SpackError):
s.concretized()
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_add_microarchitectures_on_explicit_request(self):
# Check that if we consider only "generic" targets, we can still solve for
# specific microarchitectures on explicit requests
@@ -1662,6 +1715,7 @@ def test_add_microarchitectures_on_explicit_request(self):
assert s.satisfies("target=k10")
@pytest.mark.regression("29201")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_delete_version_and_reuse(self, mutable_database, repo_with_changing_recipe):
"""Test that we can reuse installed specs with versions not
declared in package.py
@@ -1676,6 +1730,7 @@ def test_delete_version_and_reuse(self, mutable_database, repo_with_changing_rec
assert root.dag_hash() == new_root.dag_hash()
@pytest.mark.regression("29201")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_installed_version_is_selected_only_for_reuse(
self, mutable_database, repo_with_changing_recipe
):
@@ -1706,20 +1761,6 @@ def test_reuse_with_unknown_namespace_dont_raise(
s = Spec("pkg-c").concretized()
assert s.namespace == "builtin.mock"
@pytest.mark.regression("45538")
def test_reuse_from_other_namespace_no_raise(self, tmpdir, temporary_store, monkeypatch):
myrepo = spack.repo.MockRepositoryBuilder(tmpdir.mkdir("mock.repo"), namespace="myrepo")
myrepo.add_package("zlib")
builtin = Spec("zlib").concretized()
builtin.package.do_install(fake=True, explicit=True)
with spack.repo.use_repositories(myrepo.root, override=False):
with spack.config.override("concretizer:reuse", True):
myrepo = Spec("myrepo.zlib").concretized()
assert myrepo.namespace == "myrepo"
@pytest.mark.regression("28259")
def test_reuse_with_unknown_package_dont_raise(self, tmpdir, temporary_store, monkeypatch):
builder = spack.repo.MockRepositoryBuilder(tmpdir.mkdir("mock.repo"), namespace="myrepo")
@@ -1753,6 +1794,7 @@ def test_reuse_with_unknown_package_dont_raise(self, tmpdir, temporary_store, mo
(["mpi", "mpich"], 1, 1),
],
)
@pytest.mark.only_clingo("Original concretizer cannot concretize in rounds")
def test_best_effort_coconcretize(self, specs, expected, libc_offset):
specs = [Spec(s) for s in specs]
solver = spack.solver.asp.Solver()
@@ -1797,6 +1839,7 @@ def test_best_effort_coconcretize(self, specs, expected, libc_offset):
(["hdf5+mpi", "zmpi", "mpich"], "mpich", 2),
],
)
@pytest.mark.only_clingo("Original concretizer cannot concretize in rounds")
def test_best_effort_coconcretize_preferences(self, specs, expected_spec, occurances):
"""Test package preferences during coconcretization."""
specs = [Spec(s) for s in specs]
@@ -1812,6 +1855,7 @@ def test_best_effort_coconcretize_preferences(self, specs, expected_spec, occura
counter += 1
assert counter == occurances, concrete_specs
@pytest.mark.only_clingo("Original concretizer cannot concretize in rounds")
def test_solve_in_rounds_all_unsolved(self, monkeypatch, mock_packages):
specs = [Spec(x) for x in ["libdwarf%gcc", "libdwarf%clang"]]
solver = spack.solver.asp.Solver()
@@ -1827,6 +1871,7 @@ def test_solve_in_rounds_all_unsolved(self, monkeypatch, mock_packages):
):
list(solver.solve_in_rounds(specs))
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_coconcretize_reuse_and_virtuals(self):
reusable_specs = []
for s in ["mpileaks ^mpich", "zmpi"]:
@@ -1843,6 +1888,7 @@ def test_coconcretize_reuse_and_virtuals(self):
assert "zmpi" in spec
@pytest.mark.regression("30864")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_misleading_error_message_on_version(self, mutable_database):
# For this bug to be triggered we need a reusable dependency
# that is not optimal in terms of optimization scores.
@@ -1859,6 +1905,7 @@ def test_misleading_error_message_on_version(self, mutable_database):
solver.driver.solve(setup, [root_spec], reuse=reusable_specs)
@pytest.mark.regression("31148")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_version_weight_and_provenance(self):
"""Test package preferences during coconcretization."""
reusable_specs = [Spec(spec_str).concretized() for spec_str in ("pkg-b@0.9", "pkg-b@1.0")]
@@ -1890,6 +1937,7 @@ def test_version_weight_and_provenance(self):
assert criterion in result.criteria, criterion
assert result_spec.satisfies("^pkg-b@1.0")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_reuse_succeeds_with_config_compatible_os(self):
root_spec = Spec("pkg-b")
s = root_spec.concretized()
@@ -1915,6 +1963,7 @@ def test_git_hash_assigned_version_is_preferred(self):
assert hash in str(c)
@pytest.mark.parametrize("git_ref", ("a" * 40, "0.2.15", "main"))
@pytest.mark.only_clingo("Original concretizer cannot account for git hashes")
def test_git_ref_version_is_equivalent_to_specified_version(self, git_ref):
s = Spec("develop-branch-version@git.%s=develop" % git_ref)
c = s.concretized()
@@ -1924,6 +1973,7 @@ def test_git_ref_version_is_equivalent_to_specified_version(self, git_ref):
assert s.satisfies("@0.1:")
@pytest.mark.parametrize("git_ref", ("a" * 40, "0.2.15", "fbranch"))
@pytest.mark.only_clingo("Original concretizer cannot account for git hashes")
def test_git_ref_version_succeeds_with_unknown_version(self, git_ref):
# main is not defined in the package.py for this file
s = Spec("develop-branch-version@git.%s=main" % git_ref)
@@ -1931,6 +1981,7 @@ def test_git_ref_version_succeeds_with_unknown_version(self, git_ref):
assert s.satisfies("develop-branch-version@main")
@pytest.mark.regression("31484")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_installed_externals_are_reused(
self, mutable_database, repo_with_changing_recipe, tmp_path
):
@@ -1962,6 +2013,7 @@ def test_installed_externals_are_reused(
assert external3.dag_hash() == external1.dag_hash()
@pytest.mark.regression("31484")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_user_can_select_externals_with_require(self, mutable_database, tmp_path):
"""Test that users have means to select an external even in presence of reusable specs."""
external_conf = {
@@ -1990,6 +2042,7 @@ def test_user_can_select_externals_with_require(self, mutable_database, tmp_path
assert mpi_spec.name == "multi-provider-mpi"
@pytest.mark.regression("31484")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_installed_specs_disregard_conflicts(self, mutable_database, monkeypatch):
"""Test that installed specs do not trigger conflicts. This covers for the rare case
where a conflict is added on a package after a spec matching the conflict was installed.
@@ -2010,6 +2063,7 @@ def test_installed_specs_disregard_conflicts(self, mutable_database, monkeypatch
assert s.satisfies("~debug"), s
@pytest.mark.regression("32471")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_require_targets_are_allowed(self, mutable_database):
"""Test that users can set target constraints under the require attribute."""
# Configuration to be added to packages.yaml
@@ -2184,6 +2238,7 @@ def test_unsolved_specs_raises_error(self, monkeypatch, mock_packages):
solver.driver.solve(setup, specs, reuse=[])
@pytest.mark.regression("43141")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_clear_error_when_unknown_compiler_requested(self, mock_packages):
"""Tests that the solver can report a case where the compiler cannot be set"""
with pytest.raises(
@@ -2286,6 +2341,7 @@ def test_virtuals_are_annotated_on_edges(self, spec_str):
edges = spec.edges_to_dependencies(name="callpath")
assert len(edges) == 1 and edges[0].virtuals == ()
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
@pytest.mark.db
@pytest.mark.parametrize(
"spec_str,mpi_name",
@@ -2300,6 +2356,7 @@ def test_virtuals_are_reconstructed_on_reuse(self, spec_str, mpi_name, mutable_d
assert len(mpi_edges) == 1
assert "mpi" in mpi_edges[0].virtuals
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_dont_define_new_version_from_input_if_checksum_required(self, working_env):
os.environ["SPACK_CONCRETIZER_REQUIRE_CHECKSUM"] = "yes"
with pytest.raises(spack.error.UnsatisfiableSpecError):
@@ -2336,6 +2393,7 @@ def test_reuse_python_from_cli_and_extension_from_db(self, mutable_database):
("hdf5 ^gmake", {"gmake": "duplicates.test", "hdf5": "duplicates.test"}),
],
)
@pytest.mark.only_clingo("Uses specs requiring multiple gmake specs")
def test_select_lower_priority_package_from_repository_stack(
self, spec_str, expected_namespaces
):
@@ -2351,6 +2409,7 @@ def test_select_lower_priority_package_from_repository_stack(
assert s[name].concrete
assert s[name].namespace == namespace
@pytest.mark.only_clingo("Old concretizer cannot reuse")
def test_reuse_specs_from_non_available_compilers(self, mutable_config, mutable_database):
"""Tests that we can reuse specs with compilers that are not configured locally."""
# All the specs in the mutable DB have been compiled with %gcc@=10.2.1
@@ -2421,6 +2480,7 @@ def test_spec_with_build_dep_from_json(self, tmp_path):
assert s["dttop"].dag_hash() == build_dep.dag_hash()
@pytest.mark.regression("44040")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_exclude_specs_from_reuse(self, monkeypatch):
"""Tests that we can exclude a spec from reuse when concretizing, and that the spec
is not added back to the solve as a dependency of another reusable spec.
@@ -2470,6 +2530,7 @@ def test_exclude_specs_from_reuse(self, monkeypatch):
[],
],
)
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_include_specs_from_externals_and_libcs(
self, included_externals, mutable_config, tmp_path
):
@@ -2502,6 +2563,7 @@ def test_include_specs_from_externals_and_libcs(
assert result["deprecated-versions"].satisfies("@1.0.0")
@pytest.mark.regression("44085")
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_can_reuse_concrete_externals_for_dependents(self, mutable_config, tmp_path):
"""Test that external specs that are in the DB can be reused. This means they are
preferred to concretizing another external from packages.yaml
@@ -2524,6 +2586,7 @@ def test_can_reuse_concrete_externals_for_dependents(self, mutable_config, tmp_p
sombrero = result.specs[0]
assert sombrero["externaltool"].dag_hash() == external_spec.dag_hash()
@pytest.mark.only_clingo("Original concretizer cannot reuse")
def test_cannot_reuse_host_incompatible_libc(self):
"""Test whether reuse concretization correctly fails to reuse a spec with a host
incompatible libc."""
@@ -2546,55 +2609,6 @@ def test_cannot_reuse_host_incompatible_libc(self):
assert len(result.specs) == 1
assert result.specs[0] == snd
@pytest.mark.regression("45321")
@pytest.mark.parametrize(
"corrupted_str",
[
"cmake@3.4.3 foo=bar", # cmake has no variant "foo"
"mvdefaults@1.0 foo=a,d", # variant "foo" has no value "d"
"cmake %gcc", # spec has no version
],
)
def test_corrupted_external_does_not_halt_concretization(self, corrupted_str, mutable_config):
"""Tests that having a wrong variant in an external spec doesn't stop concretization"""
corrupted_spec = Spec(corrupted_str)
packages_yaml = {
f"{corrupted_spec.name}": {
"externals": [{"spec": corrupted_str, "prefix": "/dev/null"}]
}
}
mutable_config.set("packages", packages_yaml)
# Assert we don't raise due to the corrupted external entry above
s = Spec("pkg-a").concretized()
assert s.concrete
@pytest.mark.regression("44828")
@pytest.mark.not_on_windows("Tests use linux paths")
def test_correct_external_is_selected_from_packages_yaml(self, mutable_config):
"""Tests that when filtering external specs, the correct external is selected to
reconstruct the prefix, and other external attributes.
"""
packages_yaml = {
"cmake": {
"externals": [
{"spec": "cmake@3.23.1 %gcc", "prefix": "/tmp/prefix1"},
{"spec": "cmake@3.23.1 %clang", "prefix": "/tmp/prefix2"},
]
}
}
concretizer_yaml = {
"reuse": {"roots": True, "from": [{"type": "external", "exclude": ["%gcc"]}]}
}
mutable_config.set("packages", packages_yaml)
mutable_config.set("concretizer", concretizer_yaml)
s = Spec("cmake").concretized()
# Check that we got the properties from the right external
assert s.external
assert s.satisfies("%clang")
assert s.prefix == "/tmp/prefix2"
@pytest.fixture()
def duplicates_test_repository():
@@ -2604,6 +2618,7 @@ def duplicates_test_repository():
@pytest.mark.usefixtures("mutable_config", "duplicates_test_repository")
@pytest.mark.only_clingo("Not supported by the original concretizer")
class TestConcretizeSeparately:
"""Collects test on separate concretization"""
@@ -2822,6 +2837,7 @@ def edges_test_repository():
@pytest.mark.usefixtures("mutable_config", "edges_test_repository")
@pytest.mark.only_clingo("Edge properties not supported by the original concretizer")
class TestConcretizeEdges:
"""Collects tests on edge properties"""
@@ -2972,6 +2988,7 @@ def test_concretization_version_order():
]
@pytest.mark.only_clingo("Original concretizer cannot reuse specs")
@pytest.mark.parametrize(
"roots,reuse_yaml,expected,not_expected,expected_length",
[
@@ -3046,6 +3063,7 @@ def test_spec_filters(specs, include, exclude, expected):
assert f.selected_specs() == expected
@pytest.mark.only_clingo("clingo only reuse feature being tested")
@pytest.mark.regression("38484")
def test_git_ref_version_can_be_reused(install_mockery, do_not_check_runtimes_on_reuse):
first_spec = spack.spec.Spec("git-ref-package@git.2.1.5=2.1.5~opt").concretized()
@@ -3063,6 +3081,7 @@ def test_git_ref_version_can_be_reused(install_mockery, do_not_check_runtimes_on
assert third_spec.dag_hash() == first_spec.dag_hash()
@pytest.mark.only_clingo("clingo only reuse feature being tested")
@pytest.mark.parametrize("standard_version", ["2.0.0", "2.1.5", "2.1.6"])
def test_reuse_prefers_standard_over_git_versions(
standard_version, install_mockery, do_not_check_runtimes_on_reuse

View File

@@ -17,7 +17,10 @@
from spack.environment.environment import ViewDescriptor
from spack.version import Version
pytestmark = [pytest.mark.usefixtures("enable_runtimes")]
pytestmark = [
pytest.mark.only_clingo("Original concretizer does not support compiler runtimes"),
pytest.mark.usefixtures("enable_runtimes"),
]
def _concretize_with_reuse(*, root_str, reused_str):

View File

@@ -8,7 +8,10 @@
import spack.solver.asp
import spack.spec
pytestmark = [pytest.mark.not_on_windows("Windows uses old concretizer")]
pytestmark = [
pytest.mark.not_on_windows("Windows uses old concretizer"),
pytest.mark.only_clingo("Original concretizer does not support configuration requirements"),
]
version_error_messages = [
"Cannot satisfy 'fftw@:1.0' and 'fftw@1.1:",

View File

@@ -113,6 +113,7 @@ def test_preferred_compilers(self, compiler_str, spec_str):
spec = spack.spec.Spec(spec_str).concretized()
assert spec.compiler == CompilerSpec(compiler_str)
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_preferred_target(self, mutable_mock_repo):
"""Test preferred targets are applied correctly"""
spec = concretize("mpich")
@@ -142,6 +143,7 @@ def test_preferred_versions(self):
spec = concretize("mpileaks")
assert spec.version == Version("2.2")
@pytest.mark.only_clingo("This behavior is not enforced for the old concretizer")
def test_preferred_versions_mixed_version_types(self):
update_packages("mixedversions", "version", ["=2.0"])
spec = concretize("mixedversions")
@@ -223,6 +225,7 @@ def test_preferred(self):
spec.concretize()
assert spec.version == Version("3.5.0")
@pytest.mark.only_clingo("This behavior is not enforced for the old concretizer")
def test_preferred_undefined_raises(self):
"""Preference should not specify an undefined version"""
update_packages("python", "version", ["3.5.0.1"])
@@ -230,6 +233,7 @@ def test_preferred_undefined_raises(self):
with pytest.raises(spack.config.ConfigError):
spec.concretize()
@pytest.mark.only_clingo("This behavior is not enforced for the old concretizer")
def test_preferred_truncated(self):
"""Versions without "=" are treated as version ranges: if there is
a satisfying version defined in the package.py, we should use that
@@ -506,6 +510,7 @@ def test_sticky_variant_accounts_for_packages_yaml(self):
assert s.satisfies("%gcc") and s.satisfies("+allow-gcc")
@pytest.mark.regression("41134")
@pytest.mark.only_clingo("Not backporting the fix to the old concretizer")
def test_default_preference_variant_different_type_does_not_error(self):
"""Tests that a different type for an existing variant in the 'all:' section of
packages.yaml doesn't fail with an error.

View File

@@ -19,7 +19,10 @@
from spack.test.conftest import create_test_repo
from spack.util.url import path_to_file_url
pytestmark = [pytest.mark.not_on_windows("Windows uses old concretizer")]
pytestmark = [
pytest.mark.not_on_windows("Windows uses old concretizer"),
pytest.mark.only_clingo("Original concretizer does not support configuration requirements"),
]
def update_packages_config(conf_str):

View File

@@ -18,7 +18,6 @@
import spack.config
import spack.directory_layout
import spack.environment as ev
import spack.fetch_strategy
import spack.main
import spack.package_base
import spack.paths
@@ -307,14 +306,14 @@ def test_add_config_path(mutable_config):
@pytest.mark.regression("17543,23259")
def test_add_config_path_with_enumerated_type(mutable_config):
spack.config.add("config:flags:keep_werror:all")
assert spack.config.get("config")["flags"]["keep_werror"] == "all"
spack.config.add("config:concretizer:clingo")
assert spack.config.get("config")["concretizer"] == "clingo"
spack.config.add("config:flags:keep_werror:specific")
assert spack.config.get("config")["flags"]["keep_werror"] == "specific"
spack.config.add("config:concretizer:original")
assert spack.config.get("config")["concretizer"] == "original"
with pytest.raises(spack.config.ConfigError):
spack.config.add("config:flags:keep_werror:foo")
spack.config.add("config:concretizer:foo")
def test_add_config_filename(mock_low_high_config, tmpdir):

View File

@@ -56,10 +56,8 @@
import spack.util.executable
import spack.util.git
import spack.util.gpg
import spack.util.parallel
import spack.util.spack_yaml as syaml
import spack.util.url as url_util
import spack.util.web
import spack.version
from spack.fetch_strategy import URLFetchStrategy
from spack.util.pattern import Bunch
@@ -706,10 +704,11 @@ def configuration_dir(tmpdir_factory, linux_os):
tmpdir.ensure("user", dir=True)
# Fill out config.yaml, compilers.yaml and modules.yaml templates.
solver = os.environ.get("SPACK_TEST_SOLVER", "clingo")
locks = sys.platform != "win32"
config = tmpdir.join("site", "config.yaml")
config_template = test_config / "config.yaml"
config.write(config_template.read_text().format(install_tree_root, locks))
config.write(config_template.read_text().format(install_tree_root, solver, locks))
target = str(archspec.cpu.host().family)
compilers = tmpdir.join("site", "compilers.yaml")
@@ -1003,7 +1002,7 @@ def temporary_store(tmpdir, request):
def mock_fetch(mock_archive, monkeypatch):
"""Fake the URL for a package so it downloads from a file."""
monkeypatch.setattr(
spack.package_base.PackageBase, "fetcher", URLFetchStrategy(url=mock_archive.url)
spack.package_base.PackageBase, "fetcher", URLFetchStrategy(mock_archive.url)
)
@@ -1418,24 +1417,6 @@ def mock_git_repository(git, tmpdir_factory):
r1 = rev_hash(branch)
r1_file = branch_file
multiple_directories_branch = "many_dirs"
num_dirs = 3
num_files = 2
dir_files = []
for i in range(num_dirs):
for j in range(num_files):
dir_files.append(f"dir{i}/file{j}")
git("checkout", "-b", multiple_directories_branch)
for f in dir_files:
repodir.ensure(f, file=True)
git("add", f)
git("-c", "commit.gpgsign=false", "commit", "-m", "many_dirs add files")
# restore default
git("checkout", default_branch)
# Map of version -> bunch. Each bunch includes; all the args
# that must be specified as part of a version() declaration (used to
# manufacture a version for the 'git-test' package); the associated
@@ -1455,11 +1436,6 @@ def mock_git_repository(git, tmpdir_factory):
"default-no-per-version-git": Bunch(
revision=default_branch, file=r0_file, args={"branch": default_branch}
),
"many-directories": Bunch(
revision=multiple_directories_branch,
file=dir_files[0],
args={"git": url, "branch": multiple_directories_branch},
),
}
t = Bunch(
@@ -1836,7 +1812,12 @@ def __call__(self, *args, **kwargs):
tty.msg("curl: (22) The requested URL returned error: 404")
self.returncode = 22
monkeypatch.setattr(spack.util.web, "require_curl", MockCurl)
def mock_curl(*args):
return MockCurl()
monkeypatch.setattr(spack.util.web, "_curl", mock_curl)
yield
@pytest.fixture(scope="function")
@@ -1975,18 +1956,26 @@ def nullify_globals(request, monkeypatch):
def pytest_runtest_setup(item):
# Skip tests if they are marked only clingo and are run with the original concretizer
only_clingo_marker = item.get_closest_marker(name="only_clingo")
if only_clingo_marker and os.environ.get("SPACK_TEST_SOLVER") == "original":
pytest.skip(*only_clingo_marker.args)
# Skip tests if they are marked only original and are run with clingo
only_original_marker = item.get_closest_marker(name="only_original")
if only_original_marker and os.environ.get("SPACK_TEST_SOLVER", "clingo") == "clingo":
pytest.skip(*only_original_marker.args)
# Skip test marked "not_on_windows" if they're run on Windows
not_on_windows_marker = item.get_closest_marker(name="not_on_windows")
if not_on_windows_marker and sys.platform == "win32":
pytest.skip(*not_on_windows_marker.args)
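These hooks are what make markers like `only_clingo` and `not_on_windows` take effect across the suite; a sketch of how a test module consumes them (the reason strings are illustrative):

import pytest

@pytest.mark.only_clingo("Use case not supported by the original concretizer")
def test_clingo_specific_behavior():
    ...  # skipped when SPACK_TEST_SOLVER=original

@pytest.mark.not_on_windows("does not run on windows")
def test_posix_only_behavior():
    ...  # skipped when running on Windows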
@pytest.fixture(autouse=True)
@pytest.fixture(scope="function")
def disable_parallel_buildcache_push(monkeypatch):
"""Disable process pools in tests."""
monkeypatch.setattr(
spack.util.parallel, "make_concurrent_executor", spack.util.parallel.SequentialExecutor
)
monkeypatch.setattr(spack.cmd.buildcache, "_make_pool", spack.cmd.buildcache.NoPool)
def _root_path(x, y, *, path):

Some files were not shown because too many files have changed in this diff