Compare commits
5 Commits
features/s...python/add

| SHA1 |
|---|
| 474cca3005 |
| 868cb442e9 |
| c0e64718c7 |
| 8b2749f95c |
| fbfbb9710d |
2  .github/workflows/build-containers.yml (vendored)
@@ -113,7 +113,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85
uses: docker/build-push-action@16ebe778df0e7752d2cfcbd924afdbbd89c1a755
with:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}
14  .github/workflows/unit_tests.yaml (vendored)
@@ -16,27 +16,38 @@ jobs:
matrix:
os: [ubuntu-latest]
python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12']
concretizer: ['clingo']
on_develop:
- ${{ github.ref == 'refs/heads/develop' }}
include:
- python-version: '3.11'
os: ubuntu-latest
concretizer: original
on_develop: ${{ github.ref == 'refs/heads/develop' }}
- python-version: '3.6'
os: ubuntu-20.04
concretizer: clingo
on_develop: ${{ github.ref == 'refs/heads/develop' }}
exclude:
- python-version: '3.7'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
- python-version: '3.8'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
- python-version: '3.9'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
- python-version: '3.10'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false
- python-version: '3.11'
os: ubuntu-latest
concretizer: 'clingo'
on_develop: false

steps:
@@ -74,6 +85,7 @@ jobs:
- name: Run unit tests
env:
SPACK_PYTHON: python
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
SPACK_TEST_PARALLEL: 2
COVERAGE: true
UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
@@ -170,6 +182,7 @@ jobs:
- name: Run unit tests (full suite with coverage)
env:
COVERAGE: true
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673
@@ -200,6 +213,7 @@ jobs:
brew install dash fish gcc gnupg2 kcov
- name: Run unit tests
env:
SPACK_TEST_SOLVER: clingo
SPACK_TEST_PARALLEL: 4
run: |
git --version
2  .github/workflows/valid-style.yml (vendored)
@@ -85,5 +85,5 @@ jobs:
source share/spack/setup-env.sh
spack debug report
spack -d bootstrap now --dev
spack -d style -t black
spack style -t black
spack unit-test -V
@@ -170,6 +170,23 @@ config:
# If set to true, Spack will use ccache to cache C compiles.
ccache: false


# The concretization algorithm to use in Spack. Options are:
#
# 'clingo': Uses a logic solver under the hood to solve DAGs with full
# backtracking and optimization for user preferences. Spack will
# try to bootstrap the logic solver, if not already available.
#
# 'original': Spack's original greedy, fixed-point concretizer. This
# algorithm can make decisions too early and will not backtrack
# sufficiently for many specs. This will soon be deprecated in
# favor of clingo.
#
# See `concretizer.yaml` for more settings you can fine-tune when
# using clingo.
concretizer: clingo


# How long to wait to lock the Spack installation database. This lock is used
# when Spack needs to manage its own package metadata and all operations are
# expected to complete within the default time limit. The timeout should
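Aside on the `config.yaml` hunk above: the two options it documents are read elsewhere in Spack through `spack.config.get`, the same lookup pattern used for `config:ccache` further down in this diff. A minimal sketch of that lookup (not part of the diff; the default value of `"clingo"` is an assumption):

```python
import spack.config

# Hypothetical sketch: read which concretizer config.yaml selected.
# Falls back to "clingo" if the key is unset (assumed default here).
concretizer = spack.config.get("config:concretizer", "clingo")
if concretizer == "original":
    print("using the greedy fixed-point concretizer (soon deprecated)")
else:
    print("using the clingo-based solver")
```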
@@ -20,14 +20,12 @@ packages:
awk: [gawk]
armci: [armcimpi]
blas: [openblas, amdblis]
c: [gcc]
cxx: [gcc]
D: [ldc]
daal: [intel-oneapi-daal]
elf: [elfutils]
fftw-api: [fftw, amdfftw]
flame: [libflame, amdlibflame]
fortran: [gcc]
fortran-rt: [gcc-runtime, intel-oneapi-runtime]
fuse: [libfuse]
gl: [glx, osmesa]
@@ -1,5 +1,6 @@
config:
locks: false
concretizer: clingo
build_stage::
- '$spack/.staging'
stage_name: '{name}-{version}-{hash:7}'
@@ -206,7 +206,6 @@ def setup(sphinx):
("py:class", "six.moves.urllib.parse.ParseResult"),
("py:class", "TextIO"),
("py:class", "hashlib._Hash"),
("py:class", "concurrent.futures._base.Executor"),
# Spack classes that are private and we don't want to expose
("py:class", "spack.provider_index._IndexBase"),
("py:class", "spack.repo._PrependFileLoader"),
@@ -1263,11 +1263,6 @@ Git fetching supports the following parameters to ``version``:
  option ``--depth 1`` will be used if the version of git and the specified
  transport protocol support it, and ``--single-branch`` will be used if the
  version of git supports it.
* ``git_sparse_paths``: Use ``sparse-checkout`` to only clone these relative paths.
  This feature requires ``git`` to be version ``2.25.0`` or later, but is useful for
  large repositories that have separate portions that can be built independently.
  If the paths provided are directories, then all their subdirectories and associated files
  will also be cloned.

Only one of ``tag``, ``branch``, or ``commit`` can be used at a time.

@@ -1366,41 +1361,6 @@ Submodules
For more information about git submodules see the manpage of git: ``man
git-submodule``.

Sparse-Checkout
^^^^^^^^^^^^^^^
You can supply ``git_sparse_paths`` at the package or version level to utilize git's
sparse-checkout feature. This will only clone the paths that are specified in the
``git_sparse_paths`` attribute for the package, along with the files in the top-level directory.
This feature allows you to clone only what you need from a large repository.
Note that this is a newer feature in git and requires git ``2.25.0`` or greater.
If ``git_sparse_paths`` is supplied and the git version is too old,
a warning will be issued and that package will use the standard cloning operations instead.
``git_sparse_paths`` should be supplied as a list of paths, a callable function for versions,
or a more complex package attribute using the ``@property`` decorator. The return value should be
a list for a callable implementation of ``git_sparse_paths``.

.. code-block:: python

   def sparse_path_function(package):
       """A callable function that can be used inside a version."""
       # paths can be directories or files; all subdirectories and files are included
       paths = ["doe", "rae", "me/file.cpp"]
       if package.spec.version > Version("1.2.0"):
           paths.extend(["fae"])
       return paths

   class MyPackage(Package):
       # can also be a package attribute that will be used if not specified in versions
       git_sparse_paths = ["doe", "rae"]

       # use the package attribute
       version("1.0.0")
       version("1.1.0")
       # use the function
       version("1.1.5", git_sparse_paths=sparse_path_function)
       version("1.2.0", git_sparse_paths=sparse_path_function)
       version("1.2.5", git_sparse_paths=sparse_path_function)

.. _github-fetch:

^^^^^^
2  lib/spack/external/__init__.py (vendored)
@@ -18,7 +18,7 @@

* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.5-dev (commit 7e6740012b897ae4a950f0bba7e9726b767e921f)
* Version: 0.2.4 (commit 48b92512b9ce203ded0ebd1ac41b42593e931f7c)

astunparse
----------------
12  lib/spack/external/archspec/cpu/detect.py (vendored)
@@ -47,11 +47,7 @@ def decorator(factory):


def partial_uarch(
name: str = "",
vendor: str = "",
features: Optional[Set[str]] = None,
generation: int = 0,
cpu_part: str = "",
name: str = "", vendor: str = "", features: Optional[Set[str]] = None, generation: int = 0
) -> Microarchitecture:
"""Construct a partial microarchitecture, from information gathered during system scan."""
return Microarchitecture(
@@ -61,7 +57,6 @@ def partial_uarch(
features=features or set(),
compilers={},
generation=generation,
cpu_part=cpu_part,
)


@@ -95,7 +90,6 @@ def proc_cpuinfo() -> Microarchitecture:
return partial_uarch(
vendor=_canonicalize_aarch64_vendor(data),
features=_feature_set(data, key="Features"),
cpu_part=data.get("CPU part", ""),
)

if architecture in (PPC64LE, PPC64):
@@ -351,10 +345,6 @@ def sorting_fn(item):
generic_candidates = [c for c in candidates if c.vendor == "generic"]
best_generic = max(generic_candidates, key=sorting_fn)

# Relevant for AArch64. Filter on "cpu_part" if we have any match
if info.cpu_part != "" and any(c for c in candidates if info.cpu_part == c.cpu_part):
candidates = [c for c in candidates if info.cpu_part == c.cpu_part]

# Filter the candidates to be descendant of the best generic candidate.
# This is to avoid that the lack of a niche feature that can be disabled
# from e.g. BIOS prevents detection of a reasonably performant architecture
@@ -2,7 +2,9 @@
|
||||
# Archspec Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
"""Types and functions to manage information on CPU microarchitectures."""
|
||||
"""Types and functions to manage information
|
||||
on CPU microarchitectures.
|
||||
"""
|
||||
import functools
|
||||
import platform
|
||||
import re
|
||||
@@ -63,24 +65,21 @@ class Microarchitecture:
|
||||
passed in as argument above.
|
||||
* versions: versions that support this micro-architecture.
|
||||
|
||||
generation (int): generation of the micro-architecture, if relevant.
|
||||
cpu_part (str): cpu part of the architecture, if relevant.
|
||||
generation (int): generation of the micro-architecture, if
|
||||
relevant.
|
||||
"""
|
||||
|
||||
# pylint: disable=too-many-arguments,too-many-instance-attributes
|
||||
# pylint: disable=too-many-arguments
|
||||
#: Aliases for micro-architecture's features
|
||||
feature_aliases = FEATURE_ALIASES
|
||||
|
||||
def __init__(self, name, parents, vendor, features, compilers, generation=0, cpu_part=""):
|
||||
def __init__(self, name, parents, vendor, features, compilers, generation=0):
|
||||
self.name = name
|
||||
self.parents = parents
|
||||
self.vendor = vendor
|
||||
self.features = features
|
||||
self.compilers = compilers
|
||||
# Only relevant for PowerPC
|
||||
self.generation = generation
|
||||
# Only relevant for AArch64
|
||||
self.cpu_part = cpu_part
|
||||
# Cache the ancestor computation
|
||||
self._ancestors = None
|
||||
|
||||
@@ -112,7 +111,6 @@ def __eq__(self, other):
|
||||
and self.parents == other.parents # avoid ancestors here
|
||||
and self.compilers == other.compilers
|
||||
and self.generation == other.generation
|
||||
and self.cpu_part == other.cpu_part
|
||||
)
|
||||
|
||||
@coerce_target_names
|
||||
@@ -145,8 +143,7 @@ def __repr__(self):
|
||||
cls_name = self.__class__.__name__
|
||||
fmt = (
|
||||
cls_name + "({0.name!r}, {0.parents!r}, {0.vendor!r}, "
|
||||
"{0.features!r}, {0.compilers!r}, generation={0.generation!r}, "
|
||||
"cpu_part={0.cpu_part!r})"
|
||||
"{0.features!r}, {0.compilers!r}, {0.generation!r})"
|
||||
)
|
||||
return fmt.format(self)
|
||||
|
||||
@@ -193,7 +190,6 @@ def to_dict(self):
|
||||
"generation": self.generation,
|
||||
"parents": [str(x) for x in self.parents],
|
||||
"compilers": self.compilers,
|
||||
"cpupart": self.cpu_part,
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
@@ -206,7 +202,6 @@ def from_dict(data) -> "Microarchitecture":
|
||||
features=set(data["features"]),
|
||||
compilers=data.get("compilers", {}),
|
||||
generation=data.get("generation", 0),
|
||||
cpu_part=data.get("cpupart", ""),
|
||||
)
|
||||
|
||||
def optimization_flags(self, compiler, version):
|
||||
@@ -365,11 +360,8 @@ def fill_target_from_dict(name, data, targets):
|
||||
features = set(values["features"])
|
||||
compilers = values.get("compilers", {})
|
||||
generation = values.get("generation", 0)
|
||||
cpu_part = values.get("cpupart", "")
|
||||
|
||||
targets[name] = Microarchitecture(
|
||||
name, parents, vendor, features, compilers, generation=generation, cpu_part=cpu_part
|
||||
)
|
||||
targets[name] = Microarchitecture(name, parents, vendor, features, compilers, generation)
|
||||
|
||||
known_targets = {}
|
||||
data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"]
|
||||
|
@@ -2225,14 +2225,10 @@
|
||||
],
|
||||
"nvhpc": [
|
||||
{
|
||||
"versions": "21.11:23.8",
|
||||
"versions": "21.11:",
|
||||
"name": "zen3",
|
||||
"flags": "-tp {name}",
|
||||
"warnings": "zen4 is not fully supported by nvhpc versions < 23.9, falling back to zen3"
|
||||
},
|
||||
{
|
||||
"versions": "23.9:",
|
||||
"flags": "-tp {name}"
|
||||
"warnings": "zen4 is not fully supported by nvhpc yet, falling back to zen3"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -2715,8 +2711,7 @@
|
||||
"flags": "-mcpu=thunderx2t99"
|
||||
}
|
||||
]
|
||||
},
|
||||
"cpupart": "0x0af"
|
||||
}
|
||||
},
|
||||
"a64fx": {
|
||||
"from": ["armv8.2a"],
|
||||
@@ -2784,8 +2779,7 @@
|
||||
"flags": "-march=armv8.2-a+crc+crypto+fp16+sve"
|
||||
}
|
||||
]
|
||||
},
|
||||
"cpupart": "0x001"
|
||||
}
|
||||
},
|
||||
"cortex_a72": {
|
||||
"from": ["aarch64"],
|
||||
@@ -2822,8 +2816,7 @@
|
||||
"flags" : "-mcpu=cortex-a72"
|
||||
}
|
||||
]
|
||||
},
|
||||
"cpupart": "0xd08"
|
||||
}
|
||||
},
|
||||
"neoverse_n1": {
|
||||
"from": ["cortex_a72", "armv8.2a"],
|
||||
@@ -2909,8 +2902,7 @@
|
||||
"flags": "-tp {name}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"cpupart": "0xd0c"
|
||||
}
|
||||
},
|
||||
"neoverse_v1": {
|
||||
"from": ["neoverse_n1", "armv8.4a"],
|
||||
@@ -2934,6 +2926,8 @@
|
||||
"lrcpc",
|
||||
"dcpop",
|
||||
"sha3",
|
||||
"sm3",
|
||||
"sm4",
|
||||
"asimddp",
|
||||
"sha512",
|
||||
"sve",
|
||||
@@ -3034,8 +3028,7 @@
|
||||
"flags": "-tp {name}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"cpupart": "0xd40"
|
||||
}
|
||||
},
|
||||
"neoverse_v2": {
|
||||
"from": ["neoverse_n1", "armv9.0a"],
|
||||
@@ -3059,10 +3052,13 @@
|
||||
"lrcpc",
|
||||
"dcpop",
|
||||
"sha3",
|
||||
"sm3",
|
||||
"sm4",
|
||||
"asimddp",
|
||||
"sha512",
|
||||
"sve",
|
||||
"asimdfhm",
|
||||
"dit",
|
||||
"uscat",
|
||||
"ilrcpc",
|
||||
"flagm",
|
||||
@@ -3070,12 +3066,18 @@
|
||||
"sb",
|
||||
"dcpodp",
|
||||
"sve2",
|
||||
"sveaes",
|
||||
"svepmull",
|
||||
"svebitperm",
|
||||
"svesha3",
|
||||
"svesm4",
|
||||
"flagm2",
|
||||
"frint",
|
||||
"svei8mm",
|
||||
"svebf16",
|
||||
"i8mm",
|
||||
"bf16"
|
||||
"bf16",
|
||||
"dgh"
|
||||
],
|
||||
"compilers" : {
|
||||
"gcc": [
|
||||
@@ -3100,19 +3102,15 @@
|
||||
"flags" : "-march=armv8.5-a+sve -mtune=cortex-a76"
|
||||
},
|
||||
{
|
||||
"versions": "10.0:11.3.99",
|
||||
"versions": "10.0:11.99",
|
||||
"flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16 -mtune=cortex-a77"
|
||||
},
|
||||
{
|
||||
"versions": "11.4:11.99",
|
||||
"flags" : "-mcpu=neoverse-v2"
|
||||
},
|
||||
{
|
||||
"versions": "12.0:12.2.99",
|
||||
"versions": "12.0:12.99",
|
||||
"flags" : "-march=armv9-a+i8mm+bf16 -mtune=cortex-a710"
|
||||
},
|
||||
{
|
||||
"versions": "12.3:",
|
||||
"versions": "13.0:",
|
||||
"flags" : "-mcpu=neoverse-v2"
|
||||
}
|
||||
],
|
||||
@@ -3147,113 +3145,7 @@
|
||||
"flags": "-tp {name}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"cpupart": "0xd4f"
|
||||
},
|
||||
"neoverse_n2": {
|
||||
"from": ["neoverse_n1", "armv9.0a"],
|
||||
"vendor": "ARM",
|
||||
"features": [
|
||||
"fp",
|
||||
"asimd",
|
||||
"evtstrm",
|
||||
"aes",
|
||||
"pmull",
|
||||
"sha1",
|
||||
"sha2",
|
||||
"crc32",
|
||||
"atomics",
|
||||
"fphp",
|
||||
"asimdhp",
|
||||
"cpuid",
|
||||
"asimdrdm",
|
||||
"jscvt",
|
||||
"fcma",
|
||||
"lrcpc",
|
||||
"dcpop",
|
||||
"sha3",
|
||||
"asimddp",
|
||||
"sha512",
|
||||
"sve",
|
||||
"asimdfhm",
|
||||
"uscat",
|
||||
"ilrcpc",
|
||||
"flagm",
|
||||
"ssbs",
|
||||
"sb",
|
||||
"dcpodp",
|
||||
"sve2",
|
||||
"flagm2",
|
||||
"frint",
|
||||
"svei8mm",
|
||||
"svebf16",
|
||||
"i8mm",
|
||||
"bf16"
|
||||
],
|
||||
"compilers" : {
|
||||
"gcc": [
|
||||
{
|
||||
"versions": "4.8:5.99",
|
||||
"flags": "-march=armv8-a"
|
||||
},
|
||||
{
|
||||
"versions": "6:6.99",
|
||||
"flags" : "-march=armv8.1-a"
|
||||
},
|
||||
{
|
||||
"versions": "7.0:7.99",
|
||||
"flags" : "-march=armv8.2-a -mtune=cortex-a72"
|
||||
},
|
||||
{
|
||||
"versions": "8.0:8.99",
|
||||
"flags" : "-march=armv8.4-a+sve -mtune=cortex-a72"
|
||||
},
|
||||
{
|
||||
"versions": "9.0:9.99",
|
||||
"flags" : "-march=armv8.5-a+sve -mtune=cortex-a76"
|
||||
},
|
||||
{
|
||||
"versions": "10.0:10.99",
|
||||
"flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16 -mtune=cortex-a77"
|
||||
},
|
||||
{
|
||||
"versions": "11.0:",
|
||||
"flags" : "-mcpu=neoverse-n2"
|
||||
}
|
||||
],
|
||||
"clang" : [
|
||||
{
|
||||
"versions": "9.0:10.99",
|
||||
"flags" : "-march=armv8.5-a+sve"
|
||||
},
|
||||
{
|
||||
"versions": "11.0:13.99",
|
||||
"flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16"
|
||||
},
|
||||
{
|
||||
"versions": "14.0:15.99",
|
||||
"flags" : "-march=armv9-a+i8mm+bf16"
|
||||
},
|
||||
{
|
||||
"versions": "16.0:",
|
||||
"flags" : "-mcpu=neoverse-n2"
|
||||
}
|
||||
],
|
||||
"arm" : [
|
||||
{
|
||||
"versions": "23.04.0:",
|
||||
"flags" : "-mcpu=neoverse-n2"
|
||||
}
|
||||
],
|
||||
"nvhpc" : [
|
||||
{
|
||||
"versions": "23.3:",
|
||||
"name": "neoverse-n1",
|
||||
"flags": "-tp {name}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"cpupart": "0xd49"
|
||||
}
|
||||
},
|
||||
"m1": {
|
||||
"from": ["armv8.4a"],
|
||||
@@ -3319,8 +3211,7 @@
|
||||
"flags" : "-mcpu=apple-m1"
|
||||
}
|
||||
]
|
||||
},
|
||||
"cpupart": "0x022"
|
||||
}
|
||||
},
|
||||
"m2": {
|
||||
"from": ["m1", "armv8.5a"],
|
||||
@@ -3398,8 +3289,7 @@
|
||||
"flags" : "-mcpu=apple-m2"
|
||||
}
|
||||
]
|
||||
},
|
||||
"cpupart": "0x032"
|
||||
}
|
||||
},
|
||||
"arm": {
|
||||
"from": [],
|
||||
|
@@ -52,9 +52,6 @@
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"cpupart": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
@@ -110,4 +107,4 @@
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1624,12 +1624,6 @@ def remove_linked_tree(path):
|
||||
shutil.rmtree(os.path.realpath(path), **kwargs)
|
||||
os.unlink(path)
|
||||
else:
|
||||
if sys.platform == "win32":
|
||||
# Adding this prefix allows shutil to remove long paths on windows
|
||||
# https://learn.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=registry
|
||||
long_path_pfx = "\\\\?\\"
|
||||
if not path.startswith(long_path_pfx):
|
||||
path = long_path_pfx + path
|
||||
shutil.rmtree(path, **kwargs)
|
||||
|
||||
|
||||
|
File diff suppressed because it is too large
@@ -1,154 +0,0 @@
|
||||
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
"""Bootstrap concrete specs for clingo
|
||||
|
||||
Spack uses clingo to concretize specs. When clingo itself needs to be bootstrapped from sources,
|
||||
we need to rely on another mechanism to get a concrete spec that fits the current host.
|
||||
|
||||
This module contains the logic to get a concrete spec for clingo, starting from a prototype
|
||||
JSON file for a similar platform.
|
||||
"""
|
||||
import pathlib
|
||||
import sys
|
||||
from typing import Dict, Optional, Tuple
|
||||
|
||||
import archspec.cpu
|
||||
|
||||
import spack.compiler
|
||||
import spack.compilers
|
||||
import spack.platforms
|
||||
import spack.spec
|
||||
import spack.traverse
|
||||
|
||||
from .config import spec_for_current_python
|
||||
|
||||
|
||||
class ClingoBootstrapConcretizer:
|
||||
def __init__(self, configuration):
|
||||
self.host_platform = spack.platforms.host()
|
||||
self.host_os = self.host_platform.operating_system("frontend")
|
||||
self.host_target = archspec.cpu.host().family
|
||||
self.host_architecture = spack.spec.ArchSpec.frontend_arch()
|
||||
self.host_architecture.target = str(self.host_target)
|
||||
self.host_compiler = self._valid_compiler_or_raise()
|
||||
self.host_python = self.python_external_spec()
|
||||
if str(self.host_platform) == "linux":
|
||||
self.host_libc = self.libc_external_spec()
|
||||
|
||||
self.external_cmake, self.external_bison = self._externals_from_yaml(configuration)
|
||||
|
||||
def _valid_compiler_or_raise(self) -> "spack.compiler.Compiler":
|
||||
if str(self.host_platform) == "linux":
|
||||
compiler_name = "gcc"
|
||||
elif str(self.host_platform) == "darwin":
|
||||
compiler_name = "apple-clang"
|
||||
elif str(self.host_platform) == "windows":
|
||||
compiler_name = "msvc"
|
||||
elif str(self.host_platform) == "freebsd":
|
||||
compiler_name = "clang"
|
||||
else:
|
||||
raise RuntimeError(f"Cannot bootstrap clingo from sources on {self.host_platform}")
|
||||
candidates = spack.compilers.compilers_for_spec(
|
||||
compiler_name, arch_spec=self.host_architecture
|
||||
)
|
||||
if not candidates:
|
||||
raise RuntimeError(
|
||||
f"Cannot find any version of {compiler_name} to bootstrap clingo from sources"
|
||||
)
|
||||
candidates.sort(key=lambda x: x.spec.version, reverse=True)
|
||||
return candidates[0]
|
||||
|
||||
def _externals_from_yaml(
|
||||
self, configuration: "spack.config.Configuration"
|
||||
) -> Tuple[Optional["spack.spec.Spec"], Optional["spack.spec.Spec"]]:
|
||||
packages_yaml = configuration.get("packages")
|
||||
requirements = {"cmake": "@3.20:", "bison": "@2.5:"}
|
||||
selected: Dict[str, Optional["spack.spec.Spec"]] = {"cmake": None, "bison": None}
|
||||
for pkg_name in ["cmake", "bison"]:
|
||||
if pkg_name not in packages_yaml:
|
||||
continue
|
||||
|
||||
candidates = packages_yaml[pkg_name].get("externals", [])
|
||||
for candidate in candidates:
|
||||
s = spack.spec.Spec(candidate["spec"], external_path=candidate["prefix"])
|
||||
if not s.satisfies(requirements[pkg_name]):
|
||||
continue
|
||||
|
||||
if not s.intersects(f"%{self.host_compiler.spec}"):
|
||||
continue
|
||||
|
||||
if not s.intersects(f"arch={self.host_architecture}"):
|
||||
continue
|
||||
|
||||
selected[pkg_name] = self._external_spec(s)
|
||||
break
|
||||
return selected["cmake"], selected["bison"]
|
||||
|
||||
def prototype_path(self) -> pathlib.Path:
|
||||
"""Path to a prototype concrete specfile for clingo"""
|
||||
parent_dir = pathlib.Path(__file__).parent
|
||||
result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-{self.host_target}.json"
|
||||
if str(self.host_platform) == "linux":
|
||||
# Using aarch64 as a fallback, since it has gnuconfig (x86_64 doesn't have it)
|
||||
if not result.exists():
|
||||
result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-aarch64.json"
|
||||
|
||||
elif str(self.host_platform) == "freebsd":
|
||||
result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-amd64.json"
|
||||
|
||||
elif not result.exists():
|
||||
raise RuntimeError(f"Cannot bootstrap clingo from sources on {self.host_platform}")
|
||||
|
||||
return result
|
||||
|
||||
def concretize(self) -> "spack.spec.Spec":
|
||||
# Read the prototype and mark it NOT concrete
|
||||
s = spack.spec.Spec.from_specfile(str(self.prototype_path()))
|
||||
s._mark_concrete(False)
|
||||
|
||||
# Tweak it to conform to the host architecture
|
||||
for node in s.traverse():
|
||||
node.architecture.os = str(self.host_os)
|
||||
node.compiler = self.host_compiler.spec
|
||||
node.architecture = self.host_architecture
|
||||
|
||||
if node.name == "gcc-runtime":
|
||||
node.versions = self.host_compiler.spec.versions
|
||||
|
||||
for edge in spack.traverse.traverse_edges([s], cover="edges"):
|
||||
if edge.spec.name == "python":
|
||||
edge.spec = self.host_python
|
||||
|
||||
if edge.spec.name == "bison" and self.external_bison:
|
||||
edge.spec = self.external_bison
|
||||
|
||||
if edge.spec.name == "cmake" and self.external_cmake:
|
||||
edge.spec = self.external_cmake
|
||||
|
||||
if "libc" in edge.virtuals:
|
||||
edge.spec = self.host_libc
|
||||
|
||||
s._finalize_concretization()
|
||||
|
||||
# Work around the fact that the installer calls Spec.dependents() and
|
||||
# we modified edges inconsistently
|
||||
return s.copy()
|
||||
|
||||
def python_external_spec(self) -> "spack.spec.Spec":
|
||||
"""Python external spec corresponding to the current running interpreter"""
|
||||
result = spack.spec.Spec(spec_for_current_python(), external_path=sys.exec_prefix)
|
||||
return self._external_spec(result)
|
||||
|
||||
def libc_external_spec(self) -> "spack.spec.Spec":
|
||||
result = self.host_compiler.default_libc
|
||||
return self._external_spec(result)
|
||||
|
||||
def _external_spec(self, initial_spec) -> "spack.spec.Spec":
|
||||
initial_spec.namespace = "builtin"
|
||||
initial_spec.compiler = self.host_compiler.spec
|
||||
initial_spec.architecture = self.host_architecture
|
||||
for flag_type in spack.spec.FlagMap.valid_compiler_flags():
|
||||
initial_spec.compiler_flags[flag_type] = []
|
||||
return spack.spec.parse_with_version_concrete(initial_spec)
|
@@ -54,7 +54,6 @@
|
||||
import spack.version
|
||||
|
||||
from ._common import _executables_in_store, _python_import, _root_spec, _try_import_from_store
|
||||
from .clingo import ClingoBootstrapConcretizer
|
||||
from .config import spack_python_interpreter, spec_for_current_python
|
||||
|
||||
#: Name of the file containing metadata about the bootstrapping source
|
||||
@@ -269,13 +268,15 @@ def try_import(self, module: str, abstract_spec_str: str) -> bool:
|
||||
|
||||
# Try to build and install from sources
|
||||
with spack_python_interpreter():
|
||||
# Add hint to use frontend operating system on Cray
|
||||
concrete_spec = spack.spec.Spec(abstract_spec_str + " ^" + spec_for_current_python())
|
||||
|
||||
if module == "clingo":
|
||||
bootstrapper = ClingoBootstrapConcretizer(configuration=spack.config.CONFIG)
|
||||
concrete_spec = bootstrapper.concretize()
|
||||
else:
|
||||
concrete_spec = spack.spec.Spec(
|
||||
abstract_spec_str + " ^" + spec_for_current_python()
|
||||
# TODO: remove when the old concretizer is deprecated # pylint: disable=fixme
|
||||
concrete_spec._old_concretize( # pylint: disable=protected-access
|
||||
deprecation_warning=False
|
||||
)
|
||||
else:
|
||||
concrete_spec.concretize()
|
||||
|
||||
msg = "[BOOTSTRAP MODULE {0}] Try installing '{1}' from sources"
|
||||
@@ -302,7 +303,14 @@ def try_search_path(self, executables: Tuple[str], abstract_spec_str: str) -> bo
|
||||
# might reduce compilation time by a fair amount
|
||||
_add_externals_if_missing()
|
||||
|
||||
concrete_spec = spack.spec.Spec(abstract_spec_str).concretized()
|
||||
concrete_spec = spack.spec.Spec(abstract_spec_str)
|
||||
if concrete_spec.name == "patchelf":
|
||||
concrete_spec._old_concretize( # pylint: disable=protected-access
|
||||
deprecation_warning=False
|
||||
)
|
||||
else:
|
||||
concrete_spec.concretize()
|
||||
|
||||
msg = "[BOOTSTRAP] Try installing '{0}' from sources"
|
||||
tty.debug(msg.format(abstract_spec_str))
|
||||
with spack.config.override(self.mirror_scope):
|
||||
|
File diff suppressed because one or more lines are too long (7 files)
@@ -457,12 +457,9 @@ def set_wrapper_variables(pkg, env):
|
||||
env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format("{name}-{hash:7}"))
|
||||
env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
|
||||
|
||||
# Find ccache binary and hand it to build environment
|
||||
if spack.config.get("config:ccache"):
|
||||
# Enable ccache in the compiler wrapper
|
||||
env.set(SPACK_CCACHE_BINARY, spack.util.executable.which_string("ccache", required=True))
|
||||
else:
|
||||
# Avoid cache pollution if a build system forces `ccache <compiler wrapper invocation>`.
|
||||
env.set("CCACHE_DISABLE", "1")
|
||||
|
||||
# Gather information about various types of dependencies
|
||||
link_deps = set(pkg.spec.traverse(root=False, deptype=("link")))
|
||||
|
@@ -3,6 +3,7 @@
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
"""Common utilities for managing intel oneapi packages."""
|
||||
import getpass
|
||||
import os
|
||||
import platform
|
||||
import shutil
|
||||
@@ -12,7 +13,6 @@
|
||||
from llnl.util.filesystem import HeaderList, LibraryList, find_libraries, join_path, mkdirp
|
||||
from llnl.util.link_tree import LinkTree
|
||||
|
||||
import spack.util.path
|
||||
from spack.build_environment import dso_suffix
|
||||
from spack.directives import conflicts, license, redistribute, variant
|
||||
from spack.package_base import InstallError
|
||||
@@ -99,7 +99,7 @@ def install_component(self, installer_path):
|
||||
# with other install depends on the userid. For root, we
|
||||
# delete the installercache before and after install. For
|
||||
# non root we redefine the HOME environment variable.
|
||||
if spack.util.path.get_user() == "root":
|
||||
if getpass.getuser() == "root":
|
||||
shutil.rmtree("/var/intel/installercache", ignore_errors=True)
|
||||
|
||||
bash = Executable("bash")
|
||||
@@ -122,7 +122,7 @@ def install_component(self, installer_path):
|
||||
self.prefix,
|
||||
)
|
||||
|
||||
if spack.util.path.get_user() == "root":
|
||||
if getpass.getuser() == "root":
|
||||
shutil.rmtree("/var/intel/installercache", ignore_errors=True)
|
||||
|
||||
# Some installers have a bug and do not return an error code when failing
|
||||
|
@@ -354,10 +354,18 @@ def homepage(cls) -> Optional[str]: # type: ignore[override]
|
||||
return None
|
||||
|
||||
@lang.classproperty
|
||||
def url(cls) -> Optional[str]:
|
||||
def urls(cls) -> Optional[List[str]]:
|
||||
if cls.pypi:
|
||||
return f"https://files.pythonhosted.org/packages/source/{cls.pypi[0]}/{cls.pypi}"
|
||||
return None
|
||||
urls = [f"https://files.pythonhosted.org/packages/source/{cls.pypi[0]}/{cls.pypi}"]
|
||||
assert cls.pypi.count("/") == 1, "PyPI class attribute must include a single slash"
|
||||
name, file = cls.pypi.split("/")
|
||||
name_dash_count = name.count("-")
|
||||
if name_dash_count > 0:
|
||||
# replace all but last dash with underscores for pypi.org listing changes
|
||||
pypi = "/".join([name, file.replace("-", "_", name_dash_count)])
|
||||
urls.append(f"https://files.pythonhosted.org/packages/source/{pypi[0]}/{pypi}")
|
||||
return urls
|
||||
return [None]
|
||||
|
||||
@lang.classproperty
|
||||
def list_url(cls) -> Optional[str]: # type: ignore[override]
|
||||
|
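Aside on the ``urls`` classproperty hunk above: the multi-URL variant builds a second candidate sdist URL by replacing all but the last dash in the file name with underscores. A rough illustration of that transformation follows; the package value is only an assumed example, not taken from this diff:

```python
# Hypothetical walk-through of the dash-to-underscore fallback shown above.
pypi = "azure-core/azure-core-1.0.0.tar.gz"  # assumed example value of cls.pypi
urls = [f"https://files.pythonhosted.org/packages/source/{pypi[0]}/{pypi}"]
name, file = pypi.split("/")                 # "azure-core", "azure-core-1.0.0.tar.gz"
name_dash_count = name.count("-")            # 1
if name_dash_count > 0:
    # replace all but the last dash in the file name with underscores
    alt = "/".join([name, file.replace("-", "_", name_dash_count)])
    urls.append(f"https://files.pythonhosted.org/packages/source/{alt[0]}/{alt}")
# urls now holds both the dashed and the underscored variant of the sdist URL.
```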
@@ -38,7 +38,6 @@
|
||||
import spack.paths
|
||||
import spack.repo
|
||||
import spack.spec
|
||||
import spack.stage
|
||||
import spack.util.git
|
||||
import spack.util.gpg as gpg_util
|
||||
import spack.util.spack_yaml as syaml
|
||||
@@ -1108,7 +1107,7 @@ def main_script_replacements(cmd):
|
||||
if cdash_handler and cdash_handler.auth_token:
|
||||
try:
|
||||
cdash_handler.populate_buildgroup(all_job_names)
|
||||
except (SpackError, HTTPError, URLError, TimeoutError) as err:
|
||||
except (SpackError, HTTPError, URLError) as err:
|
||||
tty.warn(f"Problem populating buildgroup: {err}")
|
||||
else:
|
||||
tty.warn("Unable to populate buildgroup without CDash credentials")
|
||||
@@ -1371,6 +1370,15 @@ def can_verify_binaries():
|
||||
return len(gpg_util.public_keys()) >= 1
|
||||
|
||||
|
||||
def _push_to_build_cache(spec: spack.spec.Spec, sign_binaries: bool, mirror_url: str) -> None:
|
||||
"""Unchecked version of the public API, for easier mocking"""
|
||||
bindist.push_or_raise(
|
||||
spec,
|
||||
spack.mirror.Mirror.from_url(mirror_url).push_url,
|
||||
bindist.PushOptions(force=True, unsigned=not sign_binaries),
|
||||
)
|
||||
|
||||
|
||||
def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: bool) -> bool:
|
||||
"""Push one or more binary packages to the mirror.
|
||||
|
||||
@@ -1381,13 +1389,20 @@ def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: b
|
||||
sign_binaries: If True, spack will attempt to sign binary package before pushing.
|
||||
"""
|
||||
tty.debug(f"Pushing to build cache ({'signed' if sign_binaries else 'unsigned'})")
|
||||
signing_key = bindist.select_signing_key() if sign_binaries else None
|
||||
try:
|
||||
bindist.push_or_raise([spec], out_url=mirror_url, signing_key=signing_key)
|
||||
_push_to_build_cache(spec, sign_binaries, mirror_url)
|
||||
return True
|
||||
except bindist.PushToBuildCacheError as e:
|
||||
tty.error(f"Problem writing to {mirror_url}: {e}")
|
||||
tty.error(str(e))
|
||||
return False
|
||||
except Exception as e:
|
||||
# TODO (zackgalbreath): write an adapter for boto3 exceptions so we can catch a specific
|
||||
# exception instead of parsing str(e)...
|
||||
msg = str(e)
|
||||
if any(x in msg for x in ["Access Denied", "InvalidAccessKeyId"]):
|
||||
tty.error(f"Permission problem writing to {mirror_url}: {msg}")
|
||||
return False
|
||||
raise
|
||||
|
||||
|
||||
def remove_other_mirrors(mirrors_to_keep, scope=None):
|
||||
@@ -1433,6 +1448,10 @@ def copy_stage_logs_to_artifacts(job_spec: spack.spec.Spec, job_log_dir: str) ->
|
||||
job_log_dir: path into which build log should be copied
|
||||
"""
|
||||
tty.debug(f"job spec: {job_spec}")
|
||||
if not job_spec:
|
||||
msg = f"Cannot copy stage logs: job spec ({job_spec}) is required"
|
||||
tty.error(msg)
|
||||
return
|
||||
|
||||
try:
|
||||
pkg_cls = spack.repo.PATH.get_pkg_class(job_spec.name)
|
||||
@@ -2064,7 +2083,7 @@ def read_broken_spec(broken_spec_url):
|
||||
"""
|
||||
try:
|
||||
_, _, fs = web_util.read_from_url(broken_spec_url)
|
||||
except web_util.SpackWebError:
|
||||
except (URLError, web_util.SpackWebError, HTTPError):
|
||||
tty.warn(f"Unable to read broken spec from {broken_spec_url}")
|
||||
return None
|
||||
|
||||
|
@@ -3,24 +3,28 @@
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
import argparse
|
||||
import copy
|
||||
import glob
|
||||
import hashlib
|
||||
import json
|
||||
import multiprocessing
|
||||
import multiprocessing.pool
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
from typing import List, Tuple
|
||||
from typing import Dict, List, Optional, Tuple, Union
|
||||
|
||||
import llnl.util.tty as tty
|
||||
from llnl.string import plural
|
||||
from llnl.util.lang import elide_list, stable_partition
|
||||
from llnl.util.lang import elide_list
|
||||
|
||||
import spack.binary_distribution as bindist
|
||||
import spack.cmd
|
||||
import spack.config
|
||||
import spack.deptypes as dt
|
||||
import spack.environment as ev
|
||||
import spack.error
|
||||
import spack.hash_types as ht
|
||||
import spack.mirror
|
||||
import spack.oci.oci
|
||||
import spack.oci.opener
|
||||
@@ -31,13 +35,28 @@
|
||||
import spack.store
|
||||
import spack.user_environment
|
||||
import spack.util.crypto
|
||||
import spack.util.parallel
|
||||
import spack.util.url as url_util
|
||||
import spack.util.web as web_util
|
||||
from spack import traverse
|
||||
from spack.build_environment import determine_number_of_jobs
|
||||
from spack.cmd import display_specs
|
||||
from spack.cmd.common import arguments
|
||||
from spack.oci.image import ImageReference
|
||||
from spack.oci.image import (
|
||||
Digest,
|
||||
ImageReference,
|
||||
default_config,
|
||||
default_index_tag,
|
||||
default_manifest,
|
||||
default_tag,
|
||||
tag_is_spec,
|
||||
)
|
||||
from spack.oci.oci import (
|
||||
copy_missing_layers_with_retry,
|
||||
get_manifest_and_config_with_retry,
|
||||
list_tags,
|
||||
upload_blob_with_retry,
|
||||
upload_manifest_with_retry,
|
||||
)
|
||||
from spack.spec import Spec, save_dependency_specfiles
|
||||
|
||||
description = "create, download and install binary packages"
|
||||
@@ -93,17 +112,6 @@ def setup_parser(subparser: argparse.ArgumentParser):
|
||||
"Alternatively, one can decide to build a cache for only the package or only the "
|
||||
"dependencies",
|
||||
)
|
||||
with_or_without_build_deps = push.add_mutually_exclusive_group()
|
||||
with_or_without_build_deps.add_argument(
|
||||
"--with-build-dependencies",
|
||||
action="store_true",
|
||||
help="include build dependencies in the buildcache",
|
||||
)
|
||||
with_or_without_build_deps.add_argument(
|
||||
"--without-build-dependencies",
|
||||
action="store_true",
|
||||
help="exclude build dependencies from the buildcache",
|
||||
)
|
||||
push.add_argument(
|
||||
"--fail-fast",
|
||||
action="store_true",
|
||||
@@ -321,6 +329,39 @@ def _format_spec(spec: Spec) -> str:
|
||||
return spec.cformat("{name}{@version}{/hash:7}")
|
||||
|
||||
|
||||
def _progress(i: int, total: int):
|
||||
if total > 1:
|
||||
digits = len(str(total))
|
||||
return f"[{i+1:{digits}}/{total}] "
|
||||
return ""
|
||||
|
||||
|
||||
class NoPool:
|
||||
def map(self, func, args):
|
||||
return [func(a) for a in args]
|
||||
|
||||
def starmap(self, func, args):
|
||||
return [func(*a) for a in args]
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *args):
|
||||
pass
|
||||
|
||||
|
||||
MaybePool = Union[multiprocessing.pool.Pool, NoPool]
|
||||
|
||||
|
||||
def _make_pool() -> MaybePool:
|
||||
"""Can't use threading because it's unsafe, and can't use spawned processes because of globals.
|
||||
That leaves only forking"""
|
||||
if multiprocessing.get_start_method() == "fork":
|
||||
return multiprocessing.pool.Pool(determine_number_of_jobs(parallel=True))
|
||||
else:
|
||||
return NoPool()
|
||||
|
||||
|
||||
def _skip_no_redistribute_for_public(specs):
|
||||
remaining_specs = list()
|
||||
removed_specs = list()
|
||||
@@ -340,45 +381,6 @@ def _skip_no_redistribute_for_public(specs):
|
||||
return remaining_specs
|
||||
|
||||
|
||||
class PackagesAreNotInstalledError(spack.error.SpackError):
|
||||
"""Raised when a list of specs is not installed but picked to be packaged."""
|
||||
|
||||
def __init__(self, specs: List[Spec]):
|
||||
super().__init__(
|
||||
"Cannot push non-installed packages",
|
||||
", ".join(elide_list([_format_spec(s) for s in specs], 5)),
|
||||
)
|
||||
|
||||
|
||||
class PackageNotInstalledError(spack.error.SpackError):
|
||||
"""Raised when a spec is not installed but picked to be packaged."""
|
||||
|
||||
|
||||
def _specs_to_be_packaged(
|
||||
requested: List[Spec], things_to_install: str, build_deps: bool
|
||||
) -> List[Spec]:
|
||||
"""Collect all non-external with or without roots and dependencies"""
|
||||
if "dependencies" not in things_to_install:
|
||||
deptype = dt.NONE
|
||||
elif build_deps:
|
||||
deptype = dt.ALL
|
||||
else:
|
||||
deptype = dt.RUN | dt.LINK | dt.TEST
|
||||
specs = [
|
||||
s
|
||||
for s in traverse.traverse_nodes(
|
||||
requested,
|
||||
root="package" in things_to_install,
|
||||
deptype=deptype,
|
||||
order="breadth",
|
||||
key=traverse.by_dag_hash,
|
||||
)
|
||||
if not s.external
|
||||
]
|
||||
specs.reverse()
|
||||
return specs
|
||||
|
||||
|
||||
def push_fn(args):
|
||||
"""create a binary package and push it to a mirror"""
|
||||
if args.spec_file:
|
||||
@@ -410,84 +412,91 @@ def push_fn(args):
|
||||
|
||||
# For OCI images, we require dependencies to be pushed for now.
|
||||
if target_image:
|
||||
if "dependencies" not in args.things_to_install:
|
||||
tty.die("Dependencies must be pushed for OCI images.")
|
||||
if not unsigned:
|
||||
tty.warn(
|
||||
"Code signing is currently not supported for OCI images. "
|
||||
"Use --unsigned to silence this warning."
|
||||
)
|
||||
unsigned = True
|
||||
|
||||
# Select a signing key, or None if unsigned.
|
||||
signing_key = None if unsigned else (args.key or bindist.select_signing_key())
|
||||
|
||||
specs = _specs_to_be_packaged(
|
||||
# This is a list of installed, non-external specs.
|
||||
specs = bindist.specs_to_be_packaged(
|
||||
roots,
|
||||
things_to_install=args.things_to_install,
|
||||
build_deps=args.with_build_dependencies or not args.without_build_dependencies,
|
||||
root="package" in args.things_to_install,
|
||||
dependencies="dependencies" in args.things_to_install,
|
||||
)
|
||||
|
||||
if not args.private:
|
||||
specs = _skip_no_redistribute_for_public(specs)
|
||||
|
||||
# When pushing multiple specs, print the url once ahead of time, as well as how
|
||||
# many specs are being pushed.
|
||||
if len(specs) > 1:
|
||||
tty.info(f"Selected {len(specs)} specs to push to {push_url}")
|
||||
|
||||
# Pushing not installed specs is an error. Either fail fast or populate the error list and
|
||||
# push installed package in best effort mode.
|
||||
failed: List[Tuple[Spec, BaseException]] = []
|
||||
with spack.store.STORE.db.read_transaction():
|
||||
if any(not s.installed for s in specs):
|
||||
specs, not_installed = stable_partition(specs, lambda s: s.installed)
|
||||
if args.fail_fast:
|
||||
raise PackagesAreNotInstalledError(not_installed)
|
||||
else:
|
||||
failed.extend(
|
||||
(s, PackageNotInstalledError("package not installed")) for s in not_installed
|
||||
)
|
||||
failed = []
|
||||
|
||||
with bindist.default_push_context() as (tmpdir, executor):
|
||||
if target_image:
|
||||
base_image = ImageReference.from_string(args.base_image) if args.base_image else None
|
||||
skipped, base_images, checksums, upload_errors = bindist._push_oci(
|
||||
# TODO: unify this logic in the future.
|
||||
if target_image:
|
||||
base_image = ImageReference.from_string(args.base_image) if args.base_image else None
|
||||
with tempfile.TemporaryDirectory(
|
||||
dir=spack.stage.get_stage_root()
|
||||
) as tmpdir, _make_pool() as pool:
|
||||
skipped, base_images, checksums = _push_oci(
|
||||
target_image=target_image,
|
||||
base_image=base_image,
|
||||
installed_specs_with_deps=specs,
|
||||
force=args.force,
|
||||
tmpdir=tmpdir,
|
||||
executor=executor,
|
||||
pool=pool,
|
||||
)
|
||||
|
||||
if upload_errors:
|
||||
failed.extend(upload_errors)
|
||||
|
||||
# Apart from creating manifests for each individual spec, we allow users to create a
|
||||
# separate image tag for all root specs and their runtime dependencies.
|
||||
elif args.tag:
|
||||
if args.tag:
|
||||
tagged_image = target_image.with_tag(args.tag)
|
||||
# _push_oci may not populate base_images if binaries were already in the registry
|
||||
for spec in roots:
|
||||
bindist._oci_update_base_images(
|
||||
_update_base_images(
|
||||
base_image=base_image,
|
||||
target_image=target_image,
|
||||
spec=spec,
|
||||
base_image_cache=base_images,
|
||||
)
|
||||
bindist._oci_put_manifest(
|
||||
base_images, checksums, tagged_image, tmpdir, None, None, *roots
|
||||
)
|
||||
_put_manifest(base_images, checksums, tagged_image, tmpdir, None, None, *roots)
|
||||
tty.info(f"Tagged {tagged_image}")
|
||||
|
||||
else:
|
||||
skipped, upload_errors = bindist._push(
|
||||
specs,
|
||||
out_url=push_url,
|
||||
force=args.force,
|
||||
update_index=args.update_index,
|
||||
signing_key=signing_key,
|
||||
tmpdir=tmpdir,
|
||||
executor=executor,
|
||||
)
|
||||
failed.extend(upload_errors)
|
||||
else:
|
||||
skipped = []
|
||||
|
||||
for i, spec in enumerate(specs):
|
||||
try:
|
||||
bindist.push_or_raise(
|
||||
spec,
|
||||
push_url,
|
||||
bindist.PushOptions(
|
||||
force=args.force,
|
||||
unsigned=unsigned,
|
||||
key=args.key,
|
||||
regenerate_index=args.update_index,
|
||||
),
|
||||
)
|
||||
|
||||
msg = f"{_progress(i, len(specs))}Pushed {_format_spec(spec)}"
|
||||
if len(specs) == 1:
|
||||
msg += f" to {push_url}"
|
||||
tty.info(msg)
|
||||
|
||||
except bindist.NoOverwriteException:
|
||||
skipped.append(_format_spec(spec))
|
||||
|
||||
# Catch any other exception unless the fail fast option is set
|
||||
except Exception as e:
|
||||
if args.fail_fast or isinstance(
|
||||
e, (bindist.PickKeyException, bindist.NoKeyException)
|
||||
):
|
||||
raise
|
||||
failed.append((_format_spec(spec), e))
|
||||
|
||||
if skipped:
|
||||
if len(specs) == 1:
|
||||
@@ -510,22 +519,389 @@ def push_fn(args):
|
||||
raise spack.error.SpackError(
|
||||
f"The following {len(failed)} errors occurred while pushing specs to the buildcache",
|
||||
"\n".join(
|
||||
elide_list(
|
||||
[
|
||||
f" {_format_spec(spec)}: {e.__class__.__name__}: {e}"
|
||||
for spec, e in failed
|
||||
],
|
||||
5,
|
||||
)
|
||||
elide_list([f" {spec}: {e.__class__.__name__}: {e}" for spec, e in failed], 5)
|
||||
),
|
||||
)
|
||||
|
||||
# Update the OCI index if requested
|
||||
# Update the index if requested
|
||||
# TODO: remove update index logic out of bindist; should be once after all specs are pushed
|
||||
# not once per spec.
|
||||
if target_image and len(skipped) < len(specs) and args.update_index:
|
||||
with tempfile.TemporaryDirectory(
|
||||
dir=spack.stage.get_stage_root()
|
||||
) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor:
|
||||
bindist._oci_update_index(target_image, tmpdir, executor)
|
||||
) as tmpdir, _make_pool() as pool:
|
||||
_update_index_oci(target_image, tmpdir, pool)
|
||||
|
||||
|
||||
def _get_spack_binary_blob(image_ref: ImageReference) -> Optional[spack.oci.oci.Blob]:
|
||||
"""Get the spack tarball layer digests and size if it exists"""
|
||||
try:
|
||||
manifest, config = get_manifest_and_config_with_retry(image_ref)
|
||||
|
||||
return spack.oci.oci.Blob(
|
||||
compressed_digest=Digest.from_string(manifest["layers"][-1]["digest"]),
|
||||
uncompressed_digest=Digest.from_string(config["rootfs"]["diff_ids"][-1]),
|
||||
size=manifest["layers"][-1]["size"],
|
||||
)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def _push_single_spack_binary_blob(image_ref: ImageReference, spec: spack.spec.Spec, tmpdir: str):
|
||||
filename = os.path.join(tmpdir, f"{spec.dag_hash()}.tar.gz")
|
||||
|
||||
# Create an oci.image.layer aka tarball of the package
|
||||
compressed_tarfile_checksum, tarfile_checksum = spack.oci.oci.create_tarball(spec, filename)
|
||||
|
||||
blob = spack.oci.oci.Blob(
|
||||
Digest.from_sha256(compressed_tarfile_checksum),
|
||||
Digest.from_sha256(tarfile_checksum),
|
||||
os.path.getsize(filename),
|
||||
)
|
||||
|
||||
# Upload the blob
|
||||
upload_blob_with_retry(image_ref, file=filename, digest=blob.compressed_digest)
|
||||
|
||||
# delete the file
|
||||
os.unlink(filename)
|
||||
|
||||
return blob
|
||||
|
||||
|
||||
def _retrieve_env_dict_from_config(config: dict) -> dict:
|
||||
"""Retrieve the environment variables from the image config file.
|
||||
Sets a default value for PATH if it is not present.
|
||||
|
||||
Args:
|
||||
config (dict): The image config file.
|
||||
|
||||
Returns:
|
||||
dict: The environment variables.
|
||||
"""
|
||||
env = {"PATH": "/bin:/usr/bin"}
|
||||
|
||||
if "Env" in config.get("config", {}):
|
||||
for entry in config["config"]["Env"]:
|
||||
key, value = entry.split("=", 1)
|
||||
env[key] = value
|
||||
return env
|
||||
|
||||
|
||||
def _archspec_to_gooarch(spec: spack.spec.Spec) -> str:
|
||||
name = spec.target.family.name
|
||||
name_map = {"aarch64": "arm64", "x86_64": "amd64"}
|
||||
return name_map.get(name, name)
|
||||
|
||||
|
||||
def _put_manifest(
|
||||
base_images: Dict[str, Tuple[dict, dict]],
|
||||
checksums: Dict[str, spack.oci.oci.Blob],
|
||||
image_ref: ImageReference,
|
||||
tmpdir: str,
|
||||
extra_config: Optional[dict],
|
||||
annotations: Optional[dict],
|
||||
*specs: spack.spec.Spec,
|
||||
):
|
||||
architecture = _archspec_to_gooarch(specs[0])
|
||||
|
||||
dependencies = list(
|
||||
reversed(
|
||||
list(
|
||||
s
|
||||
for s in traverse.traverse_nodes(
|
||||
specs, order="topo", deptype=("link", "run"), root=True
|
||||
)
|
||||
if not s.external
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
base_manifest, base_config = base_images[architecture]
|
||||
env = _retrieve_env_dict_from_config(base_config)
|
||||
|
||||
# If the base image uses `vnd.docker.distribution.manifest.v2+json`, then we use that too.
|
||||
# This is because Singularity / Apptainer is very strict about not mixing them.
|
||||
base_manifest_mediaType = base_manifest.get(
|
||||
"mediaType", "application/vnd.oci.image.manifest.v1+json"
|
||||
)
|
||||
use_docker_format = (
|
||||
base_manifest_mediaType == "application/vnd.docker.distribution.manifest.v2+json"
|
||||
)
|
||||
|
||||
spack.user_environment.environment_modifications_for_specs(*specs).apply_modifications(env)
|
||||
|
||||
# Create an oci.image.config file
|
||||
config = copy.deepcopy(base_config)
|
||||
|
||||
# Add the diff ids of the dependencies
|
||||
for s in dependencies:
|
||||
config["rootfs"]["diff_ids"].append(str(checksums[s.dag_hash()].uncompressed_digest))
|
||||
|
||||
# Set the environment variables
|
||||
config["config"]["Env"] = [f"{k}={v}" for k, v in env.items()]
|
||||
|
||||
if extra_config:
|
||||
# From the OCI v1.0 spec:
|
||||
# > Any extra fields in the Image JSON struct are considered implementation
|
||||
# > specific and MUST be ignored by any implementations which are unable to
|
||||
# > interpret them.
|
||||
config.update(extra_config)
|
||||
|
||||
config_file = os.path.join(tmpdir, f"{specs[0].dag_hash()}.config.json")
|
||||
|
||||
with open(config_file, "w") as f:
|
||||
json.dump(config, f, separators=(",", ":"))
|
||||
|
||||
config_file_checksum = Digest.from_sha256(
|
||||
spack.util.crypto.checksum(hashlib.sha256, config_file)
|
||||
)
|
||||
|
||||
# Upload the config file
|
||||
upload_blob_with_retry(image_ref, file=config_file, digest=config_file_checksum)
|
||||
|
||||
manifest = {
|
||||
"mediaType": base_manifest_mediaType,
|
||||
"schemaVersion": 2,
|
||||
"config": {
|
||||
"mediaType": base_manifest["config"]["mediaType"],
|
||||
"digest": str(config_file_checksum),
|
||||
"size": os.path.getsize(config_file),
|
||||
},
|
||||
"layers": [
|
||||
*(layer for layer in base_manifest["layers"]),
|
||||
*(
|
||||
{
|
||||
"mediaType": (
|
||||
"application/vnd.docker.image.rootfs.diff.tar.gzip"
|
||||
if use_docker_format
|
||||
else "application/vnd.oci.image.layer.v1.tar+gzip"
|
||||
),
|
||||
"digest": str(checksums[s.dag_hash()].compressed_digest),
|
||||
"size": checksums[s.dag_hash()].size,
|
||||
}
|
||||
for s in dependencies
|
||||
),
|
||||
],
|
||||
}
|
||||
|
||||
if not use_docker_format and annotations:
|
||||
manifest["annotations"] = annotations
|
||||
|
||||
# Finally upload the manifest
|
||||
upload_manifest_with_retry(image_ref, manifest=manifest)
|
||||
|
||||
# delete the config file
|
||||
os.unlink(config_file)
|
||||
|
||||
|
||||
def _update_base_images(
|
||||
*,
|
||||
base_image: Optional[ImageReference],
|
||||
target_image: ImageReference,
|
||||
spec: spack.spec.Spec,
|
||||
base_image_cache: Dict[str, Tuple[dict, dict]],
|
||||
):
|
||||
"""For a given spec and base image, copy the missing layers of the base image with matching
|
||||
arch to the registry of the target image. If no base image is specified, create a dummy
|
||||
manifest and config file."""
|
||||
architecture = _archspec_to_gooarch(spec)
|
||||
if architecture in base_image_cache:
|
||||
return
|
||||
if base_image is None:
|
||||
base_image_cache[architecture] = (
|
||||
default_manifest(),
|
||||
default_config(architecture, "linux"),
|
||||
)
|
||||
else:
|
||||
base_image_cache[architecture] = copy_missing_layers_with_retry(
|
||||
base_image, target_image, architecture
|
||||
)
|
||||
|
||||
|
||||
def _push_oci(
|
||||
*,
|
||||
target_image: ImageReference,
|
||||
base_image: Optional[ImageReference],
|
||||
installed_specs_with_deps: List[Spec],
|
||||
tmpdir: str,
|
||||
pool: MaybePool,
|
||||
force: bool = False,
|
||||
) -> Tuple[List[str], Dict[str, Tuple[dict, dict]], Dict[str, spack.oci.oci.Blob]]:
|
||||
"""Push specs to an OCI registry
|
||||
|
||||
Args:
|
||||
image_ref: The target OCI image
|
||||
base_image: Optional base image, which will be copied to the target registry.
|
||||
installed_specs_with_deps: The installed specs to push, excluding externals,
|
||||
including deps, ordered from roots to leaves.
|
||||
force: Whether to overwrite existing layers and manifests in the buildcache.
|
||||
|
||||
Returns:
|
||||
A tuple consisting of the list of skipped specs already in the build cache,
|
||||
a dictionary mapping architectures to base image manifests and configs,
|
||||
and a dictionary mapping each spec's dag hash to a blob.
|
||||
"""
|
||||
|
||||
# Reverse the order
|
||||
installed_specs_with_deps = list(reversed(installed_specs_with_deps))
|
||||
|
||||
# Spec dag hash -> blob
|
||||
checksums: Dict[str, spack.oci.oci.Blob] = {}
|
||||
|
||||
# arch -> (manifest, config)
|
||||
base_images: Dict[str, Tuple[dict, dict]] = {}
|
||||
|
||||
# Specs not uploaded because they already exist
|
||||
skipped = []
|
||||
|
||||
if not force:
|
||||
tty.info("Checking for existing specs in the buildcache")
|
||||
to_be_uploaded = []
|
||||
|
||||
tags_to_check = (target_image.with_tag(default_tag(s)) for s in installed_specs_with_deps)
|
||||
available_blobs = pool.map(_get_spack_binary_blob, tags_to_check)
|
||||
|
||||
for spec, maybe_blob in zip(installed_specs_with_deps, available_blobs):
|
||||
if maybe_blob is not None:
|
||||
checksums[spec.dag_hash()] = maybe_blob
|
||||
skipped.append(_format_spec(spec))
|
||||
else:
|
||||
to_be_uploaded.append(spec)
|
||||
else:
|
||||
to_be_uploaded = installed_specs_with_deps
|
||||
|
||||
if not to_be_uploaded:
|
||||
return skipped, base_images, checksums
|
||||
|
||||
tty.info(
|
||||
f"{len(to_be_uploaded)} specs need to be pushed to "
|
||||
f"{target_image.domain}/{target_image.name}"
|
||||
)
|
||||
|
||||
# Upload blobs
|
||||
new_blobs = pool.starmap(
|
||||
_push_single_spack_binary_blob, ((target_image, spec, tmpdir) for spec in to_be_uploaded)
|
||||
)
|
||||
|
||||
# And update the spec to blob mapping
|
||||
for spec, blob in zip(to_be_uploaded, new_blobs):
|
||||
checksums[spec.dag_hash()] = blob
|
||||
|
||||
# Copy base images if necessary
|
||||
for spec in to_be_uploaded:
|
||||
_update_base_images(
|
||||
base_image=base_image,
|
||||
target_image=target_image,
|
||||
spec=spec,
|
||||
base_image_cache=base_images,
|
||||
)
|
||||
|
||||
def extra_config(spec: Spec):
|
||||
spec_dict = spec.to_dict(hash=ht.dag_hash)
|
||||
spec_dict["buildcache_layout_version"] = 1
|
||||
spec_dict["binary_cache_checksum"] = {
|
||||
"hash_algorithm": "sha256",
|
||||
"hash": checksums[spec.dag_hash()].compressed_digest.digest,
|
||||
}
|
||||
return spec_dict
|
||||
|
||||
# Upload manifests
|
||||
tty.info("Uploading manifests")
|
||||
pool.starmap(
|
||||
_put_manifest,
|
||||
(
|
||||
(
|
||||
base_images,
|
||||
checksums,
|
||||
target_image.with_tag(default_tag(spec)),
|
||||
tmpdir,
|
||||
extra_config(spec),
|
||||
{"org.opencontainers.image.description": spec.format()},
|
||||
spec,
|
||||
)
|
||||
for spec in to_be_uploaded
|
||||
),
|
||||
)
|
||||
|
||||
# Print the image names of the top-level specs
|
||||
for spec in to_be_uploaded:
|
||||
tty.info(f"Pushed {_format_spec(spec)} to {target_image.with_tag(default_tag(spec))}")
|
||||
|
||||
return skipped, base_images, checksums
|
||||
|
||||
|
||||
def _config_from_tag(image_ref: ImageReference, tag: str) -> Optional[dict]:
    # Don't allow recursion here, since Spack itself always uploads
    # vnd.oci.image.manifest.v1+json, not vnd.oci.image.index.v1+json
    _, config = get_manifest_and_config_with_retry(image_ref.with_tag(tag), tag, recurse=0)

    # Do very basic validation: if "spec" is a key in the config, it
    # must be a Spec object too.
    return config if "spec" in config else None
|
||||
|
||||
|
||||
def _update_index_oci(image_ref: ImageReference, tmpdir: str, pool: MaybePool) -> None:
    tags = list_tags(image_ref)

    # Fetch all image config files in parallel
    spec_dicts = pool.starmap(
        _config_from_tag, ((image_ref, tag) for tag in tags if tag_is_spec(tag))
    )

    # Populate the database
    db_root_dir = os.path.join(tmpdir, "db_root")
    db = bindist.BuildCacheDatabase(db_root_dir)

    for spec_dict in spec_dicts:
        spec = Spec.from_dict(spec_dict)
        db.add(spec, directory_layout=None)
        db.mark(spec, "in_buildcache", True)

    # Create the index.json file
    index_json_path = os.path.join(tmpdir, "index.json")
    with open(index_json_path, "w") as f:
        db._write_to_file(f)

    # Create an empty config.json file
    empty_config_json_path = os.path.join(tmpdir, "config.json")
    with open(empty_config_json_path, "wb") as f:
        f.write(b"{}")

    # Upload the index.json file
    index_shasum = Digest.from_sha256(spack.util.crypto.checksum(hashlib.sha256, index_json_path))
    upload_blob_with_retry(image_ref, file=index_json_path, digest=index_shasum)

    # Upload the config.json file
    empty_config_digest = Digest.from_sha256(
        spack.util.crypto.checksum(hashlib.sha256, empty_config_json_path)
    )
    upload_blob_with_retry(image_ref, file=empty_config_json_path, digest=empty_config_digest)

    # Push a manifest file that references the index.json file as a layer
    # Notice that we push this as if it is an image, which it of course is not.
    # When the ORAS spec becomes official, we can use that instead of a fake image.
    # For now we just use the OCI image spec, so that we don't run into issues with
    # automatic garbage collection of blobs that are not referenced by any image manifest.
    oci_manifest = {
        "mediaType": "application/vnd.oci.image.manifest.v1+json",
        "schemaVersion": 2,
        # Config is just an empty {} file for now, and irrelevant
        "config": {
            "mediaType": "application/vnd.oci.image.config.v1+json",
            "digest": str(empty_config_digest),
            "size": os.path.getsize(empty_config_json_path),
        },
        # The buildcache index is the only layer, and is not a tarball, we lie here.
        "layers": [
            {
                "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
                "digest": str(index_shasum),
                "size": os.path.getsize(index_json_path),
            }
        ],
    }

    upload_manifest_with_retry(image_ref.with_tag(default_index_tag), oci_manifest)
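To make the index layout above concrete, here is a minimal, self-contained sketch of building the same kind of single-layer manifest for an arbitrary pair of files. The helper names (file_descriptor, index_manifest), the temporary paths, and the made-up index contents are hypothetical; only the media types and the digest/size descriptor shape come from the code above.

import hashlib
import json
import os
import tempfile


def file_descriptor(path: str, media_type: str) -> dict:
    # An OCI descriptor is just (mediaType, digest, size) for a blob on disk.
    sha = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return {
        "mediaType": media_type,
        "digest": f"sha256:{sha.hexdigest()}",
        "size": os.path.getsize(path),
    }


def index_manifest(config_path: str, index_path: str) -> dict:
    # Wrap the buildcache index as the single layer of a minimal image manifest.
    return {
        "mediaType": "application/vnd.oci.image.manifest.v1+json",
        "schemaVersion": 2,
        "config": file_descriptor(config_path, "application/vnd.oci.image.config.v1+json"),
        "layers": [file_descriptor(index_path, "application/vnd.oci.image.layer.v1.tar+gzip")],
    }


with tempfile.TemporaryDirectory() as tmpdir:
    config_json = os.path.join(tmpdir, "config.json")
    index_json = os.path.join(tmpdir, "index.json")
    with open(config_json, "wb") as f:
        f.write(b"{}")
    with open(index_json, "w") as f:
        json.dump({"database": {"installs": {}}}, f)  # made-up index contents
    print(json.dumps(index_manifest(config_json, index_json), indent=2))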
|
||||
|
||||
|
||||
def install_fn(args):
|
||||
@@ -806,15 +1182,14 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
|
||||
if image_ref:
|
||||
with tempfile.TemporaryDirectory(
|
||||
dir=spack.stage.get_stage_root()
|
||||
) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor:
|
||||
bindist._oci_update_index(image_ref, tmpdir, executor)
|
||||
) as tmpdir, _make_pool() as pool:
|
||||
_update_index_oci(image_ref, tmpdir, pool)
|
||||
return
|
||||
|
||||
# Otherwise, assume a normal mirror.
|
||||
url = mirror.push_url
|
||||
|
||||
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
|
||||
bindist.generate_package_index(url, tmpdir)
|
||||
bindist.generate_package_index(url_util.join(url, bindist.build_cache_relative_path()))
|
||||
|
||||
if update_keys:
|
||||
keys_url = url_util.join(
|
||||
@@ -822,8 +1197,7 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
|
||||
)
|
||||
|
||||
try:
|
||||
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
|
||||
bindist.generate_key_index(keys_url, tmpdir)
|
||||
bindist.generate_key_index(keys_url)
|
||||
except bindist.CannotListKeys as e:
|
||||
# Do not error out if listing keys went wrong. This usually means that the _gpg path
|
||||
# does not exist. TODO: distinguish between this and other errors.
|
||||
|
@@ -6,7 +6,6 @@
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from glob import glob
|
||||
|
||||
@@ -63,10 +62,9 @@ def create_db_tarball(args):
|
||||
|
||||
base = os.path.basename(str(spack.store.STORE.root))
|
||||
transform_args = []
|
||||
# Currently --transform and -s are not supported by Windows native tar
|
||||
if "GNU" in tar("--version", output=str):
|
||||
transform_args = ["--transform", "s/^%s/%s/" % (base, tarball_name)]
|
||||
elif sys.platform != "win32":
|
||||
else:
|
||||
transform_args = ["-s", "/^%s/%s/" % (base, tarball_name)]
|
||||
|
||||
wd = os.path.dirname(str(spack.store.STORE.root))
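The hunk above switches between GNU tar's --transform and the BSD-style -s flag for renaming the database directory inside the tarball; the two adjacent branch lines are the before/after sides of the diff. A small sketch of that decision, assuming the final code keeps the platform guard (an assumption) and that the tar version string is already available:

import sys


def rename_args(tar_version_output: str, base: str, new_name: str) -> list:
    # Pick the flag that renames `base` to `new_name` inside the archive.
    if "GNU" in tar_version_output:
        return ["--transform", "s/^%s/%s/" % (base, new_name)]
    if sys.platform != "win32":
        # BSD tar (e.g. macOS) spells the same rewrite with -s.
        return ["-s", "/^%s/%s/" % (base, new_name)]
    # Windows native tar supports neither flag, so skip the rename.
    return []


print(rename_args("bsdtar 3.5.1 - libarchive 3.5.1", ".spack-db", "db-tarball"))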
|
||||
@@ -92,6 +90,7 @@ def report(args):
|
||||
print("* **Spack:**", get_version())
|
||||
print("* **Python:**", platform.python_version())
|
||||
print("* **Platform:**", architecture)
|
||||
print("* **Concretizer:**", spack.config.get("config:concretizer"))
|
||||
|
||||
|
||||
def debug(parser, args):
|
||||
|
@@ -468,30 +468,32 @@ def env_remove(args):
|
||||
This removes an environment managed by Spack. Directory environments
|
||||
and manifests embedded in repositories should be removed manually.
|
||||
"""
|
||||
remove_envs = []
|
||||
read_envs = []
|
||||
valid_envs = []
|
||||
bad_envs = []
|
||||
invalid_envs = []
|
||||
|
||||
for env_name in ev.all_environment_names():
|
||||
try:
|
||||
env = ev.read(env_name)
|
||||
valid_envs.append(env)
|
||||
valid_envs.append(env_name)
|
||||
|
||||
if env_name in args.rm_env:
|
||||
remove_envs.append(env)
|
||||
read_envs.append(env)
|
||||
except (spack.config.ConfigFormatError, ev.SpackEnvironmentConfigError):
|
||||
invalid_envs.append(env_name)
|
||||
|
||||
if env_name in args.rm_env:
|
||||
bad_envs.append(env_name)
|
||||
|
||||
# Check if remove_env is included from another env before trying to remove
|
||||
for env in valid_envs:
|
||||
for remove_env in remove_envs:
|
||||
# Check if env is linked to another before trying to remove
|
||||
for name in valid_envs:
|
||||
# don't check if environment is included to itself
|
||||
if env.name == remove_env.name:
|
||||
if name == env_name:
|
||||
continue
|
||||
|
||||
if remove_env.path in env.included_concrete_envs:
|
||||
msg = f'Environment "{remove_env.name}" is being used by environment "{env.name}"'
|
||||
environ = ev.Environment(ev.root(name))
|
||||
if ev.root(env_name) in environ.included_concrete_envs:
|
||||
msg = f'Environment "{env_name}" is being used by environment "{name}"'
|
||||
if args.force:
|
||||
tty.warn(msg)
|
||||
else:
|
||||
@@ -504,7 +506,7 @@ def env_remove(args):
|
||||
if not answer:
|
||||
tty.die("Will not remove any environments")
|
||||
|
||||
for env in remove_envs:
|
||||
for env in read_envs:
|
||||
name = env.name
|
||||
if env.active:
|
||||
tty.die(f"Environment {name} can't be removed while activated.")
|
||||
|
@@ -5,12 +5,10 @@
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
import spack.binary_distribution
|
||||
import spack.mirror
|
||||
import spack.paths
|
||||
import spack.stage
|
||||
import spack.util.gpg
|
||||
import spack.util.url
|
||||
from spack.cmd.common import arguments
|
||||
@@ -117,7 +115,6 @@ def setup_parser(subparser):
|
||||
help="URL of the mirror where keys will be published",
|
||||
)
|
||||
publish.add_argument(
|
||||
"--update-index",
|
||||
"--rebuild-index",
|
||||
action="store_true",
|
||||
default=False,
|
||||
@@ -223,10 +220,9 @@ def gpg_publish(args):
|
||||
elif args.mirror_url:
|
||||
mirror = spack.mirror.Mirror(args.mirror_url, args.mirror_url)
|
||||
|
||||
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
|
||||
spack.binary_distribution.push_keys(
|
||||
mirror, keys=args.keys, tmpdir=tmpdir, update_index=args.update_index
|
||||
)
|
||||
spack.binary_distribution.push_keys(
|
||||
mirror, keys=args.keys, regenerate_index=args.rebuild_index
|
||||
)
|
||||
|
||||
|
||||
def gpg(parser, args):
|
||||
|
@@ -502,7 +502,7 @@ def print_licenses(pkg, args):
|
||||
|
||||
def info(parser, args):
|
||||
spec = spack.spec.Spec(args.package)
|
||||
pkg_cls = spack.repo.PATH.get_pkg_class(spec.fullname)
|
||||
pkg_cls = spack.repo.PATH.get_pkg_class(spec.name)
|
||||
pkg = pkg_cls(spec)
|
||||
|
||||
# Output core package information
|
||||
|
@@ -23,6 +23,11 @@ def setup_parser(subparser):
|
||||
output.add_argument(
|
||||
"-s", "--safe", action="store_true", help="only list safe versions of the package"
|
||||
)
|
||||
output.add_argument(
|
||||
"--safe-only",
|
||||
action="store_true",
|
||||
help="[deprecated] only list safe versions of the package",
|
||||
)
|
||||
output.add_argument(
|
||||
"-r", "--remote", action="store_true", help="only list remote versions of the package"
|
||||
)
|
||||
@@ -42,13 +47,17 @@ def versions(parser, args):
|
||||
|
||||
safe_versions = pkg.versions
|
||||
|
||||
if args.safe_only:
|
||||
tty.warn('"--safe-only" is deprecated. Use "--safe" instead.')
|
||||
args.safe = args.safe_only
|
||||
|
||||
if not (args.remote or args.new):
|
||||
if sys.stdout.isatty():
|
||||
tty.msg("Safe versions (already checksummed):")
|
||||
|
||||
if not safe_versions:
|
||||
if sys.stdout.isatty():
|
||||
tty.warn(f"Found no versions for {pkg.name}")
|
||||
tty.warn("Found no versions for {0}".format(pkg.name))
|
||||
tty.debug("Manually add versions to the package.")
|
||||
else:
|
||||
colify(sorted(safe_versions, reverse=True), indent=2)
|
||||
@@ -74,12 +83,12 @@ def versions(parser, args):
|
||||
if not remote_versions:
|
||||
if sys.stdout.isatty():
|
||||
if not fetched_versions:
|
||||
tty.warn(f"Found no versions for {pkg.name}")
|
||||
tty.warn("Found no versions for {0}".format(pkg.name))
|
||||
tty.debug(
|
||||
"Check the list_url and list_depth attributes of "
|
||||
"the package to help Spack find versions."
|
||||
)
|
||||
else:
|
||||
tty.warn(f"Found no unchecksummed versions for {pkg.name}")
|
||||
tty.warn("Found no unchecksummed versions for {0}".format(pkg.name))
|
||||
else:
|
||||
colify(sorted(remote_versions, reverse=True), indent=2)
|
||||
|
@@ -2,11 +2,29 @@
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""
|
||||
(DEPRECATED) Used to contain the code for the original concretizer
|
||||
Functions here are used to take abstract specs and make them concrete.
|
||||
For example, if a spec asks for a version between 1.8 and 1.9, these
|
||||
functions might take will take the most recent 1.9 version of the
|
||||
package available. Or, if the user didn't specify a compiler for a
|
||||
spec, then this will assign a compiler to the spec based on defaults
|
||||
or user preferences.
|
||||
|
||||
TODO: make this customizable and allow users to configure
|
||||
concretization policies.
|
||||
"""
|
||||
import functools
|
||||
import platform
|
||||
import tempfile
|
||||
from contextlib import contextmanager
|
||||
from itertools import chain
|
||||
from typing import Union
|
||||
|
||||
import archspec.cpu
|
||||
|
||||
import llnl.util.lang
|
||||
import llnl.util.tty as tty
|
||||
|
||||
import spack.abi
|
||||
import spack.compilers
|
||||
@@ -19,20 +37,639 @@
|
||||
import spack.target
|
||||
import spack.tengine
|
||||
import spack.util.path
|
||||
import spack.variant as vt
|
||||
from spack.package_prefs import PackagePrefs, is_spec_buildable, spec_externals
|
||||
from spack.version import ClosedOpenRange, VersionList, ver
|
||||
|
||||
#: implements rudimentary logic for ABI compatibility
_abi: Union[spack.abi.ABI, llnl.util.lang.Singleton] = llnl.util.lang.Singleton(
    lambda: spack.abi.ABI()
)
|
||||
|
||||
|
||||
@functools.total_ordering
class reverse_order:
    """Helper for creating key functions.

    This is a wrapper that inverts the sense of the natural
    comparisons on the object.
    """

    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        return other.value == self.value

    def __lt__(self, other):
        return other.value < self.value
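A short usage sketch of the wrapper above: putting one component of a sort key inside reverse_order sorts that component in descending order while the rest of the tuple stays ascending, which is how "latest version first" is expressed further down. The data here is made up.

# Standalone illustration of the reverse_order wrapper defined above.
import functools


@functools.total_ordering
class reverse_order:
    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        return other.value == self.value

    def __lt__(self, other):
        return other.value < self.value


items = [("zlib", 2), ("zlib", 1), ("openssl", 3)]
# Ascending by name, but descending by version within a name.
print(sorted(items, key=lambda t: (t[0], reverse_order(t[1]))))
# [('openssl', 3), ('zlib', 2), ('zlib', 1)]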
|
||||
|
||||
|
||||
class Concretizer:
|
||||
"""(DEPRECATED) Only contains logic to enable/disable compiler existence checks."""
|
||||
"""You can subclass this class to override some of the default
|
||||
concretization strategies, or you can override all of them.
|
||||
"""
|
||||
|
||||
#: Controls whether we check that compiler versions actually exist
|
||||
#: during concretization. Used for testing and for mirror creation
|
||||
check_for_compiler_existence = None
|
||||
|
||||
def __init__(self):
|
||||
#: Packages that the old concretizer cannot deal with correctly, and cannot build anyway.
|
||||
#: Those will not be considered as providers for virtuals.
|
||||
non_buildable_packages = {"glibc", "musl"}
|
||||
|
||||
def __init__(self, abstract_spec=None):
|
||||
if Concretizer.check_for_compiler_existence is None:
|
||||
Concretizer.check_for_compiler_existence = not spack.config.get(
|
||||
"config:install_missing_compilers", False
|
||||
)
|
||||
self.abstract_spec = abstract_spec
|
||||
self._adjust_target_answer_generator = None
|
||||
|
||||
def concretize_develop(self, spec):
|
||||
"""
|
||||
Add ``dev_path=*`` variant to packages built from local source.
|
||||
"""
|
||||
env = spack.environment.active_environment()
|
||||
dev_info = env.dev_specs.get(spec.name, {}) if env else {}
|
||||
if not dev_info:
|
||||
return False
|
||||
|
||||
path = spack.util.path.canonicalize_path(dev_info["path"], default_wd=env.path)
|
||||
|
||||
if "dev_path" in spec.variants:
|
||||
assert spec.variants["dev_path"].value == path
|
||||
changed = False
|
||||
else:
|
||||
spec.variants.setdefault("dev_path", vt.SingleValuedVariant("dev_path", path))
|
||||
changed = True
|
||||
changed |= spec.constrain(dev_info["spec"])
|
||||
return changed
|
||||
|
||||
    def _valid_virtuals_and_externals(self, spec):
        """Returns a list of candidate virtual dep providers and external
        packages that could be used to concretize a spec.

        Preferred specs come first in the list.
        """
|
||||
# First construct a list of concrete candidates to replace spec with.
|
||||
candidates = [spec]
|
||||
pref_key = lambda spec: 0 # no-op pref key
|
||||
|
||||
if spec.virtual:
|
||||
candidates = [
|
||||
s
|
||||
for s in spack.repo.PATH.providers_for(spec)
|
||||
if s.name not in self.non_buildable_packages
|
||||
]
|
||||
if not candidates:
|
||||
raise spack.error.UnsatisfiableProviderSpecError(candidates[0], spec)
|
||||
|
||||
# Find nearest spec in the DAG (up then down) that has prefs.
|
||||
spec_w_prefs = find_spec(
|
||||
spec, lambda p: PackagePrefs.has_preferred_providers(p.name, spec.name), spec
|
||||
) # default to spec itself.
|
||||
|
||||
# Create a key to sort candidates by the prefs we found
|
||||
pref_key = PackagePrefs(spec_w_prefs.name, "providers", spec.name)
|
||||
|
||||
# For each candidate package, if it has externals, add those
|
||||
# to the usable list. if it's not buildable, then *only* add
|
||||
# the externals.
|
||||
usable = []
|
||||
for cspec in candidates:
|
||||
if is_spec_buildable(cspec):
|
||||
usable.append(cspec)
|
||||
|
||||
externals = spec_externals(cspec)
|
||||
for ext in externals:
|
||||
if ext.intersects(spec):
|
||||
usable.append(ext)
|
||||
|
||||
# If nothing is in the usable list now, it's because we aren't
|
||||
# allowed to build anything.
|
||||
if not usable:
|
||||
raise NoBuildError(spec)
|
||||
|
||||
# Use a sort key to order the results
|
||||
return sorted(
|
||||
usable,
|
||||
key=lambda spec: (
|
||||
not spec.external, # prefer externals
|
||||
pref_key(spec), # respect prefs
|
||||
spec.name, # group by name
|
||||
reverse_order(spec.versions), # latest version
|
||||
spec, # natural order
|
||||
),
|
||||
)
|
||||
|
||||
def choose_virtual_or_external(self, spec: spack.spec.Spec):
|
||||
"""Given a list of candidate virtual and external packages, try to
|
||||
find one that is most ABI compatible.
|
||||
"""
|
||||
candidates = self._valid_virtuals_and_externals(spec)
|
||||
if not candidates:
|
||||
return candidates
|
||||
|
||||
# Find the nearest spec in the dag that has a compiler. We'll
|
||||
# use that spec to calibrate compiler compatibility.
|
||||
abi_exemplar = find_spec(spec, lambda x: x.compiler)
|
||||
if abi_exemplar is None:
|
||||
abi_exemplar = spec.root
|
||||
|
||||
# Sort candidates from most to least compatibility.
|
||||
# We reverse because True > False.
|
||||
# Sort is stable, so candidates keep their order.
|
||||
return sorted(
|
||||
candidates,
|
||||
reverse=True,
|
||||
key=lambda spec: (
|
||||
_abi.compatible(spec, abi_exemplar, loose=True),
|
||||
_abi.compatible(spec, abi_exemplar),
|
||||
),
|
||||
)
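The double call to _abi.compatible above works because Python's sort is stable and True > False: with reverse=True, loosely compatible candidates come first, and among those the strictly compatible ones lead. The same idiom with plain booleans and made-up candidate names:

candidates = ["gcc-built", "clang-built", "same-compiler"]
loose = {"gcc-built": True, "clang-built": False, "same-compiler": True}
strict = {"gcc-built": False, "clang-built": False, "same-compiler": True}

# Most compatible first: (loose, strict) tuples compared element-wise, reversed.
ranked = sorted(candidates, reverse=True, key=lambda c: (loose[c], strict[c]))
print(ranked)  # ['same-compiler', 'gcc-built', 'clang-built']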
|
||||
|
||||
    def concretize_version(self, spec):
        """If the spec is already concrete, return. Otherwise take
        the preferred version from spackconfig, and default to the package's
        version if there are no available versions.

        TODO: In many cases we probably want to look for installed
            versions of each package and use an installed version
            if we can link to it. The policy implemented here will
            tend to rebuild a lot of stuff because it will prefer
            a compiler in the spec to any compiler that already-installed
            things were built with. There is likely some better policy
            that finds some middle ground between these two extremes.
        """
|
||||
# return if already concrete.
|
||||
if spec.versions.concrete:
|
||||
return False
|
||||
|
||||
# List of versions we could consider, in sorted order
|
||||
pkg_versions = spec.package_class.versions
|
||||
usable = [v for v in pkg_versions if any(v.intersects(sv) for sv in spec.versions)]
|
||||
|
||||
yaml_prefs = PackagePrefs(spec.name, "version")
|
||||
|
||||
# The keys below show the order of precedence of factors used
|
||||
# to select a version when concretizing. The item with
|
||||
# the "largest" key will be selected.
|
||||
#
|
||||
# NOTE: When COMPARING VERSIONS, the '@develop' version is always
|
||||
# larger than other versions. BUT when CONCRETIZING,
|
||||
# the largest NON-develop version is selected by default.
|
||||
keyfn = lambda v: (
|
||||
# ------- Special direction from the user
|
||||
# Respect order listed in packages.yaml
|
||||
-yaml_prefs(v),
|
||||
# The preferred=True flag (packages or packages.yaml or both?)
|
||||
pkg_versions.get(v).get("preferred", False),
|
||||
# ------- Regular case: use latest non-develop version by default.
|
||||
# Avoid @develop version, which would otherwise be the "largest"
|
||||
# in straight version comparisons
|
||||
not v.isdevelop(),
|
||||
# Compare the version itself
|
||||
# This includes the logic:
|
||||
# a) develop > everything (disabled by "not v.isdevelop() above)
|
||||
# b) numeric > non-numeric
|
||||
# c) Numeric or string comparison
|
||||
v,
|
||||
)
|
||||
usable.sort(key=keyfn, reverse=True)
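The tuple returned by keyfn orders versions by: explicit packages.yaml preference first (negated, so earlier entries win), then the preferred=True flag, then "not develop", then the version itself. A stand-in sketch with plain strings instead of Spack version objects; the data and the default index are illustrative only.

# Illustrative stand-in for the keyfn above, using plain strings instead of Spack versions.
versions = ["develop", "2.1", "1.9", "2.0"]
yaml_order = {"2.0": 0}        # packages.yaml lists 2.0 first
preferred = {"1.9": True}      # version("1.9", preferred=True) in the package recipe


def keyfn(v):
    return (
        -yaml_order.get(v, 10**9),   # respect explicit packages.yaml ordering
        preferred.get(v, False),     # then the preferred=True flag
        v != "develop",              # then avoid @develop
        v,                           # finally the "largest" version (string compare here)
    )


print(sorted(versions, key=keyfn, reverse=True))
# ['2.0', '1.9', '2.1', 'develop']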
|
||||
|
||||
if usable:
|
||||
spec.versions = ver([usable[0]])
|
||||
else:
|
||||
# We don't know of any SAFE versions that match the given
|
||||
# spec. Grab the spec's versions and grab the highest
|
||||
# *non-open* part of the range of versions it specifies.
|
||||
# Someone else can raise an error if this happens,
|
||||
# e.g. when we go to fetch it and don't know how. But it
|
||||
# *might* work.
|
||||
if not spec.versions or spec.versions == VersionList([":"]):
|
||||
raise NoValidVersionError(spec)
|
||||
else:
|
||||
last = spec.versions[-1]
|
||||
if isinstance(last, ClosedOpenRange):
|
||||
range_as_version = VersionList([last]).concrete_range_as_version
|
||||
if range_as_version:
|
||||
spec.versions = ver([range_as_version])
|
||||
else:
|
||||
raise NoValidVersionError(spec)
|
||||
else:
|
||||
spec.versions = ver([last])
|
||||
|
||||
return True # Things changed
|
||||
|
||||
def concretize_architecture(self, spec):
|
||||
"""If the spec is empty provide the defaults of the platform. If the
|
||||
architecture is not a string type, then check if either the platform,
|
||||
target or operating system are concretized. If any of the fields are
|
||||
changed then return True. If everything is concretized (i.e the
|
||||
architecture attribute is a namedtuple of classes) then return False.
|
||||
If the target is a string type, then convert the string into a
|
||||
concretized architecture. If it has no architecture and the root of the
|
||||
DAG has an architecture, then use the root otherwise use the defaults
|
||||
on the platform.
|
||||
"""
|
||||
# ensure type safety for the architecture
|
||||
if spec.architecture is None:
|
||||
spec.architecture = spack.spec.ArchSpec()
|
||||
|
||||
if spec.architecture.concrete:
|
||||
return False
|
||||
|
||||
# Get platform of nearest spec with a platform, including spec
|
||||
# If spec has a platform, easy
|
||||
if spec.architecture.platform:
|
||||
new_plat = spack.platforms.by_name(spec.architecture.platform)
|
||||
else:
|
||||
# Else if anyone else has a platform, take the closest one
|
||||
# Search up, then down, along build/link deps first
|
||||
# Then any nearest. Algorithm from compilerspec search
|
||||
platform_spec = find_spec(spec, lambda x: x.architecture and x.architecture.platform)
|
||||
if platform_spec:
|
||||
new_plat = spack.platforms.by_name(platform_spec.architecture.platform)
|
||||
else:
|
||||
# If no platform anywhere in this spec, grab the default
|
||||
new_plat = spack.platforms.host()
|
||||
|
||||
# Get nearest spec with relevant platform and an os
|
||||
# Generally, same algorithm as finding platform, except we only
|
||||
# consider specs that have a platform
|
||||
if spec.architecture.os:
|
||||
new_os = spec.architecture.os
|
||||
else:
|
||||
new_os_spec = find_spec(
|
||||
spec,
|
||||
lambda x: (
|
||||
x.architecture
|
||||
and x.architecture.platform == str(new_plat)
|
||||
and x.architecture.os
|
||||
),
|
||||
)
|
||||
if new_os_spec:
|
||||
new_os = new_os_spec.architecture.os
|
||||
else:
|
||||
new_os = new_plat.operating_system("default_os")
|
||||
|
||||
# Get the nearest spec with relevant platform and a target
|
||||
# Generally, same algorithm as finding os
|
||||
curr_target = None
|
||||
if spec.architecture.target:
|
||||
curr_target = spec.architecture.target
|
||||
if spec.architecture.target and spec.architecture.target_concrete:
|
||||
new_target = spec.architecture.target
|
||||
else:
|
||||
new_target_spec = find_spec(
|
||||
spec,
|
||||
lambda x: (
|
||||
x.architecture
|
||||
and x.architecture.platform == str(new_plat)
|
||||
and x.architecture.target
|
||||
and x.architecture.target != curr_target
|
||||
),
|
||||
)
|
||||
if new_target_spec:
|
||||
if curr_target:
|
||||
# constrain one target by the other
|
||||
new_target_arch = spack.spec.ArchSpec(
|
||||
(None, None, new_target_spec.architecture.target)
|
||||
)
|
||||
curr_target_arch = spack.spec.ArchSpec((None, None, curr_target))
|
||||
curr_target_arch.constrain(new_target_arch)
|
||||
new_target = curr_target_arch.target
|
||||
else:
|
||||
new_target = new_target_spec.architecture.target
|
||||
else:
|
||||
# To get default platform, consider package prefs
|
||||
if PackagePrefs.has_preferred_targets(spec.name):
|
||||
new_target = self.target_from_package_preferences(spec)
|
||||
else:
|
||||
new_target = new_plat.target("default_target")
|
||||
if curr_target:
|
||||
# convert to ArchSpec to compare satisfaction
|
||||
new_target_arch = spack.spec.ArchSpec((None, None, str(new_target)))
|
||||
curr_target_arch = spack.spec.ArchSpec((None, None, str(curr_target)))
|
||||
|
||||
if not new_target_arch.intersects(curr_target_arch):
|
||||
# new_target is an incorrect guess based on preferences
|
||||
# and/or default
|
||||
valid_target_ranges = str(curr_target).split(",")
|
||||
for target_range in valid_target_ranges:
|
||||
t_min, t_sep, t_max = target_range.partition(":")
|
||||
if not t_sep:
|
||||
new_target = t_min
|
||||
break
|
||||
elif t_max:
|
||||
new_target = t_max
|
||||
break
|
||||
elif t_min:
|
||||
# TODO: something better than picking first
|
||||
new_target = t_min
|
||||
break
|
||||
|
||||
# Construct new architecture, compute whether spec changed
|
||||
arch_spec = (str(new_plat), str(new_os), str(new_target))
|
||||
new_arch = spack.spec.ArchSpec(arch_spec)
|
||||
spec_changed = new_arch != spec.architecture
|
||||
spec.architecture = new_arch
|
||||
return spec_changed
|
||||
|
||||
def target_from_package_preferences(self, spec):
|
||||
"""Returns the preferred target from the package preferences if
|
||||
there's any.
|
||||
|
||||
Args:
|
||||
spec: abstract spec to be concretized
|
||||
"""
|
||||
target_prefs = PackagePrefs(spec.name, "target")
|
||||
target_specs = [spack.spec.Spec("target=%s" % tname) for tname in archspec.cpu.TARGETS]
|
||||
|
||||
def tspec_filter(s):
|
||||
# Filter target specs by whether the architecture
|
||||
# family is the current machine type. This ensures
|
||||
# we only consider x86_64 targets when on an
|
||||
# x86_64 machine, etc. This may need to change to
|
||||
# enable setting cross compiling as a default
|
||||
target = archspec.cpu.TARGETS[str(s.architecture.target)]
|
||||
arch_family_name = target.family.name
|
||||
return arch_family_name == platform.machine()
|
||||
|
||||
# Sort filtered targets by package prefs
|
||||
target_specs = list(filter(tspec_filter, target_specs))
|
||||
target_specs.sort(key=target_prefs)
|
||||
new_target = target_specs[0].architecture.target
|
||||
return new_target
|
||||
|
||||
def concretize_variants(self, spec):
|
||||
"""If the spec already has variants filled in, return. Otherwise, add
|
||||
the user preferences from packages.yaml or the default variants from
|
||||
the package specification.
|
||||
"""
|
||||
changed = False
|
||||
preferred_variants = PackagePrefs.preferred_variants(spec.name)
|
||||
pkg_cls = spec.package_class
|
||||
for name, entry in pkg_cls.variants.items():
|
||||
variant, when = entry
|
||||
var = spec.variants.get(name, None)
|
||||
if var and "*" in var:
|
||||
# remove variant wildcard before concretizing
|
||||
# wildcard cannot be combined with other variables in a
|
||||
# multivalue variant, a concrete variant cannot have the value
|
||||
# wildcard, and a wildcard does not constrain a variant
|
||||
spec.variants.pop(name)
|
||||
if name not in spec.variants and any(spec.satisfies(w) for w in when):
|
||||
changed = True
|
||||
if name in preferred_variants:
|
||||
spec.variants[name] = preferred_variants.get(name)
|
||||
else:
|
||||
spec.variants[name] = variant.make_default()
|
||||
if name in spec.variants and not any(spec.satisfies(w) for w in when):
|
||||
raise vt.InvalidVariantForSpecError(name, when, spec)
|
||||
|
||||
return changed
|
||||
|
||||
def concretize_compiler(self, spec):
|
||||
"""If the spec already has a compiler, we're done. If not, then take
|
||||
the compiler used for the nearest ancestor with a compiler
|
||||
spec and use that. If the ancestor's compiler is not
|
||||
concrete, then used the preferred compiler as specified in
|
||||
spackconfig.
|
||||
|
||||
Intuition: Use the spackconfig default if no package that depends on
|
||||
this one has a strict compiler requirement. Otherwise, try to
|
||||
build with the compiler that will be used by libraries that
|
||||
link to this one, to maximize compatibility.
|
||||
"""
|
||||
# Pass on concretizing the compiler if the target or operating system
|
||||
# is not yet determined
|
||||
if not spec.architecture.concrete:
|
||||
# We haven't changed, but other changes need to happen before we
|
||||
# continue. `return True` here to force concretization to keep
|
||||
# running.
|
||||
return True
|
||||
|
||||
# Only use a matching compiler if it is of the proper style
|
||||
# Takes advantage of the proper logic already existing in
|
||||
# compiler_for_spec Should think whether this can be more
|
||||
# efficient
|
||||
def _proper_compiler_style(cspec, aspec):
|
||||
compilers = spack.compilers.compilers_for_spec(cspec, arch_spec=aspec)
|
||||
# If the spec passed as argument is concrete we want to check
|
||||
# the versions match exactly
|
||||
if (
|
||||
cspec.concrete
|
||||
and compilers
|
||||
and cspec.version not in [c.version for c in compilers]
|
||||
):
|
||||
return []
|
||||
|
||||
return compilers
|
||||
|
||||
if spec.compiler and spec.compiler.concrete:
|
||||
if self.check_for_compiler_existence and not _proper_compiler_style(
|
||||
spec.compiler, spec.architecture
|
||||
):
|
||||
_compiler_concretization_failure(spec.compiler, spec.architecture)
|
||||
return False
|
||||
|
||||
# Find another spec that has a compiler, or the root if none do
|
||||
other_spec = spec if spec.compiler else find_spec(spec, lambda x: x.compiler, spec.root)
|
||||
other_compiler = other_spec.compiler
|
||||
assert other_spec
|
||||
|
||||
# Check if the compiler is already fully specified
|
||||
if other_compiler and other_compiler.concrete:
|
||||
if self.check_for_compiler_existence and not _proper_compiler_style(
|
||||
other_compiler, spec.architecture
|
||||
):
|
||||
_compiler_concretization_failure(other_compiler, spec.architecture)
|
||||
spec.compiler = other_compiler
|
||||
return True
|
||||
|
||||
if other_compiler: # Another node has abstract compiler information
|
||||
compiler_list = spack.compilers.find_specs_by_arch(other_compiler, spec.architecture)
|
||||
if not compiler_list:
|
||||
# We don't have a matching compiler installed
|
||||
if not self.check_for_compiler_existence:
|
||||
# Concretize compiler spec versions as a package to build
|
||||
cpkg_spec = spack.compilers.pkg_spec_for_compiler(other_compiler)
|
||||
self.concretize_version(cpkg_spec)
|
||||
spec.compiler = spack.spec.CompilerSpec(
|
||||
other_compiler.name, cpkg_spec.versions
|
||||
)
|
||||
return True
|
||||
else:
|
||||
# No compiler with a satisfactory spec was found
|
||||
raise UnavailableCompilerVersionError(other_compiler, spec.architecture)
|
||||
else:
|
||||
# We have no hints to go by, grab any compiler
|
||||
compiler_list = spack.compilers.all_compiler_specs()
|
||||
if not compiler_list:
|
||||
# Spack has no compilers.
|
||||
raise spack.compilers.NoCompilersError()
|
||||
|
||||
# By default, prefer later versions of compilers
|
||||
compiler_list = sorted(compiler_list, key=lambda x: (x.name, x.version), reverse=True)
|
||||
ppk = PackagePrefs(other_spec.name, "compiler")
|
||||
matches = sorted(compiler_list, key=ppk)
|
||||
|
||||
# copy concrete version into other_compiler
|
||||
try:
|
||||
spec.compiler = next(
|
||||
c for c in matches if _proper_compiler_style(c, spec.architecture)
|
||||
).copy()
|
||||
except StopIteration:
|
||||
# No compiler with a satisfactory spec has a suitable arch
|
||||
_compiler_concretization_failure(other_compiler, spec.architecture)
|
||||
|
||||
assert spec.compiler.concrete
|
||||
return True # things changed.
|
||||
|
||||
def concretize_compiler_flags(self, spec):
|
||||
"""
|
||||
The compiler flags are updated to match those of the spec whose
|
||||
compiler is used, defaulting to no compiler flags in the spec.
|
||||
Default specs set at the compiler level will still be added later.
|
||||
"""
|
||||
# Pass on concretizing the compiler flags if the target or operating
|
||||
# system is not set.
|
||||
if not spec.architecture.concrete:
|
||||
# We haven't changed, but other changes need to happen before we
|
||||
# continue. `return True` here to force concretization to keep
|
||||
# running.
|
||||
return True
|
||||
|
||||
compiler_match = lambda other: (
|
||||
spec.compiler == other.compiler and spec.architecture == other.architecture
|
||||
)
|
||||
|
||||
ret = False
|
||||
for flag in spack.spec.FlagMap.valid_compiler_flags():
|
||||
if flag not in spec.compiler_flags:
|
||||
spec.compiler_flags[flag] = list()
|
||||
try:
|
||||
nearest = next(
|
||||
p
|
||||
for p in spec.traverse(direction="parents")
|
||||
if (compiler_match(p) and (p is not spec) and flag in p.compiler_flags)
|
||||
)
|
||||
nearest_flags = nearest.compiler_flags.get(flag, [])
|
||||
flags = spec.compiler_flags.get(flag, [])
|
||||
if set(nearest_flags) - set(flags):
|
||||
spec.compiler_flags[flag] = list(llnl.util.lang.dedupe(nearest_flags + flags))
|
||||
ret = True
|
||||
except StopIteration:
|
||||
pass
|
||||
|
||||
# Include the compiler flag defaults from the config files
|
||||
# This ensures that spack will detect conflicts that stem from a change
|
||||
# in default compiler flags.
|
||||
try:
|
||||
compiler = spack.compilers.compiler_for_spec(spec.compiler, spec.architecture)
|
||||
except spack.compilers.NoCompilerForSpecError:
|
||||
if self.check_for_compiler_existence:
|
||||
raise
|
||||
return ret
|
||||
for flag in compiler.flags:
|
||||
config_flags = compiler.flags.get(flag, [])
|
||||
flags = spec.compiler_flags.get(flag, [])
|
||||
spec.compiler_flags[flag] = list(llnl.util.lang.dedupe(config_flags + flags))
|
||||
if set(config_flags) - set(flags):
|
||||
ret = True
|
||||
|
||||
return ret
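Both merges in the method above rely on llnl.util.lang.dedupe keeping the first occurrence of each flag, so inherited flags stay in front of the spec's own. A dictionary-based sketch of that behaviour (not the llnl implementation itself):

def dedupe(seq):
    # Keep the first occurrence of each element, preserving order.
    return list(dict.fromkeys(seq))


inherited = ["-O2", "-g"]
own = ["-g", "-fPIC"]
print(dedupe(inherited + own))  # ['-O2', '-g', '-fPIC']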
|
||||
|
||||
def adjust_target(self, spec):
|
||||
"""Adjusts the target microarchitecture if the compiler is too old
|
||||
to support the default one.
|
||||
|
||||
Args:
|
||||
spec: spec to be concretized
|
||||
|
||||
Returns:
|
||||
True if spec was modified, False otherwise
|
||||
"""
|
||||
# To minimize the impact on performance this function will attempt
|
||||
# to adjust the target only at the very first call once necessary
|
||||
# information is set. It will just return False on subsequent calls.
|
||||
# The way this is achieved is by initializing a generator and making
|
||||
# this function return the next answer.
|
||||
if not (spec.architecture and spec.architecture.concrete):
|
||||
# Not ready, but keep going because we have work to do later
|
||||
return True
|
||||
|
||||
def _make_only_one_call(spec):
|
||||
yield self._adjust_target(spec)
|
||||
while True:
|
||||
yield False
|
||||
|
||||
if self._adjust_target_answer_generator is None:
|
||||
self._adjust_target_answer_generator = _make_only_one_call(spec)
|
||||
|
||||
return next(self._adjust_target_answer_generator)
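_make_only_one_call is a small generator trick: yield the real answer once, then False forever, so the potentially expensive target adjustment runs at most once per concretization. The same pattern in isolation:

def once_then_false(compute):
    # Yield compute() on the first next(), then False forever.
    yield compute()
    while True:
        yield False


answers = once_then_false(lambda: "expensive result")
print(next(answers))  # 'expensive result'
print(next(answers))  # False
print(next(answers))  # False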
|
||||
|
||||
def _adjust_target(self, spec):
|
||||
"""Assumes that the architecture and the compiler have been
|
||||
set already and checks if the current target microarchitecture
|
||||
is the default and can be optimized by the compiler.
|
||||
|
||||
If not, downgrades the microarchitecture until a suitable one
|
||||
is found. If none can be found raise an error.
|
||||
|
||||
Args:
|
||||
spec: spec to be concretized
|
||||
|
||||
Returns:
|
||||
True if any modification happened, False otherwise
|
||||
"""
|
||||
import archspec.cpu
|
||||
|
||||
# Try to adjust the target only if it is the default
|
||||
# target for this platform
|
||||
current_target = spec.architecture.target
|
||||
current_platform = spack.platforms.by_name(spec.architecture.platform)
|
||||
|
||||
default_target = current_platform.target("default_target")
|
||||
if PackagePrefs.has_preferred_targets(spec.name):
|
||||
default_target = self.target_from_package_preferences(spec)
|
||||
|
||||
if current_target != default_target or (
|
||||
self.abstract_spec
|
||||
and self.abstract_spec.architecture
|
||||
and self.abstract_spec.architecture.concrete
|
||||
):
|
||||
return False
|
||||
|
||||
try:
|
||||
current_target.optimization_flags(spec.compiler)
|
||||
except archspec.cpu.UnsupportedMicroarchitecture:
|
||||
microarchitecture = current_target.microarchitecture
|
||||
for ancestor in microarchitecture.ancestors:
|
||||
candidate = None
|
||||
try:
|
||||
candidate = spack.target.Target(ancestor)
|
||||
candidate.optimization_flags(spec.compiler)
|
||||
except archspec.cpu.UnsupportedMicroarchitecture:
|
||||
continue
|
||||
|
||||
if candidate is not None:
|
||||
msg = (
|
||||
"{0.name}@{0.version} cannot build optimized "
|
||||
'binaries for "{1}". Using best target possible: '
|
||||
'"{2}"'
|
||||
)
|
||||
msg = msg.format(spec.compiler, current_target, candidate)
|
||||
tty.warn(msg)
|
||||
spec.architecture.target = candidate
|
||||
return True
|
||||
else:
|
||||
raise
|
||||
|
||||
return False
|
||||
|
||||
|
||||
@contextmanager
|
||||
@@ -82,6 +719,19 @@ def find_spec(spec, condition, default=None):
|
||||
return default # Nothing matched the condition; return default.
|
||||
|
||||
|
||||
def _compiler_concretization_failure(compiler_spec, arch):
|
||||
# Distinguish between the case that there are compilers for
|
||||
# the arch but not with the given compiler spec and the case that
|
||||
# there are no compilers for the arch at all
|
||||
if not spack.compilers.compilers_for_arch(arch):
|
||||
available_os_targets = set(
|
||||
(c.operating_system, c.target) for c in spack.compilers.all_compilers()
|
||||
)
|
||||
raise NoCompilersForArchError(arch, available_os_targets)
|
||||
else:
|
||||
raise UnavailableCompilerVersionError(compiler_spec, arch)
|
||||
|
||||
|
||||
def concretize_specs_together(*abstract_specs, **kwargs):
|
||||
"""Given a number of specs as input, tries to concretize them together.
|
||||
|
||||
@@ -94,6 +744,12 @@ def concretize_specs_together(*abstract_specs, **kwargs):
|
||||
Returns:
|
||||
List of concretized specs
|
||||
"""
|
||||
if spack.config.get("config:concretizer", "clingo") == "original":
|
||||
return _concretize_specs_together_original(*abstract_specs, **kwargs)
|
||||
return _concretize_specs_together_new(*abstract_specs, **kwargs)
|
||||
|
||||
|
||||
def _concretize_specs_together_new(*abstract_specs, **kwargs):
|
||||
import spack.solver.asp
|
||||
|
||||
allow_deprecated = spack.config.get("config:deprecated", False)
|
||||
@@ -104,6 +760,51 @@ def concretize_specs_together(*abstract_specs, **kwargs):
|
||||
return [s.copy() for s in result.specs]
|
||||
|
||||
|
||||
def _concretize_specs_together_original(*abstract_specs, **kwargs):
|
||||
abstract_specs = [spack.spec.Spec(s) for s in abstract_specs]
|
||||
tmpdir = tempfile.mkdtemp()
|
||||
builder = spack.repo.MockRepositoryBuilder(tmpdir)
|
||||
# Split recursive specs, as it seems the concretizer has issue
|
||||
# respecting conditions on dependents expressed like
|
||||
# depends_on('foo ^bar@1.0'), see issue #11160
|
||||
split_specs = [
|
||||
dep.copy(deps=False) for spec1 in abstract_specs for dep in spec1.traverse(root=True)
|
||||
]
|
||||
builder.add_package(
|
||||
"concretizationroot", dependencies=[(str(x), None, None) for x in split_specs]
|
||||
)
|
||||
|
||||
with spack.repo.use_repositories(builder.root, override=False):
|
||||
# Spec from a helper package that depends on all the abstract_specs
|
||||
concretization_root = spack.spec.Spec("concretizationroot")
|
||||
concretization_root.concretize(tests=kwargs.get("tests", False))
|
||||
# Retrieve the direct dependencies
|
||||
concrete_specs = [concretization_root[spec.name].copy() for spec in abstract_specs]
|
||||
|
||||
return concrete_specs
|
||||
|
||||
|
||||
class NoCompilersForArchError(spack.error.SpackError):
|
||||
def __init__(self, arch, available_os_targets):
|
||||
err_msg = (
|
||||
"No compilers found"
|
||||
" for operating system %s and target %s."
|
||||
"\nIf previous installations have succeeded, the"
|
||||
" operating system may have been updated." % (arch.os, arch.target)
|
||||
)
|
||||
|
||||
available_os_target_strs = list()
|
||||
for operating_system, t in available_os_targets:
|
||||
os_target_str = "%s-%s" % (operating_system, t) if t else operating_system
|
||||
available_os_target_strs.append(os_target_str)
|
||||
err_msg += (
|
||||
"\nCompilers are defined for the following"
|
||||
" operating systems and targets:\n\t" + "\n\t".join(available_os_target_strs)
|
||||
)
|
||||
|
||||
super().__init__(err_msg, "Run 'spack compiler find' to add compilers.")
|
||||
|
||||
|
||||
class UnavailableCompilerVersionError(spack.error.SpackError):
|
||||
"""Raised when there is no available compiler that satisfies a
|
||||
compiler spec."""
|
||||
@@ -119,3 +820,37 @@ def __init__(self, compiler_spec, arch=None):
|
||||
"'spack compilers' to see which compilers are already recognized"
|
||||
" by spack.",
|
||||
)
|
||||
|
||||
|
||||
class NoValidVersionError(spack.error.SpackError):
|
||||
"""Raised when there is no way to have a concrete version for a
|
||||
particular spec."""
|
||||
|
||||
def __init__(self, spec):
|
||||
super().__init__(
|
||||
"There are no valid versions for %s that match '%s'" % (spec.name, spec.versions)
|
||||
)
|
||||
|
||||
|
||||
class InsufficientArchitectureInfoError(spack.error.SpackError):
|
||||
"""Raised when details on architecture cannot be collected from the
|
||||
system"""
|
||||
|
||||
def __init__(self, spec, archs):
|
||||
super().__init__(
|
||||
"Cannot determine necessary architecture information for '%s': %s"
|
||||
% (spec.name, str(archs))
|
||||
)
|
||||
|
||||
|
||||
class NoBuildError(spack.error.SpecError):
|
||||
"""Raised when a package is configured with the buildable option False, but
|
||||
no satisfactory external versions can be found
|
||||
"""
|
||||
|
||||
def __init__(self, spec):
|
||||
msg = (
|
||||
"The spec\n '%s'\n is configured as not buildable, "
|
||||
"and no matching external installs were found"
|
||||
)
|
||||
super().__init__(msg % spec)
|
||||
|
@@ -53,7 +53,6 @@
|
||||
import spack.schema.modules
|
||||
import spack.schema.packages
|
||||
import spack.schema.repos
|
||||
import spack.schema.splice
|
||||
import spack.schema.upstreams
|
||||
|
||||
# Hacked yaml for configuration files preserves line numbers.
|
||||
@@ -78,7 +77,6 @@
|
||||
"bootstrap": spack.schema.bootstrap.schema,
|
||||
"ci": spack.schema.ci.schema,
|
||||
"cdash": spack.schema.cdash.schema,
|
||||
"splice": spack.schema.splice.schema,
|
||||
}
|
||||
|
||||
# Same as above, but including keys for environments
|
||||
@@ -101,6 +99,7 @@
|
||||
"dirty": False,
|
||||
"build_jobs": min(16, cpus_available()),
|
||||
"build_stage": "$tempdir/spack-stage",
|
||||
"concretizer": "clingo",
|
||||
"license_dir": spack.paths.default_license_dir,
|
||||
}
|
||||
}
|
||||
|
@@ -136,10 +136,10 @@ def path_to_dict(search_paths: List[str]):
|
||||
# entry overrides later entries
|
||||
for search_path in reversed(search_paths):
|
||||
try:
|
||||
with os.scandir(search_path) as entries:
|
||||
path_to_lib.update(
|
||||
{entry.path: entry.name for entry in entries if entry.is_file()}
|
||||
)
|
||||
for lib in os.listdir(search_path):
|
||||
lib_path = os.path.join(search_path, lib)
|
||||
if llnl.util.filesystem.is_readable_file(lib_path):
|
||||
path_to_lib[lib_path] = lib
|
||||
except OSError as e:
|
||||
msg = f"cannot scan '{search_path}' for external software: {str(e)}"
|
||||
llnl.util.tty.debug(msg)
|
||||
|
@@ -12,7 +12,7 @@
|
||||
import re
|
||||
import sys
|
||||
import warnings
|
||||
from typing import Dict, Iterable, List, Optional, Set, Tuple, Type
|
||||
from typing import Dict, List, Optional, Set, Tuple, Type
|
||||
|
||||
import llnl.util.filesystem
|
||||
import llnl.util.lang
|
||||
@@ -187,7 +187,7 @@ def libraries_in_windows_paths(path_hints: Optional[List[str]] = None) -> Dict[s
|
||||
return path_to_dict(search_paths)
|
||||
|
||||
|
||||
def _group_by_prefix(paths: List[str]) -> Dict[str, Set[str]]:
|
||||
def _group_by_prefix(paths: Set[str]) -> Dict[str, Set[str]]:
|
||||
groups = collections.defaultdict(set)
|
||||
for p in paths:
|
||||
groups[os.path.dirname(p)].add(p)
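_group_by_prefix simply buckets candidate paths by their directory. A standalone sketch with made-up paths; the function's return statement is elided by the diff context above, so the one below is an assumption added for illustration.

import collections
import os


def group_by_prefix(paths):
    # Map each directory to the set of candidate files found inside it.
    groups = collections.defaultdict(set)
    for p in paths:
        groups[os.path.dirname(p)].add(p)
    return groups


grouped = group_by_prefix({"/usr/bin/gcc", "/usr/bin/g++", "/opt/bin/gcc"})
print(dict(grouped))
# two buckets: '/usr/bin' with both compiler paths, '/opt/bin' with one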
|
||||
@@ -243,9 +243,7 @@ def detect_specs(
|
||||
return []
|
||||
|
||||
result = []
|
||||
for candidate_path, items_in_prefix in _group_by_prefix(
|
||||
llnl.util.lang.dedupe(paths)
|
||||
).items():
|
||||
for candidate_path, items_in_prefix in sorted(_group_by_prefix(set(paths)).items()):
|
||||
# TODO: multiple instances of a package can live in the same
|
||||
# prefix, and a package implementation can return multiple specs
|
||||
# for one prefix, but without additional details (e.g. about the
|
||||
@@ -301,17 +299,19 @@ def detect_specs(
|
||||
return result
|
||||
|
||||
def find(
|
||||
self, *, pkg_name: str, repository, initial_guess: Optional[List[str]] = None
|
||||
self, *, pkg_name: str, initial_guess: Optional[List[str]] = None
|
||||
) -> List[DetectedPackage]:
|
||||
"""For a given package, returns a list of detected specs.
|
||||
|
||||
Args:
|
||||
pkg_name: package being detected
|
||||
repository: repository to retrieve the package
|
||||
initial_guess: initial list of paths to search from the caller if None, default paths
|
||||
are searched. If this is an empty list, nothing will be searched.
|
||||
initial_guess: initial list of paths to search from the caller
|
||||
if None, default paths are searched. If this
|
||||
is an empty list, nothing will be searched.
|
||||
"""
|
||||
pkg_cls = repository.get_pkg_class(pkg_name)
|
||||
import spack.repo
|
||||
|
||||
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
|
||||
patterns = self.search_patterns(pkg=pkg_cls)
|
||||
if not patterns:
|
||||
return []
|
||||
@@ -335,10 +335,13 @@ def search_patterns(self, *, pkg: Type["spack.package_base.PackageBase"]) -> Lis
|
||||
|
||||
def candidate_files(self, *, patterns: List[str], paths: List[str]) -> List[str]:
|
||||
executables_by_path = executables_in_path(path_hints=paths)
|
||||
joined_pattern = re.compile(r"|".join(patterns))
|
||||
result = [path for path, exe in executables_by_path.items() if joined_pattern.search(exe)]
|
||||
result.sort()
|
||||
return result
|
||||
patterns = [re.compile(x) for x in patterns]
|
||||
result = []
|
||||
for compiled_re in patterns:
|
||||
for path, exe in executables_by_path.items():
|
||||
if compiled_re.search(exe):
|
||||
result.append(path)
|
||||
return list(sorted(set(result)))
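The two variants of candidate_files shown above differ only in how they match: one joins all patterns into a single regular-expression alternation, the other loops over each compiled pattern; both end with a sorted, de-duplicated list of matching paths. A sketch of the joined-pattern approach over a made-up path-to-name mapping:

import re

executables_by_path = {
    "/usr/bin/gcc-12": "gcc-12",
    "/usr/bin/gfortran": "gfortran",
    "/usr/bin/python3": "python3",
}
patterns = [r"^gcc", r"^gfortran"]

# One pass over the detected executables with a single alternation pattern.
joined = re.compile("|".join(patterns))
matches = sorted(p for p, name in executables_by_path.items() if joined.search(name))
print(matches)  # ['/usr/bin/gcc-12', '/usr/bin/gfortran']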
|
||||
|
||||
def prefix_from_path(self, *, path: str) -> str:
|
||||
result = executable_prefix(path)
|
||||
@@ -382,7 +385,7 @@ def prefix_from_path(self, *, path: str) -> str:
|
||||
|
||||
|
||||
def by_path(
|
||||
packages_to_search: Iterable[str],
|
||||
packages_to_search: List[str],
|
||||
*,
|
||||
path_hints: Optional[List[str]] = None,
|
||||
max_workers: Optional[int] = None,
|
||||
@@ -396,28 +399,19 @@ def by_path(
|
||||
path_hints: initial list of paths to be searched
|
||||
max_workers: maximum number of workers to search for packages in parallel
|
||||
"""
|
||||
import spack.repo
|
||||
|
||||
# TODO: Packages should be able to define both .libraries and .executables in the future
|
||||
# TODO: determine_spec_details should get all relevant libraries and executables in one call
|
||||
executables_finder, libraries_finder = ExecutablesFinder(), LibrariesFinder()
|
||||
detected_specs_by_package: Dict[str, Tuple[concurrent.futures.Future, ...]] = {}
|
||||
|
||||
result = collections.defaultdict(list)
|
||||
repository = spack.repo.PATH.ensure_unwrapped()
|
||||
with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
|
||||
for pkg in packages_to_search:
|
||||
executable_future = executor.submit(
|
||||
executables_finder.find,
|
||||
pkg_name=pkg,
|
||||
initial_guess=path_hints,
|
||||
repository=repository,
|
||||
executables_finder.find, pkg_name=pkg, initial_guess=path_hints
|
||||
)
|
||||
library_future = executor.submit(
|
||||
libraries_finder.find,
|
||||
pkg_name=pkg,
|
||||
initial_guess=path_hints,
|
||||
repository=repository,
|
||||
libraries_finder.find, pkg_name=pkg, initial_guess=path_hints
|
||||
)
|
||||
detected_specs_by_package[pkg] = executable_future, library_future
|
||||
|
||||
|
@@ -1214,6 +1214,7 @@ def scope_name(self):
|
||||
def include_concrete_envs(self):
|
||||
"""Copy and save the included envs' specs internally"""
|
||||
|
||||
lockfile_meta = None
|
||||
root_hash_seen = set()
|
||||
concrete_hash_seen = set()
|
||||
self.included_concrete_spec_data = {}
|
||||
@@ -1224,26 +1225,37 @@ def include_concrete_envs(self):
|
||||
raise SpackEnvironmentError(f"Unable to find env at {env_path}")
|
||||
|
||||
env = Environment(env_path)
|
||||
self.included_concrete_spec_data[env_path] = {"roots": [], "concrete_specs": {}}
|
||||
|
||||
with open(env.lock_path) as f:
|
||||
lockfile_as_dict = env._read_lockfile(f)
|
||||
|
||||
# Lockfile_meta must match each env and use at least format version 5
|
||||
if lockfile_meta is None:
|
||||
lockfile_meta = lockfile_as_dict["_meta"]
|
||||
elif lockfile_meta != lockfile_as_dict["_meta"]:
|
||||
raise SpackEnvironmentError("All lockfile _meta values must match")
|
||||
elif lockfile_meta["lockfile-version"] < 5:
|
||||
raise SpackEnvironmentError("The lockfile format must be at version 5 or higher")
|
||||
|
||||
# Copy unique root specs from env
|
||||
for root_dict in env._concrete_roots_dict():
|
||||
self.included_concrete_spec_data[env_path] = {"roots": []}
|
||||
for root_dict in lockfile_as_dict["roots"]:
|
||||
if root_dict["hash"] not in root_hash_seen:
|
||||
self.included_concrete_spec_data[env_path]["roots"].append(root_dict)
|
||||
root_hash_seen.add(root_dict["hash"])
|
||||
|
||||
# Copy unique concrete specs from env
|
||||
for dag_hash, spec_details in env._concrete_specs_dict().items():
|
||||
if dag_hash not in concrete_hash_seen:
|
||||
self.included_concrete_spec_data[env_path]["concrete_specs"].update(
|
||||
{dag_hash: spec_details}
|
||||
for concrete_spec in lockfile_as_dict["concrete_specs"]:
|
||||
if concrete_spec not in concrete_hash_seen:
|
||||
self.included_concrete_spec_data[env_path].update(
|
||||
{"concrete_specs": lockfile_as_dict["concrete_specs"]}
|
||||
)
|
||||
concrete_hash_seen.add(dag_hash)
|
||||
concrete_hash_seen.add(concrete_spec)
|
||||
|
||||
# Copy transitive include data
|
||||
transitive = env.included_concrete_spec_data
|
||||
if transitive:
|
||||
self.included_concrete_spec_data[env_path]["include_concrete"] = transitive
|
||||
if "include_concrete" in lockfile_as_dict.keys():
|
||||
self.included_concrete_spec_data[env_path]["include_concrete"] = lockfile_as_dict[
|
||||
"include_concrete"
|
||||
]
|
||||
|
||||
self._read_lockfile_dict(self._to_lockfile_dict())
|
||||
self.write()
|
||||
@@ -1632,8 +1644,9 @@ def _concretize_separately(self, tests=False):
|
||||
i += 1
|
||||
|
||||
# Ensure we don't try to bootstrap clingo in parallel
|
||||
with spack.bootstrap.ensure_bootstrap_configuration():
|
||||
spack.bootstrap.ensure_clingo_importable_or_raise()
|
||||
if spack.config.get("config:concretizer", "clingo") == "clingo":
|
||||
with spack.bootstrap.ensure_bootstrap_configuration():
|
||||
spack.bootstrap.ensure_clingo_importable_or_raise()
|
||||
|
||||
# Ensure all the indexes have been built or updated, since
|
||||
# otherwise the processes in the pool may timeout on waiting
|
||||
@@ -2161,7 +2174,8 @@ def _get_environment_specs(self, recurse_dependencies=True):
|
||||
|
||||
return specs
|
||||
|
||||
def _concrete_specs_dict(self):
|
||||
def _to_lockfile_dict(self):
|
||||
"""Create a dictionary to store a lockfile for this environment."""
|
||||
concrete_specs = {}
|
||||
for s in traverse.traverse_nodes(self.specs_by_hash.values(), key=traverse.by_dag_hash):
|
||||
spec_dict = s.node_dict_with_hashes(hash=ht.dag_hash)
|
||||
@@ -2169,22 +2183,7 @@ def _concrete_specs_dict(self):
|
||||
spec_dict[ht.dag_hash.name] = s.dag_hash()
|
||||
concrete_specs[s.dag_hash()] = spec_dict
|
||||
|
||||
if s.build_spec is not s:
|
||||
for d in s.build_spec.traverse():
|
||||
build_spec_dict = d.node_dict_with_hashes(hash=ht.dag_hash)
|
||||
build_spec_dict[ht.dag_hash.name] = d.dag_hash()
|
||||
concrete_specs[d.dag_hash()] = build_spec_dict
|
||||
|
||||
return concrete_specs
|
||||
|
||||
def _concrete_roots_dict(self):
|
||||
hash_spec_list = zip(self.concretized_order, self.concretized_user_specs)
|
||||
return [{"hash": h, "spec": str(s)} for h, s in hash_spec_list]
|
||||
|
||||
def _to_lockfile_dict(self):
|
||||
"""Create a dictionary to store a lockfile for this environment."""
|
||||
concrete_specs = self._concrete_specs_dict()
|
||||
root_specs = self._concrete_roots_dict()
|
||||
|
||||
spack_dict = {"version": spack.spack_version}
|
||||
spack_commit = spack.main.get_spack_commit()
|
||||
@@ -2205,7 +2204,7 @@ def _to_lockfile_dict(self):
|
||||
# spack version information
|
||||
"spack": spack_dict,
|
||||
# users specs + hashes are the 'roots' of the environment
|
||||
"roots": root_specs,
|
||||
"roots": [{"hash": h, "spec": str(s)} for h, s in hash_spec_list],
|
||||
# Concrete specs by hash, including dependencies
|
||||
"concrete_specs": concrete_specs,
|
||||
}
|
||||
@@ -2334,7 +2333,7 @@ def filter_specs(self, reader, json_specs_by_hash, order_concretized):
|
||||
specs_by_hash[lockfile_key] = spec
|
||||
|
||||
# Second pass: For each spec, get its dependencies from the node dict
|
||||
# and add them to the spec, including build specs
|
||||
# and add them to the spec
|
||||
for lockfile_key, node_dict in json_specs_by_hash.items():
|
||||
name, data = reader.name_and_data(node_dict)
|
||||
for _, dep_hash, deptypes, _, virtuals in reader.dependencies_from_node_dict(data):
|
||||
@@ -2342,10 +2341,6 @@ def filter_specs(self, reader, json_specs_by_hash, order_concretized):
|
||||
specs_by_hash[dep_hash], depflag=dt.canonicalize(deptypes), virtuals=virtuals
|
||||
)
|
||||
|
||||
if "build_spec" in node_dict:
|
||||
_, bhash, _ = reader.build_spec_from_node_dict(node_dict)
|
||||
specs_by_hash[lockfile_key]._build_spec = specs_by_hash[bhash]
|
||||
|
||||
# Traverse the root specs one at a time in the order they appear.
|
||||
# The first time we see each DAG hash, that's the one we want to
|
||||
# keep. This is only required as long as we support older lockfile
|
||||
|
@@ -30,7 +30,6 @@
|
||||
import shutil
|
||||
import urllib.error
|
||||
import urllib.parse
|
||||
import urllib.request
|
||||
from pathlib import PurePath
|
||||
from typing import List, Optional
|
||||
|
||||
@@ -54,7 +53,7 @@
|
||||
import spack.version
|
||||
import spack.version.git_ref_lookup
|
||||
from spack.util.compression import decompressor_for
|
||||
from spack.util.executable import CommandNotFoundError, Executable, which
|
||||
from spack.util.executable import CommandNotFoundError, which
|
||||
|
||||
#: List of all fetch strategies, created by FetchStrategy metaclass.
|
||||
all_strategies = []
|
||||
@@ -246,30 +245,38 @@ class URLFetchStrategy(FetchStrategy):
|
||||
|
||||
# these are checksum types. The generic 'checksum' is deprecated for
|
||||
# specific hash names, but we need it for backward compatibility
|
||||
optional_attrs = [*crypto.hashes.keys(), "checksum"]
|
||||
optional_attrs = list(crypto.hashes.keys()) + ["checksum"]
|
||||
|
||||
def __init__(self, *, url: str, checksum: Optional[str] = None, **kwargs) -> None:
|
||||
def __init__(self, url=None, checksum=None, **kwargs):
|
||||
super().__init__(**kwargs)
|
||||
|
||||
self.url = url
|
||||
# Prefer values in kwargs to the positionals.
|
||||
self.url = kwargs.get("url", url)
|
||||
self.mirrors = kwargs.get("mirrors", [])
|
||||
|
||||
# digest can be set as the first argument, or from an explicit
|
||||
# kwarg by the hash name.
|
||||
self.digest: Optional[str] = checksum
|
||||
self.digest = kwargs.get("checksum", checksum)
|
||||
for h in self.optional_attrs:
|
||||
if h in kwargs:
|
||||
self.digest = kwargs[h]
|
||||
|
||||
self.expand_archive: bool = kwargs.get("expand", True)
|
||||
self.extra_options: dict = kwargs.get("fetch_options", {})
|
||||
self._curl: Optional[Executable] = None
|
||||
self.extension: Optional[str] = kwargs.get("extension", None)
|
||||
self.expand_archive = kwargs.get("expand", True)
|
||||
self.extra_options = kwargs.get("fetch_options", {})
|
||||
self._curl = None
|
||||
|
||||
self.extension = kwargs.get("extension", None)
|
||||
|
||||
if not self.url:
|
||||
raise ValueError("URLFetchStrategy requires a url for fetching.")
|
||||
|
||||
@property
|
||||
def curl(self) -> Executable:
|
||||
def curl(self):
|
||||
if not self._curl:
|
||||
self._curl = web_util.require_curl()
|
||||
try:
|
||||
self._curl = which("curl", required=True)
|
||||
except CommandNotFoundError as exc:
|
||||
tty.error(str(exc))
|
||||
return self._curl
|
||||
|
||||
def source_id(self):
|
||||
@@ -290,23 +297,27 @@ def candidate_urls(self):
|
||||
@_needs_stage
|
||||
def fetch(self):
|
||||
if self.archive_file:
|
||||
tty.debug(f"Already downloaded {self.archive_file}")
|
||||
tty.debug("Already downloaded {0}".format(self.archive_file))
|
||||
return
|
||||
|
||||
errors: List[Exception] = []
|
||||
url = None
|
||||
errors = []
|
||||
for url in self.candidate_urls:
|
||||
if not web_util.url_exists(url):
|
||||
tty.debug("URL does not exist: " + url)
|
||||
continue
|
||||
|
||||
try:
|
||||
self._fetch_from_url(url)
|
||||
break
|
||||
except FailedDownloadError as e:
|
||||
errors.extend(e.exceptions)
|
||||
else:
|
||||
raise FailedDownloadError(*errors)
|
||||
errors.append(str(e))
|
||||
|
||||
for msg in errors:
|
||||
tty.debug(msg)
|
||||
|
||||
if not self.archive_file:
|
||||
raise FailedDownloadError(
|
||||
RuntimeError(f"Missing archive {self.archive_file} after fetching")
|
||||
)
|
||||
raise FailedDownloadError(url)
|
||||
|
||||
def _fetch_from_url(self, url):
|
||||
if spack.config.get("config:url_fetch_method") == "curl":
|
||||
@@ -325,28 +336,27 @@ def _check_headers(self, headers):
|
||||
@_needs_stage
|
||||
def _fetch_urllib(self, url):
|
||||
save_file = self.stage.save_filename
|
||||
tty.msg("Fetching {0}".format(url))
|
||||
|
||||
request = urllib.request.Request(url, headers={"User-Agent": web_util.SPACK_USER_AGENT})
|
||||
|
||||
# Run urllib but grab the mime type from the http headers
|
||||
try:
|
||||
response = web_util.urlopen(request)
|
||||
except (TimeoutError, urllib.error.URLError) as e:
|
||||
url, headers, response = web_util.read_from_url(url)
|
||||
except web_util.SpackWebError as e:
|
||||
# clean up archive on failure.
|
||||
if self.archive_file:
|
||||
os.remove(self.archive_file)
|
||||
if os.path.lexists(save_file):
|
||||
os.remove(save_file)
|
||||
raise FailedDownloadError(e) from e
|
||||
|
||||
tty.msg(f"Fetching {url}")
|
||||
msg = "urllib failed to fetch with error {0}".format(e)
|
||||
raise FailedDownloadError(url, msg)
|
||||
|
||||
if os.path.lexists(save_file):
|
||||
os.remove(save_file)
|
||||
|
||||
with open(save_file, "wb") as f:
|
||||
shutil.copyfileobj(response, f)
|
||||
with open(save_file, "wb") as _open_file:
|
||||
shutil.copyfileobj(response, _open_file)
|
||||
|
||||
self._check_headers(str(response.headers))
|
||||
self._check_headers(str(headers))
|
||||
|
||||
@_needs_stage
|
||||
def _fetch_curl(self, url):
|
||||
@@ -355,7 +365,7 @@ def _fetch_curl(self, url):
|
||||
if self.stage.save_filename:
|
||||
save_file = self.stage.save_filename
|
||||
partial_file = self.stage.save_filename + ".part"
|
||||
tty.msg(f"Fetching {url}")
|
||||
tty.msg("Fetching {0}".format(url))
|
||||
if partial_file:
|
||||
save_args = [
|
||||
"-C",
|
||||
@@ -395,8 +405,8 @@ def _fetch_curl(self, url):
|
||||
|
||||
try:
|
||||
web_util.check_curl_code(curl.returncode)
|
||||
except spack.error.FetchError as e:
|
||||
raise FailedDownloadError(e) from e
|
||||
except spack.error.FetchError as err:
|
||||
raise spack.fetch_strategy.FailedDownloadError(url, str(err))
|
||||
|
||||
self._check_headers(headers)
|
||||
|
||||
@@ -463,7 +473,7 @@ def check(self):
|
||||
"""Check the downloaded archive against a checksum digest.
|
||||
No-op if this stage checks code out of a repository."""
|
||||
if not self.digest:
|
||||
raise NoDigestError(f"Attempt to check {self.__class__.__name__} with no digest.")
|
||||
raise NoDigestError("Attempt to check URLFetchStrategy with no digest.")
|
||||
|
||||
verify_checksum(self.archive_file, self.digest)
|
||||
|
||||
@@ -474,8 +484,8 @@ def reset(self):
|
||||
"""
|
||||
if not self.archive_file:
|
||||
raise NoArchiveFileError(
|
||||
f"Tried to reset {self.__class__.__name__} before fetching",
|
||||
f"Failed on reset() for URL{self.url}",
|
||||
"Tried to reset URLFetchStrategy before fetching",
|
||||
"Failed on reset() for URL %s" % self.url,
|
||||
)
|
||||
|
||||
# Remove everything but the archive from the stage
|
||||
@@ -488,10 +498,14 @@ def reset(self):
|
||||
self.expand()
|
||||
|
||||
def __repr__(self):
|
||||
return f"{self.__class__.__name__}<{self.url}>"
|
||||
url = self.url if self.url else "no url"
|
||||
return "%s<%s>" % (self.__class__.__name__, url)
|
||||
|
||||
def __str__(self):
|
||||
return self.url
|
||||
if self.url:
|
||||
return self.url
|
||||
else:
|
||||
return "[no url]"
|
||||
|
||||
|
||||
@fetcher
|
||||
@@ -504,7 +518,7 @@ def fetch(self):
|
||||
|
||||
# check whether the cache file exists.
|
||||
if not os.path.isfile(path):
|
||||
raise NoCacheError(f"No cache of {path}")
|
||||
raise NoCacheError("No cache of %s" % path)
|
||||
|
||||
# remove old symlink if one is there.
|
||||
filename = self.stage.save_filename
|
||||
@@ -514,8 +528,8 @@ def fetch(self):
|
||||
# Symlink to local cached archive.
|
||||
symlink(path, filename)
|
||||
|
||||
# Remove link if checksum fails, or subsequent fetchers will assume they don't need to
|
||||
# download.
|
||||
# Remove link if checksum fails, or subsequent fetchers
|
||||
# will assume they don't need to download.
|
||||
if self.digest:
|
||||
try:
|
||||
self.check()
|
||||
@@ -524,12 +538,12 @@ def fetch(self):
|
||||
raise
|
||||
|
||||
# Notify the user how we fetched.
|
||||
tty.msg(f"Using cached archive: {path}")
|
||||
tty.msg("Using cached archive: {0}".format(path))
|
||||
|
||||
|
||||
class OCIRegistryFetchStrategy(URLFetchStrategy):
|
||||
def __init__(self, *, url: str, checksum: Optional[str] = None, **kwargs):
|
||||
super().__init__(url=url, checksum=checksum, **kwargs)
|
||||
def __init__(self, url=None, checksum=None, **kwargs):
|
||||
super().__init__(url, checksum, **kwargs)
|
||||
|
||||
self._urlopen = kwargs.get("_urlopen", spack.oci.opener.urlopen)
|
||||
|
||||
@@ -540,13 +554,13 @@ def fetch(self):
|
||||
|
||||
try:
|
||||
response = self._urlopen(self.url)
|
||||
except (TimeoutError, urllib.error.URLError) as e:
|
||||
except urllib.error.URLError as e:
|
||||
# clean up archive on failure.
|
||||
if self.archive_file:
|
||||
os.remove(self.archive_file)
|
||||
if os.path.lexists(file):
|
||||
os.remove(file)
|
||||
raise FailedDownloadError(e) from e
|
||||
raise FailedDownloadError(self.url, f"Failed to fetch {self.url}: {e}") from e
|
||||
|
||||
if os.path.lexists(file):
|
||||
os.remove(file)
|
||||
@@ -711,7 +725,6 @@ class GitFetchStrategy(VCSFetchStrategy):
|
||||
"submodules",
|
||||
"get_full_repo",
|
||||
"submodules_delete",
|
||||
"git_sparse_paths",
|
||||
]
|
||||
|
||||
git_version_re = r"git version (\S+)"
|
||||
@@ -727,7 +740,6 @@ def __init__(self, **kwargs):
|
||||
self.submodules = kwargs.get("submodules", False)
|
||||
self.submodules_delete = kwargs.get("submodules_delete", False)
|
||||
self.get_full_repo = kwargs.get("get_full_repo", False)
|
||||
self.git_sparse_paths = kwargs.get("git_sparse_paths", None)
|
||||
|
||||
@property
|
||||
def git_version(self):
|
||||
@@ -795,50 +807,38 @@ def fetch(self):
|
||||
tty.debug("Already fetched {0}".format(self.stage.source_path))
|
||||
return
|
||||
|
||||
if self.git_sparse_paths:
|
||||
self._sparse_clone_src(commit=self.commit, branch=self.branch, tag=self.tag)
|
||||
else:
|
||||
self._clone_src(commit=self.commit, branch=self.branch, tag=self.tag)
|
||||
self.submodule_operations()
|
||||
self.clone(commit=self.commit, branch=self.branch, tag=self.tag)
|
||||
|
||||
def bare_clone(self, dest):
|
||||
def clone(self, dest=None, commit=None, branch=None, tag=None, bare=False):
|
||||
"""
|
||||
Execute a bare clone for metadata only
|
||||
Clone a repository to a path.
|
||||
|
||||
Requires a destination since bare cloning does not provide source
|
||||
and shouldn't be used for staging.
|
||||
"""
|
||||
# Default to spack source path
|
||||
tty.debug("Cloning git repository: {0}".format(self._repo_info()))
|
||||
|
||||
git = self.git
|
||||
debug = spack.config.get("config:debug")
|
||||
|
||||
# We don't need to worry about which commit/branch/tag is checked out
|
||||
clone_args = ["clone", "--bare"]
|
||||
if not debug:
|
||||
clone_args.append("--quiet")
|
||||
clone_args.extend([self.url, dest])
|
||||
git(*clone_args)
|
||||
|
||||
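Usage note (illustrative, not part of the changeset): ``bare_clone`` takes an explicit destination and never touches the stage, so a caller interested only in repository metadata could do something like the following, assuming the usual ``git=``/``commit=`` keyword arguments when constructing the fetcher:

    # Hypothetical call site; URL, commit, and destination are made up.
    fetcher = GitFetchStrategy(git="https://example.com/repo.git", commit="0123abc")
    fetcher.bare_clone("/tmp/repo-metadata.git")  # bare clone, no working tree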
def _clone_src(self, commit=None, branch=None, tag=None):
|
||||
"""
|
||||
Clone a repository to a path using git.
|
||||
This method handles cloning from git, but does not require a stage.
|
||||
|
||||
Arguments:
|
||||
dest (str or None): The path into which the code is cloned. If None,
|
||||
requires a stage and uses the stage's source path.
|
||||
commit (str or None): A commit to fetch from the remote. Only one of
|
||||
commit, branch, and tag may be non-None.
|
||||
branch (str or None): A branch to fetch from the remote.
|
||||
tag (str or None): A tag to fetch from the remote.
|
||||
bare (bool): Execute a "bare" git clone (--bare option to git)
|
||||
"""
|
||||
# Default to spack source path
|
||||
dest = self.stage.source_path
|
||||
dest = dest or self.stage.source_path
|
||||
tty.debug("Cloning git repository: {0}".format(self._repo_info()))
|
||||
|
||||
git = self.git
|
||||
debug = spack.config.get("config:debug")
|
||||
|
||||
if commit:
|
||||
if bare:
|
||||
# We don't need to worry about which commit/branch/tag is checked out
|
||||
clone_args = ["clone", "--bare"]
|
||||
if not debug:
|
||||
clone_args.append("--quiet")
|
||||
clone_args.extend([self.url, dest])
|
||||
git(*clone_args)
|
||||
elif commit:
|
||||
# Need to do a regular clone and check out everything if
|
||||
# they asked for a particular commit.
|
||||
clone_args = ["clone", self.url]
|
||||
@@ -917,85 +917,6 @@ def _clone_src(self, commit=None, branch=None, tag=None):
|
||||
git(*pull_args, ignore_errors=1)
|
||||
git(*co_args)
|
||||
|
||||
def _sparse_clone_src(self, commit=None, branch=None, tag=None, **kwargs):
|
||||
"""
|
||||
Use git's sparse checkout feature to clone portions of a git repository
|
||||
|
||||
Arguments:
|
||||
commit (str or None): A commit to fetch from the remote. Only one of
|
||||
commit, branch, and tag may be non-None.
|
||||
branch (str or None): A branch to fetch from the remote.
|
||||
tag (str or None): A tag to fetch from the remote.
|
||||
"""
|
||||
dest = self.stage.source_path
|
||||
git = self.git
|
||||
|
||||
if self.git_version < spack.version.Version("2.25.0.0"):
|
||||
# code paths exist where the package is not set. Ensure some identifier for the
|
||||
# package that was configured for sparse checkout exists in the error message
|
||||
identifier = str(self.url)
|
||||
if self.package:
|
||||
identifier += f" ({self.package.name})"
|
||||
tty.warn(
|
||||
(
|
||||
f"{identifier} is configured for git sparse-checkout "
|
||||
"but the git version is too old to support sparse cloning. "
|
||||
"Cloning the full repository instead."
|
||||
)
|
||||
)
|
||||
self._clone_src(commit, branch, tag)
|
||||
else:
|
||||
# default to depth=2 to allow for retention of some git properties
|
||||
depth = kwargs.get("depth", 2)
|
||||
needs_fetch = branch or tag
|
||||
git_ref = branch or tag or commit
|
||||
|
||||
assert git_ref
|
||||
|
||||
clone_args = ["clone"]
|
||||
|
||||
if needs_fetch:
|
||||
clone_args.extend(["--branch", git_ref])
|
||||
|
||||
if self.get_full_repo:
|
||||
clone_args.append("--no-single-branch")
|
||||
else:
|
||||
clone_args.append("--single-branch")
|
||||
|
||||
clone_args.extend(
|
||||
[f"--depth={depth}", "--no-checkout", "--filter=blob:none", self.url]
|
||||
)
|
||||
|
||||
sparse_args = ["sparse-checkout", "set"]
|
||||
|
||||
if callable(self.git_sparse_paths):
|
||||
sparse_args.extend(self.git_sparse_paths())
|
||||
else:
|
||||
sparse_args.extend([p for p in self.git_sparse_paths])
|
||||
|
||||
sparse_args.append("--cone")
|
||||
|
||||
checkout_args = ["checkout", git_ref]
|
||||
|
||||
if not spack.config.get("config:debug"):
|
||||
clone_args.insert(1, "--quiet")
|
||||
checkout_args.insert(1, "--quiet")
|
||||
|
||||
with temp_cwd():
|
||||
git(*clone_args)
|
||||
repo_name = get_single_file(".")
|
||||
if self.stage:
|
||||
self.stage.srcdir = repo_name
|
||||
shutil.move(repo_name, dest)
|
||||
|
||||
with working_dir(dest):
|
||||
git(*sparse_args)
|
||||
git(*checkout_args)
|
||||
|
||||
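The sparse checkout above is driven by ``git_sparse_paths``, which ``_from_merged_attrs`` further down also copies from the package. A hedged sketch of how a package might opt in (package name and paths are invented for illustration):

    class Example(Package):
        git = "https://example.com/monorepo.git"

        # Only these sub-trees are checked out when git >= 2.25 is available;
        # older git falls back to a full clone with a warning.
        git_sparse_paths = ["docs", "src/libexample"]

        version("1.0", tag="v1.0")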
def submodule_operations(self):
|
||||
dest = self.stage.source_path
|
||||
git = self.git
|
||||
|
||||
if self.submodules_delete:
|
||||
with working_dir(dest):
|
||||
for submodule_to_delete in self.submodules_delete:
|
||||
@@ -1372,7 +1293,7 @@ def reset(self):
|
||||
shutil.move(scrubbed, source_path)
|
||||
|
||||
def __str__(self):
|
||||
return f"[hg] {self.url}"
|
||||
return "[hg] %s" % self.url
|
||||
|
||||
|
||||
@fetcher
|
||||
@@ -1381,21 +1302,46 @@ class S3FetchStrategy(URLFetchStrategy):
|
||||
|
||||
url_attr = "s3"
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
try:
|
||||
super().__init__(*args, **kwargs)
|
||||
except ValueError:
|
||||
if not kwargs.get("url"):
|
||||
raise ValueError("S3FetchStrategy requires a url for fetching.")
|
||||
|
||||
@_needs_stage
|
||||
def fetch(self):
|
||||
if not self.url.startswith("s3://"):
|
||||
raise spack.error.FetchError(
|
||||
f"{self.__class__.__name__} can only fetch from s3:// urls."
|
||||
)
|
||||
if self.archive_file:
|
||||
tty.debug(f"Already downloaded {self.archive_file}")
|
||||
tty.debug("Already downloaded {0}".format(self.archive_file))
|
||||
return
|
||||
self._fetch_urllib(self.url)
|
||||
if not self.archive_file:
|
||||
raise FailedDownloadError(
|
||||
RuntimeError(f"Missing archive {self.archive_file} after fetching")
|
||||
|
||||
parsed_url = urllib.parse.urlparse(self.url)
|
||||
if parsed_url.scheme != "s3":
|
||||
raise spack.error.FetchError("S3FetchStrategy can only fetch from s3:// urls.")
|
||||
|
||||
tty.debug("Fetching {0}".format(self.url))
|
||||
|
||||
basename = os.path.basename(parsed_url.path)
|
||||
|
||||
with working_dir(self.stage.path):
|
||||
_, headers, stream = web_util.read_from_url(self.url)
|
||||
|
||||
with open(basename, "wb") as f:
|
||||
shutil.copyfileobj(stream, f)
|
||||
|
||||
content_type = web_util.get_header(headers, "Content-type")
|
||||
|
||||
if content_type == "text/html":
|
||||
warn_content_type_mismatch(self.archive_file or "the archive")
|
||||
|
||||
if self.stage.save_filename:
|
||||
llnl.util.filesystem.rename(
|
||||
os.path.join(self.stage.path, basename), self.stage.save_filename
|
||||
)
|
||||
|
||||
if not self.archive_file:
|
||||
raise FailedDownloadError(self.url)
|
||||
|
||||
|
||||
@fetcher
|
||||
class GCSFetchStrategy(URLFetchStrategy):
|
||||
@@ -1403,22 +1349,43 @@ class GCSFetchStrategy(URLFetchStrategy):
|
||||
|
||||
url_attr = "gs"
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
try:
|
||||
super().__init__(*args, **kwargs)
|
||||
except ValueError:
|
||||
if not kwargs.get("url"):
|
||||
raise ValueError("GCSFetchStrategy requires a url for fetching.")
|
||||
|
||||
@_needs_stage
|
||||
def fetch(self):
|
||||
if not self.url.startswith("gs"):
|
||||
raise spack.error.FetchError(
|
||||
f"{self.__class__.__name__} can only fetch from gs:// urls."
|
||||
)
|
||||
if self.archive_file:
|
||||
tty.debug(f"Already downloaded {self.archive_file}")
|
||||
tty.debug("Already downloaded {0}".format(self.archive_file))
|
||||
return
|
||||
|
||||
self._fetch_urllib(self.url)
|
||||
parsed_url = urllib.parse.urlparse(self.url)
|
||||
if parsed_url.scheme != "gs":
|
||||
raise spack.error.FetchError("GCSFetchStrategy can only fetch from gs:// urls.")
|
||||
|
||||
tty.debug("Fetching {0}".format(self.url))
|
||||
|
||||
basename = os.path.basename(parsed_url.path)
|
||||
|
||||
with working_dir(self.stage.path):
|
||||
_, headers, stream = web_util.read_from_url(self.url)
|
||||
|
||||
with open(basename, "wb") as f:
|
||||
shutil.copyfileobj(stream, f)
|
||||
|
||||
content_type = web_util.get_header(headers, "Content-type")
|
||||
|
||||
if content_type == "text/html":
|
||||
warn_content_type_mismatch(self.archive_file or "the archive")
|
||||
|
||||
if self.stage.save_filename:
|
||||
os.rename(os.path.join(self.stage.path, basename), self.stage.save_filename)
|
||||
|
||||
if not self.archive_file:
|
||||
raise FailedDownloadError(
|
||||
RuntimeError(f"Missing archive {self.archive_file} after fetching")
|
||||
)
|
||||
raise FailedDownloadError(self.url)
|
||||
|
||||
|
||||
@fetcher
|
||||
@@ -1427,7 +1394,7 @@ class FetchAndVerifyExpandedFile(URLFetchStrategy):
|
||||
as well as after expanding it."""
|
||||
|
||||
def __init__(self, url, archive_sha256: str, expanded_sha256: str):
|
||||
super().__init__(url=url, checksum=archive_sha256)
|
||||
super().__init__(url, archive_sha256)
|
||||
self.expanded_sha256 = expanded_sha256
|
||||
|
||||
def expand(self):
|
||||
@@ -1469,14 +1436,14 @@ def stable_target(fetcher):
|
||||
return False
|
||||
|
||||
|
||||
def from_url(url: str) -> URLFetchStrategy:
|
||||
def from_url(url):
|
||||
"""Given a URL, find an appropriate fetch strategy for it.
|
||||
Currently just gives you a URLFetchStrategy that uses curl.
|
||||
|
||||
TODO: make this return appropriate fetch strategies for other
|
||||
types of URLs.
|
||||
"""
|
||||
return URLFetchStrategy(url=url)
|
||||
return URLFetchStrategy(url)
|
||||
|
||||
|
||||
def from_kwargs(**kwargs):
|
||||
@@ -1545,12 +1512,10 @@ def _check_version_attributes(fetcher, pkg, version):
|
||||
def _extrapolate(pkg, version):
|
||||
"""Create a fetcher from an extrapolated URL for this version."""
|
||||
try:
|
||||
return URLFetchStrategy(url=pkg.url_for_version(version), fetch_options=pkg.fetch_options)
|
||||
return URLFetchStrategy(pkg.url_for_version(version), fetch_options=pkg.fetch_options)
|
||||
except spack.package_base.NoURLError:
|
||||
raise ExtrapolationError(
|
||||
f"Can't extrapolate a URL for version {version} because "
|
||||
f"package {pkg.name} defines no URLs"
|
||||
)
|
||||
msg = "Can't extrapolate a URL for version %s " "because package %s defines no URLs"
|
||||
raise ExtrapolationError(msg % (version, pkg.name))
|
||||
|
||||
|
||||
def _from_merged_attrs(fetcher, pkg, version):
|
||||
@@ -1567,11 +1532,8 @@ def _from_merged_attrs(fetcher, pkg, version):
|
||||
attrs["fetch_options"] = pkg.fetch_options
|
||||
attrs.update(pkg.versions[version])
|
||||
|
||||
if fetcher.url_attr == "git":
|
||||
pkg_attr_list = ["submodules", "git_sparse_paths"]
|
||||
for pkg_attr in pkg_attr_list:
|
||||
if hasattr(pkg, pkg_attr):
|
||||
attrs.setdefault(pkg_attr, getattr(pkg, pkg_attr))
|
||||
if fetcher.url_attr == "git" and hasattr(pkg, "submodules"):
|
||||
attrs.setdefault("submodules", pkg.submodules)
|
||||
|
||||
return fetcher(**attrs)
|
||||
|
||||
@@ -1666,9 +1628,11 @@ def for_package_version(pkg, version=None):
|
||||
raise InvalidArgsError(pkg, version, **args)
|
||||
|
||||
|
||||
def from_url_scheme(url: str, **kwargs):
|
||||
def from_url_scheme(url, *args, **kwargs):
|
||||
"""Finds a suitable FetchStrategy by matching its url_attr with the scheme
|
||||
in the given url."""
|
||||
|
||||
url = kwargs.get("url", url)
|
||||
parsed_url = urllib.parse.urlparse(url, scheme="file")
|
||||
|
||||
scheme_mapping = kwargs.get("scheme_mapping") or {
|
||||
@@ -1685,9 +1649,11 @@ def from_url_scheme(url: str, **kwargs):
|
||||
for fetcher in all_strategies:
|
||||
url_attr = getattr(fetcher, "url_attr", None)
|
||||
if url_attr and url_attr == scheme:
|
||||
return fetcher(url=url, **kwargs)
|
||||
return fetcher(url, *args, **kwargs)
|
||||
|
||||
raise ValueError(f'No FetchStrategy found for url with scheme: "{parsed_url.scheme}"')
|
||||
raise ValueError(
|
||||
'No FetchStrategy found for url with scheme: "{SCHEME}"'.format(SCHEME=parsed_url.scheme)
|
||||
)
|
||||
|
||||
|
||||
def from_list_url(pkg):
|
||||
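In the keyword-only variant of ``from_url_scheme`` above, the URL scheme alone selects the fetcher class and remaining keyword arguments are forwarded to it. A rough sketch (bucket path and digest are placeholders):

    fetcher = from_url_scheme(
        "s3://my-bucket/archives/pkg-1.0.tar.gz",
        checksum="0" * 64,  # placeholder sha256
    )
    assert isinstance(fetcher, S3FetchStrategy)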
@@ -1712,9 +1678,7 @@ def from_list_url(pkg):
|
||||
)
|
||||
|
||||
# construct a fetcher
|
||||
return URLFetchStrategy(
|
||||
url=url_from_list, checksum=checksum, fetch_options=pkg.fetch_options
|
||||
)
|
||||
return URLFetchStrategy(url_from_list, checksum, fetch_options=pkg.fetch_options)
|
||||
except KeyError as e:
|
||||
tty.debug(e)
|
||||
tty.msg("Cannot find version %s in url_list" % pkg.version)
|
||||
@@ -1742,10 +1706,10 @@ def store(self, fetcher, relative_dest):
|
||||
mkdirp(os.path.dirname(dst))
|
||||
fetcher.archive(dst)
|
||||
|
||||
def fetcher(self, target_path: str, digest: Optional[str], **kwargs) -> CacheURLFetchStrategy:
|
||||
def fetcher(self, target_path, digest, **kwargs):
|
||||
path = os.path.join(self.root, target_path)
|
||||
url = url_util.path_to_file_url(path)
|
||||
return CacheURLFetchStrategy(url=url, checksum=digest, **kwargs)
|
||||
return CacheURLFetchStrategy(url, digest, **kwargs)
|
||||
|
||||
def destroy(self):
|
||||
shutil.rmtree(self.root, ignore_errors=True)
|
||||
@@ -1758,9 +1722,9 @@ class NoCacheError(spack.error.FetchError):
class FailedDownloadError(spack.error.FetchError):
"""Raised when a download fails."""

def __init__(self, *exceptions: Exception):
super().__init__("Failed to download")
self.exceptions = exceptions
def __init__(self, url, msg=""):
super().__init__("Failed to fetch file from URL: %s" % url, msg)
self.url = url


class NoArchiveFileError(spack.error.FetchError):
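With the variadic ``FailedDownloadError(*exceptions)`` signature above, callers inspect the wrapped per-URL exceptions rather than a single ``url`` attribute. A hedged sketch of handling code (the ``fetcher`` variable is assumed):

    try:
        fetcher.fetch()
    except FailedDownloadError as e:
        # Each attempted URL contributes its own underlying exception.
        for underlying in e.exceptions:
            tty.debug(f"download attempt failed: {underlying}")
        raise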
@@ -37,12 +37,6 @@ def __call__(self, spec):
|
||||
"""Run this hash on the provided spec."""
|
||||
return spec.spec_hash(self)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
f"SpecHashDescriptor(depflag={self.depflag!r}, "
|
||||
f"package_hash={self.package_hash!r}, name={self.name!r}, override={self.override!r})"
|
||||
)
|
||||
|
||||
|
||||
#: Spack's deployment hash. Includes all inputs that can affect how a package is built.
|
||||
dag_hash = SpecHashDescriptor(depflag=dt.BUILD | dt.LINK | dt.RUN, package_hash=True, name="hash")
|
||||
|
@@ -23,6 +23,9 @@ def post_install(spec, explicit):
|
||||
|
||||
# Push the package to all autopush mirrors
|
||||
for mirror in spack.mirror.MirrorCollection(binary=True, autopush=True).values():
|
||||
signing_key = bindist.select_signing_key() if mirror.signed else None
|
||||
bindist.push_or_raise([spec], out_url=mirror.push_url, signing_key=signing_key, force=True)
|
||||
bindist.push_or_raise(
|
||||
spec,
|
||||
mirror.push_url,
|
||||
bindist.PushOptions(force=True, regenerate_index=False, unsigned=not mirror.signed),
|
||||
)
|
||||
tty.msg(f"{spec.name}: Pushed to build cache: '{mirror.name}'")
|
||||
|
@@ -757,10 +757,6 @@ def test_process(pkg: Pb, kwargs):
|
||||
pkg.tester.status(pkg.spec.name, TestStatus.SKIPPED)
|
||||
return
|
||||
|
||||
# Make sure properly named build-time test methods actually run as
|
||||
# stand-alone tests.
|
||||
pkg.run_tests = True
|
||||
|
||||
# run test methods from the package and all virtuals it provides
|
||||
v_names = virtuals(pkg)
|
||||
test_specs = [pkg.spec] + [spack.spec.Spec(v_name) for v_name in sorted(v_names)]
|
||||
|
@@ -2,7 +2,8 @@
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
"""This module encapsulates package installation functionality.
|
||||
"""
|
||||
This module encapsulates package installation functionality.
|
||||
|
||||
The PackageInstaller coordinates concurrent builds of packages for the same
|
||||
Spack instance by leveraging the dependency DAG and file system locks. It
|
||||
@@ -16,18 +17,16 @@
|
||||
File system locks enable coordination such that no two processes attempt to
|
||||
build the same or a failed dependency package.
|
||||
|
||||
Failures to install dependency packages result in removal of their
|
||||
dependents' tasks from the current process. A failure file is also
|
||||
written (and locked) so that other processes can detect the failure
|
||||
and adjust their tasks accordingly.
|
||||
Failures to install dependency packages result in removal of their dependents'
|
||||
build tasks from the current process. A failure file is also written (and
|
||||
locked) so that other processes can detect the failure and adjust their build
|
||||
tasks accordingly.
|
||||
|
||||
This module supports the coordination of local and distributed concurrent
|
||||
installations of packages in a Spack instance.
|
||||
|
||||
"""
|
||||
|
||||
import copy
|
||||
import enum
|
||||
import glob
|
||||
import heapq
|
||||
import io
|
||||
@@ -59,7 +58,6 @@
|
||||
import spack.package_base
|
||||
import spack.package_prefs as prefs
|
||||
import spack.repo
|
||||
import spack.rewiring
|
||||
import spack.spec
|
||||
import spack.store
|
||||
import spack.util.executable
|
||||
@@ -104,16 +102,7 @@ def _write_timer_json(pkg, timer, cache):
|
||||
return
|
||||
|
||||
|
||||
class ExecuteResult(enum.Enum):
|
||||
# Task succeeded
|
||||
SUCCESS = enum.auto()
|
||||
# Task failed
|
||||
FAILED = enum.auto()
|
||||
# Task is missing build spec and will be requeued
|
||||
MISSING_BUILD_SPEC = enum.auto()
|
||||
|
||||
|
||||
class InstallAction(enum.Enum):
|
||||
class InstallAction:
|
||||
#: Don't perform an install
|
||||
NONE = 0
|
||||
#: Do a standard install
|
||||
@@ -287,13 +276,6 @@ def _do_fake_install(pkg: "spack.package_base.PackageBase") -> None:
|
||||
dump_packages(pkg.spec, packages_dir)
|
||||
|
||||
|
||||
def _add_compiler_package_to_config(pkg):
|
||||
compiler_search_prefix = getattr(pkg, "compiler_search_prefix", pkg.spec.prefix)
|
||||
spack.compilers.add_compilers_to_config(
|
||||
spack.compilers.find_compilers([compiler_search_prefix])
|
||||
)
|
||||
|
||||
|
||||
def _packages_needed_to_bootstrap_compiler(
|
||||
compiler: "spack.spec.CompilerSpec", architecture: "spack.spec.ArchSpec", pkgs: list
|
||||
) -> List[Tuple["spack.package_base.PackageBase", bool]]:
|
||||
@@ -743,7 +725,7 @@ def log(pkg: "spack.package_base.PackageBase") -> None:
|
||||
def package_id(spec: "spack.spec.Spec") -> str:
|
||||
"""A "unique" package identifier for installation purposes
|
||||
|
||||
The identifier is used to track tasks, locks, install, and
|
||||
The identifier is used to track build tasks, locks, install, and
|
||||
failure statuses.
|
||||
|
||||
The identifier needs to distinguish between combinations of compilers
|
||||
@@ -906,10 +888,9 @@ def traverse_dependencies(self, spec=None, visited=None) -> Iterator["spack.spec
|
||||
yield dep
|
||||
|
||||
|
||||
class Task:
|
||||
"""Base class for representing a task for a package."""
|
||||
class BuildTask:
|
||||
"""Class for representing the build task for a package."""
|
||||
|
||||
# TODO: Consider adding pid as a parameter here:
|
||||
def __init__(
|
||||
self,
|
||||
pkg: "spack.package_base.PackageBase",
|
||||
@@ -921,7 +902,7 @@ def __init__(
|
||||
installed: Set[str],
|
||||
):
|
||||
"""
|
||||
Instantiate a task for a package.
|
||||
Instantiate a build task for a package.
|
||||
|
||||
Args:
|
||||
pkg: the package to be built and installed
|
||||
@@ -957,13 +938,13 @@ def __init__(
|
||||
# queue.
|
||||
if status == STATUS_REMOVED:
|
||||
raise InstallError(
|
||||
f"Cannot create a task for {self.pkg_id} with status '{status}'", pkg=pkg
|
||||
f"Cannot create a build task for {self.pkg_id} with status '{status}'", pkg=pkg
|
||||
)
|
||||
|
||||
self.status = status
|
||||
|
||||
# Getting the PID again because it will be needed for execute functionality.
|
||||
# TODO: Should this be cached in PackageInstaller?
|
||||
self.pid = os.getpid()
|
||||
# Package is associated with a bootstrap compiler
|
||||
self.compiler = compiler
|
||||
|
||||
# The initial start time for processing the spec
|
||||
self.start = start
|
||||
@@ -987,8 +968,28 @@ def __init__(
|
||||
if package_id(d) != self.pkg_id
|
||||
)
|
||||
|
||||
# Handle bootstrapped compiler
|
||||
#
|
||||
# The bootstrapped compiler is not a dependency in the spec, but it is
|
||||
# a dependency of the build task. Here we add it to self.dependencies
|
||||
compiler_spec = self.pkg.spec.compiler
|
||||
arch_spec = self.pkg.spec.architecture
|
||||
strict = spack.concretize.Concretizer().check_for_compiler_existence
|
||||
if (
|
||||
not spack.compilers.compilers_for_spec(compiler_spec, arch_spec=arch_spec)
|
||||
and not strict
|
||||
):
|
||||
# The compiler is in the queue, identify it as dependency
|
||||
dep = spack.compilers.pkg_spec_for_compiler(compiler_spec)
|
||||
dep.constrain(f"platform={str(arch_spec.platform)}")
|
||||
dep.constrain(f"os={str(arch_spec.os)}")
|
||||
dep.constrain(f"target={arch_spec.target.microarchitecture.family.name}:")
|
||||
dep.concretize()
|
||||
dep_id = package_id(dep)
|
||||
self.dependencies.add(dep_id)
|
||||
|
||||
# List of uninstalled dependencies, which is used to establish
|
||||
# the priority of the task.
|
||||
# the priority of the build task.
|
||||
#
|
||||
self.uninstalled_deps = set(
|
||||
pkg_id for pkg_id in self.dependencies if pkg_id not in installed
|
||||
@@ -998,33 +999,6 @@ def __init__(
|
||||
self.attempts = 0
|
||||
self._update()
|
||||
|
||||
# Is this task to install a compiler
|
||||
self.compiler = compiler
|
||||
|
||||
# Handle bootstrapped compiler
|
||||
#
|
||||
# The bootstrapped compiler is not a dependency in the spec, but it is
|
||||
# a dependency of the build task. Here we add it to self.dependencies
|
||||
if compiler:
|
||||
compiler_spec = self.pkg.spec.compiler
|
||||
arch_spec = self.pkg.spec.architecture
|
||||
strict = spack.concretize.Concretizer().check_for_compiler_existence
|
||||
if (
|
||||
not spack.compilers.compilers_for_spec(compiler_spec, arch_spec=arch_spec)
|
||||
and not strict
|
||||
):
|
||||
# The compiler is in the queue, identify it as dependency
|
||||
dep = spack.compilers.pkg_spec_for_compiler(compiler_spec)
|
||||
dep.constrain("platform=%s" % str(arch_spec.platform))
|
||||
dep.constrain("os=%s" % str(arch_spec.os))
|
||||
dep.constrain("target=%s:" % arch_spec.target.microarchitecture.family.name)
|
||||
dep.concretize()
|
||||
dep_id = package_id(dep.package.spec)
|
||||
self.dependencies.add(dep_id)
|
||||
|
||||
def execute(self, install_status):
|
||||
raise NotImplementedError
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.key == other.key
|
||||
|
||||
@@ -1044,14 +1018,14 @@ def __ne__(self, other):
|
||||
return self.key != other.key
|
||||
|
||||
def __repr__(self) -> str:
|
||||
"""Returns a formal representation of the task."""
|
||||
"""Returns a formal representation of the build task."""
|
||||
rep = f"{self.__class__.__name__}("
|
||||
for attr, value in self.__dict__.items():
|
||||
rep += f"{attr}={value.__repr__()}, "
|
||||
return f"{rep.strip(', ')})"
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""Returns a printable version of the task."""
|
||||
"""Returns a printable version of the build task."""
|
||||
dependencies = f"#dependencies={len(self.dependencies)}"
|
||||
return "priority={0}, status={1}, start={2}, {3}".format(
|
||||
self.priority, self.status, self.start, dependencies
|
||||
@@ -1078,21 +1052,6 @@ def add_dependent(self, pkg_id: str) -> None:
|
||||
tty.debug(f"Adding {pkg_id} as a dependent of {self.pkg_id}")
|
||||
self.dependents.add(pkg_id)
|
||||
|
||||
def add_dependency(self, pkg_id, installed=False):
|
||||
"""
|
||||
Ensure the dependency package id is in the task's list so the task priority will be
|
||||
correct.
|
||||
|
||||
Args:
|
||||
pkg_id (str): package identifier of the dependency package
|
||||
installed (bool): install status of the dependency package
|
||||
"""
|
||||
if pkg_id != self.pkg_id and pkg_id not in self.dependencies:
|
||||
tty.debug("Adding {0} as a depencency of {1}".format(pkg_id, self.pkg_id))
|
||||
self.dependencies.add(pkg_id)
|
||||
if not installed:
|
||||
self.uninstalled_deps.add(pkg_id)
|
||||
|
||||
def flag_installed(self, installed: List[str]) -> None:
|
||||
"""
|
||||
Ensure the dependency is not considered to still be uninstalled.
|
||||
@@ -1109,39 +1068,6 @@ def flag_installed(self, installed: List[str]) -> None:
|
||||
level=2,
|
||||
)
|
||||
|
||||
def _setup_install_dir(self, pkg: "spack.package_base.PackageBase") -> None:
|
||||
"""
|
||||
Create and ensure proper access controls for the install directory.
|
||||
Write a small metadata file with the current spack environment.
|
||||
|
||||
Args:
|
||||
pkg: the package to be built and installed
|
||||
"""
|
||||
# Move to a module level method.
|
||||
if not os.path.exists(pkg.spec.prefix):
|
||||
path = spack.util.path.debug_padded_filter(pkg.spec.prefix)
|
||||
tty.debug("Creating the installation directory {0}".format(path))
|
||||
spack.store.STORE.layout.create_install_directory(pkg.spec)
|
||||
else:
|
||||
# Set the proper group for the prefix
|
||||
group = prefs.get_package_group(pkg.spec)
|
||||
if group:
|
||||
fs.chgrp(pkg.spec.prefix, group)
|
||||
|
||||
# Set the proper permissions.
|
||||
# This has to be done after group because changing groups blows
|
||||
# away the sticky group bit on the directory
|
||||
mode = os.stat(pkg.spec.prefix).st_mode
|
||||
perms = prefs.get_package_dir_permissions(pkg.spec)
|
||||
if mode != perms:
|
||||
os.chmod(pkg.spec.prefix, perms)
|
||||
|
||||
# Ensure the metadata path exists as well
|
||||
fs.mkdirp(spack.store.STORE.layout.metadata_path(pkg.spec), mode=perms)
|
||||
|
||||
# Always write host environment - we assume this can change
|
||||
spack.store.STORE.layout.write_host_environment(pkg.spec)
|
||||
|
||||
@property
|
||||
def explicit(self) -> bool:
|
||||
return self.pkg.spec.dag_hash() in self.request.install_args.get("explicit", [])
|
||||
@@ -1172,7 +1098,7 @@ def key(self) -> Tuple[int, int]:
|
||||
"""The key is the tuple (# uninstalled dependencies, sequence)."""
|
||||
return (self.priority, self.sequence)
|
||||
|
||||
def next_attempt(self, installed) -> "Task":
|
||||
def next_attempt(self, installed) -> "BuildTask":
|
||||
"""Create a new, updated task for the next installation attempt."""
|
||||
task = copy.copy(self)
|
||||
task._update()
|
||||
@@ -1186,99 +1112,6 @@ def priority(self):
|
||||
return len(self.uninstalled_deps)
|
||||
|
||||
|
||||
class BuildTask(Task):
|
||||
"""Class for representing a build task for a package."""
|
||||
|
||||
def execute(self, install_status):
|
||||
"""
|
||||
Perform the installation of the requested spec and/or dependency
|
||||
represented by the build task.
|
||||
"""
|
||||
install_args = self.request.install_args
|
||||
tests = install_args.get("tests")
|
||||
unsigned = install_args.get("unsigned")
|
||||
|
||||
pkg, pkg_id = self.pkg, self.pkg_id
|
||||
|
||||
tty.msg(install_msg(pkg_id, self.pid, install_status))
|
||||
self.start = self.start or time.time()
|
||||
self.status = STATUS_INSTALLING
|
||||
|
||||
# Use the binary cache if requested
|
||||
if self.use_cache:
|
||||
if _install_from_cache(pkg, self.explicit, unsigned):
|
||||
if self.compiler:
|
||||
_add_compiler_package_to_config(pkg)
|
||||
return ExecuteResult.SUCCESS
|
||||
elif self.cache_only:
|
||||
raise InstallError("No binary found when cache-only was specified", pkg=pkg)
|
||||
else:
|
||||
tty.msg(f"No binary for {pkg_id} found: installing from source")
|
||||
|
||||
pkg.run_tests = tests is True or tests and pkg.name in tests
|
||||
|
||||
# hook that allows tests to inspect the Package before installation
|
||||
# see unit_test_check() docs.
|
||||
if not pkg.unit_test_check():
|
||||
return ExecuteResult.FAILED
|
||||
|
||||
try:
|
||||
# Create stage object now and let it be serialized for the child process. That
|
||||
# way monkeypatch in tests works correctly.
|
||||
pkg.stage
|
||||
|
||||
self._setup_install_dir(pkg)
|
||||
|
||||
# Create a child process to do the actual installation.
|
||||
# Preserve verbosity settings across installs.
|
||||
spack.package_base.PackageBase._verbose = spack.build_environment.start_build_process(
|
||||
pkg, build_process, install_args
|
||||
)
|
||||
# Currently this is how RPATH-like behavior is achieved on Windows, after install
|
||||
# establish runtime linkage via Windows Runtime link object
|
||||
# Note: this is a no-op on non Windows platforms
|
||||
pkg.windows_establish_runtime_linkage()
|
||||
# Note: PARENT of the build process adds the new package to
|
||||
# the database, so that we don't need to re-read from file.
|
||||
spack.store.STORE.db.add(pkg.spec, spack.store.STORE.layout, explicit=self.explicit)
|
||||
|
||||
# If a compiler, ensure it is added to the configuration
|
||||
if self.compiler:
|
||||
_add_compiler_package_to_config(pkg)
|
||||
except spack.build_environment.StopPhase as e:
|
||||
# A StopPhase exception means that do_install was asked to
|
||||
# stop early from clients, and is not an error at this point
|
||||
pid = "{0}: ".format(self.pid) if tty.show_pid() else ""
|
||||
tty.debug("{0}{1}".format(pid, str(e)))
|
||||
tty.debug("Package stage directory: {0}".format(pkg.stage.source_path))
|
||||
return ExecuteResult.SUCCESS
|
||||
|
||||
|
||||
class RewireTask(Task):
|
||||
"""Class for representing a rewire task for a package."""
|
||||
|
||||
def execute(self, install_status):
|
||||
# TODO: Docstring
|
||||
oldstatus = self.status
|
||||
self.status = STATUS_INSTALLING
|
||||
tty.msg(install_msg(self.pkg_id, self.pid, install_status))
|
||||
self.start = self.start or time.time()
|
||||
if not self.pkg.spec.build_spec.installed:
|
||||
try:
|
||||
install_args = self.request.install_args
|
||||
unsigned = install_args.get("unsigned")
|
||||
binary_distribution.install_root_node(self.pkg.spec, unsigned=unsigned)
|
||||
_print_installed_pkg(self.pkg.prefix)
|
||||
return ExecuteResult.SUCCESS
|
||||
except BaseException as e:
|
||||
tty.debug(f"Failed to rewire {self.pkg.spec} from binary. {e}")
|
||||
self.status = oldstatus
|
||||
return ExecuteResult.MISSING_BUILD_SPEC
|
||||
spack.rewiring.rewire_node(self.pkg.spec, self.explicit)
|
||||
_print_installed_pkg(self.pkg.prefix)
|
||||
return ExecuteResult.SUCCESS
|
||||
|
||||
|
||||
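The split above leaves ``Task`` as the base class whose ``execute`` returns an ``ExecuteResult``, with ``BuildTask`` and ``RewireTask`` as the concrete variants the installer instantiates. A minimal sketch of how a further variant would plug in (the class is purely illustrative, not part of this changeset):

    class NoOpTask(Task):
        """Illustrative only: report success without doing any work."""

        def execute(self, install_status):
            tty.msg(install_msg(self.pkg_id, self.pid, install_status))
            return ExecuteResult.SUCCESS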
class PackageInstaller:
|
||||
"""
|
||||
Class for managing the install process for a Spack instance based on a bottom-up DAG approach.
|
||||
@@ -1293,11 +1126,11 @@ def __init__(
|
||||
# List of build requests
|
||||
self.build_requests = [BuildRequest(pkg, install_args) for pkg in packages]
|
||||
|
||||
# Priority queue of tasks
|
||||
self.build_pq: List[Tuple[Tuple[int, int], Task]] = []
|
||||
# Priority queue of build tasks
|
||||
self.build_pq: List[Tuple[Tuple[int, int], BuildTask]] = []
|
||||
|
||||
# Mapping of unique package ids to task
|
||||
self.build_tasks: Dict[str, Task] = {}
|
||||
# Mapping of unique package ids to build task
|
||||
self.build_tasks: Dict[str, BuildTask] = {}
|
||||
|
||||
# Cache of package locks for failed packages, keyed on package's ids
|
||||
self.failed: Dict[str, Optional[lk.Lock]] = {}
|
||||
@@ -1318,9 +1151,6 @@ def __init__(
|
||||
# fast then that option applies to all build requests.
|
||||
self.fail_fast = False
|
||||
|
||||
# Initializing all_dependencies to empty. This will be set later in _init_queue.
|
||||
self.all_dependencies: Dict[str, Set[str]] = {}
|
||||
|
||||
def __repr__(self) -> str:
|
||||
"""Returns a formal representation of the package installer."""
|
||||
rep = f"{self.__class__.__name__}("
|
||||
@@ -1391,7 +1221,7 @@ def _add_init_task(
|
||||
all_deps: Dict[str, Set[str]],
|
||||
) -> None:
|
||||
"""
|
||||
Creates and queues the initial task for the package.
|
||||
Creates and queues the initial build task for the package.
|
||||
|
||||
Args:
|
||||
pkg: the package to be built and installed
|
||||
@@ -1402,9 +1232,7 @@ def _add_init_task(
|
||||
all_deps (defaultdict(set)): dictionary of all dependencies and
|
||||
associated dependents
|
||||
"""
|
||||
cls = RewireTask if pkg.spec.spliced else BuildTask
|
||||
task: Task = cls(pkg, request, is_compiler, 0, 0, STATUS_ADDED, self.installed)
|
||||
|
||||
task = BuildTask(pkg, request, is_compiler, 0, 0, STATUS_ADDED, self.installed)
|
||||
for dep_id in task.dependencies:
|
||||
all_deps[dep_id].add(package_id(pkg.spec))
|
||||
|
||||
@@ -1478,7 +1306,7 @@ def _check_deps_status(self, request: BuildRequest) -> None:
|
||||
else:
|
||||
lock.release_read()
|
||||
|
||||
def _prepare_for_install(self, task: Task) -> None:
|
||||
def _prepare_for_install(self, task: BuildTask) -> None:
|
||||
"""
|
||||
Check the database and leftover installation directories/files and
|
||||
prepare for a new install attempt for an uninstalled package.
|
||||
@@ -1486,7 +1314,7 @@ def _prepare_for_install(self, task: Task) -> None:
|
||||
and ensuring the database is up-to-date.
|
||||
|
||||
Args:
|
||||
task: the task whose associated package is
|
||||
task (BuildTask): the build task whose associated package is
|
||||
being checked
|
||||
"""
|
||||
install_args = task.request.install_args
|
||||
@@ -1537,7 +1365,7 @@ def _prepare_for_install(self, task: Task) -> None:
|
||||
spack.store.STORE.db.update_explicit(task.pkg.spec, True)
|
||||
|
||||
def _cleanup_all_tasks(self) -> None:
|
||||
"""Cleanup all tasks to include releasing their locks."""
|
||||
"""Cleanup all build tasks to include releasing their locks."""
|
||||
for pkg_id in self.locks:
|
||||
self._release_lock(pkg_id)
|
||||
|
||||
@@ -1569,7 +1397,7 @@ def _cleanup_failed(self, pkg_id: str) -> None:
|
||||
|
||||
def _cleanup_task(self, pkg: "spack.package_base.PackageBase") -> None:
|
||||
"""
|
||||
Cleanup the task for the spec
|
||||
Cleanup the build task for the spec
|
||||
|
||||
Args:
|
||||
pkg: the package being installed
|
||||
@@ -1641,7 +1469,7 @@ def _ensure_locked(
|
||||
|
||||
if lock_type == "read":
|
||||
# Wait until the other process finishes if there are no more
|
||||
# tasks with priority 0 (i.e., with no uninstalled
|
||||
# build tasks with priority 0 (i.e., with no uninstalled
|
||||
# dependencies).
|
||||
no_p0 = len(self.build_tasks) == 0 or not self._next_is_pri0()
|
||||
timeout = None if no_p0 else 3.0
|
||||
@@ -1693,75 +1521,6 @@ def _ensure_locked(
|
||||
self.locks[pkg_id] = (lock_type, lock)
|
||||
return self.locks[pkg_id]
|
||||
|
||||
def _requeue_with_build_spec_tasks(self, task):
|
||||
"""TODO: Docstring"""
|
||||
# Full install of the build_spec is necessary because it didn't already exist somewhere
|
||||
# TODO: Bootstrap compilers first (from add_tasks)
|
||||
install_compilers = spack.config.get("config:install_missing_compilers", False)
|
||||
|
||||
spec = task.pkg.spec
|
||||
|
||||
if install_compilers:
|
||||
packages_per_compiler = {}
|
||||
|
||||
# Queue all dependencies of the build spec.
|
||||
for dep in spec.build_spec.traverse():
|
||||
pkg = dep.package
|
||||
compiler = pkg.spec.compiler
|
||||
arch = pkg.spec.architecture
|
||||
if compiler not in packages_per_compiler:
|
||||
packages_per_compiler[compiler] = {}
|
||||
|
||||
if arch not in packages_per_compiler[compiler]:
|
||||
packages_per_compiler[compiler][arch] = []
|
||||
|
||||
packages_per_compiler[compiler][arch].append(pkg)
|
||||
pkg_id = package_id(pkg.spec)
|
||||
if pkg_id not in self.build_tasks:
|
||||
spack.store.STORE.failure_tracker.clear(dep, force=False)
|
||||
self._add_init_task(dep.package, task.request, False, self.all_dependencies)
|
||||
|
||||
compiler = spec.build_spec.compiler
|
||||
arch = spec.build_spec.architecture
|
||||
|
||||
if compiler not in packages_per_compiler:
|
||||
packages_per_compiler[compiler] = {}
|
||||
|
||||
if arch not in packages_per_compiler[compiler]:
|
||||
packages_per_compiler[compiler][arch] = []
|
||||
|
||||
packages_per_compiler[compiler][arch].append(spec.build_spec.package)
|
||||
|
||||
for compiler, archs in packages_per_compiler.items():
|
||||
for arch, packages in archs.items():
|
||||
# TODO: Ensure that this works w.r.t all deps
|
||||
self._add_bootstrap_compilers(
|
||||
compiler, arch, packages, task.request, self.all_dependencies
|
||||
)
|
||||
|
||||
for dep in spec.build_spec.traverse():
|
||||
dep_pkg = dep.package
|
||||
|
||||
dep_id = package_id(dep)
|
||||
if dep_id not in self.build_tasks:
|
||||
self._add_init_task(dep_pkg, task.request, False, self.all_dependencies)
|
||||
|
||||
# Clear any persistent failure markings _unless_ they are
|
||||
# associated with another process in this parallel build
|
||||
# of the spec.
|
||||
spack.store.STORE.failure_tracker.clear(dep, force=False)
|
||||
|
||||
# Queue the build spec.
|
||||
build_pkg_id = package_id(spec.build_spec)
|
||||
build_spec_task = self.build_tasks[build_pkg_id]
|
||||
spec_pkg_id = package_id(spec)
|
||||
spec_task = task.next_attempt(self.installed)
|
||||
spec_task.status = STATUS_ADDED
|
||||
# Convey a build spec as a dependency of a deployed spec.
|
||||
build_spec_task.add_dependent(spec_pkg_id)
|
||||
spec_task.add_dependency(build_pkg_id)
|
||||
self._push_task(spec_task)
|
||||
|
||||
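``_requeue_with_build_spec_tasks`` above exists because a spliced spec is deployed from a binary but still remembers the spec it would be built from. A hedged illustration of that relationship (spec names are invented, and this is a sketch of the splicing API rather than code from this changeset):

    # A spliced spec keeps its original build recipe in `build_spec`.
    deployed = spec.splice(replacement_dependency, transitive=True)
    assert deployed.spliced
    assert deployed.build_spec is not deployed  # used if a rebuild is needed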
def _add_tasks(self, request: BuildRequest, all_deps):
|
||||
"""Add tasks to the priority queue for the given build request.
|
||||
|
||||
@@ -1850,24 +1609,83 @@ def _add_tasks(self, request: BuildRequest, all_deps):
|
||||
fail_fast = bool(request.install_args.get("fail_fast"))
|
||||
self.fail_fast = self.fail_fast or fail_fast
|
||||
|
||||
def _install_task(self, task: Task, install_status: InstallStatus) -> None:
|
||||
def _add_compiler_package_to_config(self, pkg: "spack.package_base.PackageBase") -> None:
|
||||
compiler_search_prefix = getattr(pkg, "compiler_search_prefix", pkg.spec.prefix)
|
||||
spack.compilers.add_compilers_to_config(
|
||||
spack.compilers.find_compilers([compiler_search_prefix])
|
||||
)
|
||||
|
||||
def _install_task(self, task: BuildTask, install_status: InstallStatus) -> None:
|
||||
"""
|
||||
Perform the installation of the requested spec and/or dependency
|
||||
represented by the task.
|
||||
represented by the build task.
|
||||
|
||||
Args:
|
||||
task: the installation task for a package
|
||||
task: the installation build task for a package
|
||||
install_status: the installation status for the package"""
|
||||
# TODO: use install_status
|
||||
rc = task.execute(install_status)
|
||||
if rc == ExecuteResult.MISSING_BUILD_SPEC:
|
||||
self._requeue_with_build_spec_tasks(task)
|
||||
else: # if rc == ExecuteResult.SUCCESS or rc == ExecuteResult.FAILED
|
||||
self._update_installed(task)
|
||||
|
||||
explicit = task.explicit
|
||||
install_args = task.request.install_args
|
||||
cache_only = task.cache_only
|
||||
use_cache = task.use_cache
|
||||
tests = install_args.get("tests", False)
|
||||
assert isinstance(tests, (bool, list)) # make mypy happy.
|
||||
unsigned: Optional[bool] = install_args.get("unsigned")
|
||||
|
||||
pkg, pkg_id = task.pkg, task.pkg_id
|
||||
|
||||
tty.msg(install_msg(pkg_id, self.pid, install_status))
|
||||
task.start = task.start or time.time()
|
||||
task.status = STATUS_INSTALLING
|
||||
|
||||
# Use the binary cache if requested
|
||||
if use_cache:
|
||||
if _install_from_cache(pkg, explicit, unsigned):
|
||||
self._update_installed(task)
|
||||
if task.compiler:
|
||||
self._add_compiler_package_to_config(pkg)
|
||||
return
|
||||
elif cache_only:
|
||||
raise InstallError("No binary found when cache-only was specified", pkg=pkg)
|
||||
else:
|
||||
tty.msg(f"No binary for {pkg_id} found: installing from source")
|
||||
|
||||
pkg.run_tests = tests if isinstance(tests, bool) else pkg.name in tests
|
||||
|
||||
# hook that allows tests to inspect the Package before installation
|
||||
# see unit_test_check() docs.
|
||||
if not pkg.unit_test_check():
|
||||
return
|
||||
|
||||
try:
|
||||
self._setup_install_dir(pkg)
|
||||
|
||||
# Create stage object now and let it be serialized for the child process. That
|
||||
# way monkeypatch in tests works correctly.
|
||||
pkg.stage
|
||||
|
||||
# Create a child process to do the actual installation.
|
||||
# Preserve verbosity settings across installs.
|
||||
spack.package_base.PackageBase._verbose = spack.build_environment.start_build_process(
|
||||
pkg, build_process, install_args
|
||||
)
|
||||
# Note: PARENT of the build process adds the new package to
|
||||
# the database, so that we don't need to re-read from file.
|
||||
spack.store.STORE.db.add(pkg.spec, spack.store.STORE.layout, explicit=explicit)
|
||||
|
||||
# If a compiler, ensure it is added to the configuration
|
||||
if task.compiler:
|
||||
self._add_compiler_package_to_config(pkg)
|
||||
except spack.build_environment.StopPhase as e:
|
||||
# A StopPhase exception means that do_install was asked to
|
||||
# stop early from clients, and is not an error at this point
|
||||
pid = f"{self.pid}: " if tty.show_pid() else ""
|
||||
tty.debug(f"{pid}{str(e)}")
|
||||
tty.debug(f"Package stage directory: {pkg.stage.source_path}")
|
||||
|
||||
def _next_is_pri0(self) -> bool:
|
||||
"""
|
||||
Determine if the next task has priority 0
|
||||
Determine if the next build task has priority 0
|
||||
|
||||
Return:
|
||||
True if it does, False otherwise
|
||||
@@ -1877,9 +1695,9 @@ def _next_is_pri0(self) -> bool:
|
||||
task = self.build_pq[0][1]
|
||||
return task.priority == 0
|
||||
|
||||
def _pop_task(self) -> Optional[Task]:
|
||||
def _pop_task(self) -> Optional[BuildTask]:
|
||||
"""
|
||||
Remove and return the lowest priority task.
|
||||
Remove and return the lowest priority build task.
|
||||
|
||||
Source: Variant of function at docs.python.org/2/library/heapq.html
|
||||
"""
|
||||
@@ -1891,17 +1709,17 @@ def _pop_task(self) -> Optional[Task]:
|
||||
return task
|
||||
return None
|
||||
|
||||
def _push_task(self, task: Task) -> None:
|
||||
def _push_task(self, task: BuildTask) -> None:
|
||||
"""
|
||||
Push (or queue) the specified task for the package.
|
||||
Push (or queue) the specified build task for the package.
|
||||
|
||||
Source: Customization of "add_task" function at
|
||||
docs.python.org/2/library/heapq.html
|
||||
|
||||
Args:
|
||||
task: the installation task for a package
|
||||
task: the installation build task for a package
|
||||
"""
|
||||
msg = "{0} a task for {1} with status '{2}'"
|
||||
msg = "{0} a build task for {1} with status '{2}'"
|
||||
skip = "Skipping requeue of task for {0}: {1}"
|
||||
|
||||
# Ensure do not (re-)queue installed or failed packages whose status
|
||||
@@ -1914,7 +1732,7 @@ def _push_task(self, task: Task) -> None:
|
||||
tty.debug(skip.format(task.pkg_id, "failed"))
|
||||
return
|
||||
|
||||
# Remove any associated task since its sequence will change
|
||||
# Remove any associated build task since its sequence will change
|
||||
self._remove_task(task.pkg_id)
|
||||
desc = "Queueing" if task.attempts == 0 else "Requeueing"
|
||||
tty.debug(msg.format(desc, task.pkg_id, task.status))
|
||||
@@ -1947,9 +1765,9 @@ def _release_lock(self, pkg_id: str) -> None:
|
||||
except Exception as exc:
|
||||
tty.warn(err.format(exc.__class__.__name__, ltype, pkg_id, str(exc)))
|
||||
|
||||
def _remove_task(self, pkg_id: str) -> Optional[Task]:
|
||||
def _remove_task(self, pkg_id: str) -> Optional[BuildTask]:
|
||||
"""
|
||||
Mark the existing package task as being removed and return it.
|
||||
Mark the existing package build task as being removed and return it.
|
||||
Raises KeyError if not found.
|
||||
|
||||
Source: Variant of function at docs.python.org/2/library/heapq.html
|
||||
@@ -1958,19 +1776,19 @@ def _remove_task(self, pkg_id: str) -> Optional[Task]:
|
||||
pkg_id: identifier for the package to be removed
|
||||
"""
|
||||
if pkg_id in self.build_tasks:
|
||||
tty.debug(f"Removing task for {pkg_id} from list")
|
||||
tty.debug(f"Removing build task for {pkg_id} from list")
|
||||
task = self.build_tasks.pop(pkg_id)
|
||||
task.status = STATUS_REMOVED
|
||||
return task
|
||||
else:
|
||||
return None
|
||||
|
||||
def _requeue_task(self, task: Task, install_status: InstallStatus) -> None:
|
||||
def _requeue_task(self, task: BuildTask, install_status: InstallStatus) -> None:
|
||||
"""
|
||||
Requeues a task that appears to be in progress by another process.
|
||||
|
||||
Args:
|
||||
task (Task): the installation task for a package
|
||||
task (BuildTask): the installation build task for a package
|
||||
"""
|
||||
if task.status not in [STATUS_INSTALLED, STATUS_INSTALLING]:
|
||||
tty.debug(
|
||||
@@ -1982,15 +1800,47 @@ def _requeue_task(self, task: Task, install_status: InstallStatus) -> None:
|
||||
new_task.status = STATUS_INSTALLING
|
||||
self._push_task(new_task)
|
||||
|
||||
def _setup_install_dir(self, pkg: "spack.package_base.PackageBase") -> None:
|
||||
"""
|
||||
Create and ensure proper access controls for the install directory.
|
||||
Write a small metadata file with the current spack environment.
|
||||
|
||||
Args:
|
||||
pkg: the package to be built and installed
|
||||
"""
|
||||
if not os.path.exists(pkg.spec.prefix):
|
||||
path = spack.util.path.debug_padded_filter(pkg.spec.prefix)
|
||||
tty.debug(f"Creating the installation directory {path}")
|
||||
spack.store.STORE.layout.create_install_directory(pkg.spec)
|
||||
else:
|
||||
# Set the proper group for the prefix
|
||||
group = prefs.get_package_group(pkg.spec)
|
||||
if group:
|
||||
fs.chgrp(pkg.spec.prefix, group)
|
||||
|
||||
# Set the proper permissions.
|
||||
# This has to be done after group because changing groups blows
|
||||
# away the sticky group bit on the directory
|
||||
mode = os.stat(pkg.spec.prefix).st_mode
|
||||
perms = prefs.get_package_dir_permissions(pkg.spec)
|
||||
if mode != perms:
|
||||
os.chmod(pkg.spec.prefix, perms)
|
||||
|
||||
# Ensure the metadata path exists as well
|
||||
fs.mkdirp(spack.store.STORE.layout.metadata_path(pkg.spec), mode=perms)
|
||||
|
||||
# Always write host environment - we assume this can change
|
||||
spack.store.STORE.layout.write_host_environment(pkg.spec)
|
||||
|
||||
def _update_failed(
|
||||
self, task: Task, mark: bool = False, exc: Optional[BaseException] = None
|
||||
self, task: BuildTask, mark: bool = False, exc: Optional[BaseException] = None
|
||||
) -> None:
|
||||
"""
|
||||
Update the task and transitive dependents as failed; optionally mark
|
||||
externally as failed; and remove associated tasks.
|
||||
externally as failed; and remove associated build tasks.
|
||||
|
||||
Args:
|
||||
task: the task for the failed package
|
||||
task: the build task for the failed package
|
||||
mark: ``True`` if the package and its dependencies are to
|
||||
be marked as "failed", otherwise, ``False``
|
||||
exc: optional exception if associated with the failure
|
||||
@@ -2008,19 +1858,19 @@ def _update_failed(
|
||||
if dep_id in self.build_tasks:
|
||||
tty.warn(f"Skipping build of {dep_id} since {pkg_id} failed")
|
||||
# Ensure the dependent's uninstalled dependents are
|
||||
# up-to-date and their tasks removed.
|
||||
# up-to-date and their build tasks removed.
|
||||
dep_task = self.build_tasks[dep_id]
|
||||
self._update_failed(dep_task, mark)
|
||||
self._remove_task(dep_id)
|
||||
else:
|
||||
tty.debug(f"No task for {dep_id} to skip since {pkg_id} failed")
|
||||
tty.debug(f"No build task for {dep_id} to skip since {pkg_id} failed")
|
||||
|
||||
def _update_installed(self, task: Task) -> None:
|
||||
def _update_installed(self, task: BuildTask) -> None:
|
||||
"""
|
||||
Mark the task as installed and ensure dependent tasks are aware.
|
||||
Mark the task as installed and ensure dependent build tasks are aware.
|
||||
|
||||
Args:
|
||||
task: the task for the installed package
|
||||
task (BuildTask): the build task for the installed package
|
||||
"""
|
||||
task.status = STATUS_INSTALLED
|
||||
self._flag_installed(task.pkg, task.dependents)
|
||||
@@ -2029,7 +1879,7 @@ def _flag_installed(
|
||||
self, pkg: "spack.package_base.PackageBase", dependent_ids: Optional[Set[str]] = None
|
||||
) -> None:
|
||||
"""
|
||||
Flag the package as installed and ensure known by all tasks of
|
||||
Flag the package as installed and ensure known by all build tasks of
|
||||
known dependents.
|
||||
|
||||
Args:
|
||||
@@ -2057,7 +1907,7 @@ def _flag_installed(
|
||||
dep_task = self.build_tasks[dep_id]
|
||||
self._push_task(dep_task.next_attempt(self.installed))
|
||||
else:
|
||||
tty.debug(f"{dep_id} has no task to update for {pkg_id}'s success")
|
||||
tty.debug(f"{dep_id} has no build task to update for {pkg_id}'s success")
|
||||
|
||||
def _init_queue(self) -> None:
|
||||
"""Initialize the build queue from the list of build requests."""
|
||||
@@ -2076,9 +1926,8 @@ def _init_queue(self) -> None:
|
||||
task = self.build_tasks[dep_id]
|
||||
for dependent_id in dependents.difference(task.dependents):
|
||||
task.add_dependent(dependent_id)
|
||||
self.all_dependencies = all_dependencies
|
||||
|
||||
def _install_action(self, task: Task) -> InstallAction:
|
||||
def _install_action(self, task: BuildTask) -> int:
|
||||
"""
|
||||
Determine whether the installation should be overwritten (if it already
|
||||
exists) or skipped (if has been handled by another process).
|
||||
@@ -2229,7 +2078,7 @@ def install(self) -> None:
|
||||
|
||||
# It's an already installed compiler, add it to the config
|
||||
if task.compiler:
|
||||
_add_compiler_package_to_config(pkg)
|
||||
self._add_compiler_package_to_config(pkg)
|
||||
|
||||
else:
|
||||
# At this point we've failed to get a write or a read
|
||||
@@ -2270,6 +2119,8 @@ def install(self) -> None:
|
||||
# wrapper -- silence mypy
|
||||
OverwriteInstall(self, spack.store.STORE.db, task, install_status).install() # type: ignore[arg-type] # noqa: E501
|
||||
|
||||
self._update_installed(task)
|
||||
|
||||
# If we installed then we should keep the prefix
|
||||
stop_before_phase = getattr(pkg, "stop_before_phase", None)
|
||||
last_phase = getattr(pkg, "last_phase", None)
|
||||
@@ -2329,8 +2180,7 @@ def install(self) -> None:
|
||||
|
||||
# Perform basic task cleanup for the installed spec to
|
||||
# include downgrading the write to a read lock
|
||||
if pkg.spec.installed:
|
||||
self._cleanup_task(pkg)
|
||||
self._cleanup_task(pkg)
|
||||
|
||||
# Cleanup, which includes releasing all of the read locks
|
||||
self._cleanup_all_tasks()
|
||||
@@ -2602,7 +2452,7 @@ def __init__(
|
||||
self,
|
||||
installer: PackageInstaller,
|
||||
database: spack.database.Database,
|
||||
task: Task,
|
||||
task: BuildTask,
|
||||
install_status: InstallStatus,
|
||||
):
|
||||
self.installer = installer
|
||||
|
@@ -426,36 +426,48 @@ def _determine_extension(fetcher):
return ext


class MirrorLayout:
"""A ``MirrorLayout`` stores the relative locations of files in a mirror directory. The main
storage location is ``storage_path``. An additional, human-readable path may be obtained as the
second entry when iterating this object."""
class MirrorReference:
"""A ``MirrorReference`` stores the relative paths where you can store a
package/resource in a mirror directory.

def __init__(self, storage_path: str) -> None:
self.storage_path = storage_path
The appropriate storage location is given by ``storage_path``. The
``cosmetic_path`` property provides a reference that a human could generate
themselves based on reading the details of the package.

def __iter__(self):
yield self.storage_path
A user can iterate over a ``MirrorReference`` object to get all the
possible names that might be used to refer to the resource in a mirror;
this includes names generated by previous naming schemes that are no longer
reported by ``storage_path`` or ``cosmetic_path``.
"""


class DefaultLayout(MirrorLayout):
def __init__(self, cosmetic_path: str, global_path: Optional[str] = None) -> None:
super().__init__(global_path or cosmetic_path)
def __init__(self, cosmetic_path, global_path=None):
self.global_path = global_path
self.cosmetic_path = cosmetic_path

@property
def storage_path(self):
if self.global_path:
return self.global_path
else:
return self.cosmetic_path

def __iter__(self):
if self.global_path:
yield self.global_path
yield self.cosmetic_path


class OCILayout(MirrorLayout):
"""Follow the OCI Image Layout Specification to archive blobs where paths are of the form
``blobs/<algorithm>/<digest>``"""
class OCIImageLayout:
"""Follow the OCI Image Layout Specification to archive blobs

Paths are of the form `blobs/<algorithm>/<digest>`
"""

def __init__(self, digest: spack.oci.image.Digest) -> None:
super().__init__(os.path.join("blobs", digest.algorithm, digest.digest))
self.storage_path = os.path.join("blobs", digest.algorithm, digest.digest)

def __iter__(self):
yield self.storage_path


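For readers skimming this hunk, the behavior of the new ``DefaultLayout`` can be summarized with a small standalone sketch. This is not Spack code, just a minimal re-statement of the fallback shown above (``storage_path`` prefers ``global_path`` and falls back to ``cosmetic_path``; iteration yields both names when a global path exists); the example paths are made up for illustration.

from typing import Optional

class MirrorLayout:
    """Minimal stand-in for the base class shown in the diff above."""
    def __init__(self, storage_path: str) -> None:
        self.storage_path = storage_path

    def __iter__(self):
        yield self.storage_path

class DefaultLayout(MirrorLayout):
    def __init__(self, cosmetic_path: str, global_path: Optional[str] = None) -> None:
        # The main storage location prefers the shared/global path when present.
        super().__init__(global_path or cosmetic_path)
        self.global_path = global_path
        self.cosmetic_path = cosmetic_path

    def __iter__(self):
        # Yield every name the resource might be stored under in a mirror.
        if self.global_path:
            yield self.global_path
        yield self.cosmetic_path

# Hypothetical example paths, only for illustration:
layout = DefaultLayout("zlib/zlib-1.3.tar.gz", "_source-cache/archive/ab/abcd.tar.gz")
print(layout.storage_path)   # prints the global path, since one was given
print(list(layout))          # both the global and the cosmetic path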
def mirror_archive_paths(fetcher, per_package_ref, spec=None):
|
||||
@@ -482,7 +494,7 @@ def mirror_archive_paths(fetcher, per_package_ref, spec=None):
|
||||
if global_ref and ext:
|
||||
global_ref += ".%s" % ext
|
||||
|
||||
return DefaultLayout(per_package_ref, global_ref)
|
||||
return MirrorReference(per_package_ref, global_ref)
|
||||
|
||||
|
||||
def get_all_versions(specs):
|
||||
|
@@ -6,6 +6,7 @@
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import urllib.error
|
||||
import urllib.parse
|
||||
import urllib.request
|
||||
@@ -42,6 +43,11 @@ def create_tarball(spec: spack.spec.Spec, tarfile_path):
|
||||
return spack.binary_distribution._do_create_tarball(tarfile_path, spec.prefix, buildinfo)
|
||||
|
||||
|
||||
def _log_upload_progress(digest: Digest, size: int, elapsed: float):
|
||||
elapsed = max(elapsed, 0.001) # guard against division by zero
|
||||
tty.info(f"Uploaded {digest} ({elapsed:.2f}s, {size / elapsed / 1024 / 1024:.2f} MB/s)")
|
||||
|
||||
|
||||
def with_query_param(url: str, param: str, value: str) -> str:
|
||||
"""Add a query parameter to a URL
|
||||
|
||||
@@ -135,6 +141,8 @@ def upload_blob(
|
||||
if not force and blob_exists(ref, digest, _urlopen):
|
||||
return False
|
||||
|
||||
start = time.time()
|
||||
|
||||
with open(file, "rb") as f:
|
||||
file_size = os.fstat(f.fileno()).st_size
|
||||
|
||||
@@ -159,6 +167,7 @@ def upload_blob(
|
||||
|
||||
# Created the blob in one go.
|
||||
if response.status == 201:
|
||||
_log_upload_progress(digest, file_size, time.time() - start)
|
||||
return True
|
||||
|
||||
# Otherwise, do another PUT request.
|
||||
@@ -182,6 +191,8 @@ def upload_blob(
|
||||
|
||||
spack.oci.opener.ensure_status(request, response, 201)
|
||||
|
||||
# print elapsed time and # MB/s
|
||||
_log_upload_progress(digest, file_size, time.time() - start)
|
||||
return True
|
||||
|
||||
|
||||
@@ -390,12 +401,15 @@ def make_stage(
|
||||
) -> spack.stage.Stage:
|
||||
_urlopen = _urlopen or spack.oci.opener.urlopen
|
||||
fetch_strategy = spack.fetch_strategy.OCIRegistryFetchStrategy(
|
||||
url=url, checksum=digest.digest, _urlopen=_urlopen
|
||||
url, checksum=digest.digest, _urlopen=_urlopen
|
||||
)
|
||||
# Use blobs/<alg>/<encoded> as the cache path, which follows
|
||||
# the OCI Image Layout Specification. What's missing though,
|
||||
# is the `oci-layout` and `index.json` files, which are
|
||||
# required by the spec.
|
||||
return spack.stage.Stage(
|
||||
fetch_strategy, mirror_paths=spack.mirror.OCILayout(digest), name=digest.digest, keep=keep
|
||||
fetch_strategy,
|
||||
mirror_paths=spack.mirror.OCIImageLayout(digest),
|
||||
name=digest.digest,
|
||||
keep=keep,
|
||||
)
|
||||
|
@@ -197,12 +197,13 @@ def __init__(cls, name, bases, attr_dict):
|
||||
# that "foo" was a possible executable.
|
||||
|
||||
# If a package has the executables or libraries attribute then it's
|
||||
# assumed to be detectable. Add a tag, so finding them is faster
|
||||
# assumed to be detectable
|
||||
if hasattr(cls, "executables") or hasattr(cls, "libraries"):
|
||||
# To add the tag, we need to copy the tags attribute, and attach it to
|
||||
# the current class. We don't use append, since it might modify base classes,
|
||||
# if "tags" is retrieved following the MRO.
|
||||
cls.tags = getattr(cls, "tags", []) + [DetectablePackageMeta.TAG]
|
||||
# Append a tag to each detectable package, so that finding them is faster
|
||||
if not hasattr(cls, "tags"):
|
||||
setattr(cls, "tags", [DetectablePackageMeta.TAG])
|
||||
elif DetectablePackageMeta.TAG not in cls.tags:
|
||||
cls.tags.append(DetectablePackageMeta.TAG)
|
||||
|
||||
@classmethod
|
||||
def platform_executables(cls):
|
||||
@@ -1101,7 +1102,6 @@ def _make_resource_stage(self, root_stage, resource):
|
||||
mirror_paths=spack.mirror.mirror_archive_paths(
|
||||
resource.fetcher, os.path.join(self.name, pretty_resource_name)
|
||||
),
|
||||
mirrors=spack.mirror.MirrorCollection(source=True).values(),
|
||||
path=self.path,
|
||||
)
|
||||
|
||||
@@ -1122,7 +1122,6 @@ def _make_root_stage(self, fetcher):
|
||||
stage = Stage(
|
||||
fetcher,
|
||||
mirror_paths=mirror_paths,
|
||||
mirrors=spack.mirror.MirrorCollection(source=True).values(),
|
||||
name=stage_name,
|
||||
path=self.path,
|
||||
search_fn=self._download_search,
|
||||
@@ -2427,9 +2426,8 @@ def all_urls(self) -> List[str]:
|
||||
if hasattr(self, "url") and self.url:
|
||||
urls.append(self.url)
|
||||
|
||||
# fetch from first entry in urls to save time
|
||||
if hasattr(self, "urls") and self.urls:
|
||||
urls.append(self.urls[0])
|
||||
urls.extend(self.urls)
|
||||
|
||||
for args in self.versions.values():
|
||||
if "url" in args:
|
||||
|
@@ -319,7 +319,7 @@ def stage(self) -> "spack.stage.Stage":
|
||||
self.url, archive_sha256=self.archive_sha256, expanded_sha256=self.sha256
|
||||
)
|
||||
else:
|
||||
fetcher = fs.URLFetchStrategy(url=self.url, sha256=self.sha256, expand=False)
|
||||
fetcher = fs.URLFetchStrategy(self.url, sha256=self.sha256, expand=False)
|
||||
|
||||
# The same package can have multiple patches with the same name but
|
||||
# with different contents, therefore apply a subset of the hash.
|
||||
@@ -331,7 +331,6 @@ def stage(self) -> "spack.stage.Stage":
|
||||
fetcher,
|
||||
name=f"{spack.stage.stage_prefix}patch-{fetch_digest}",
|
||||
mirror_paths=mirror_ref,
|
||||
mirrors=spack.mirror.MirrorCollection(source=True).values(),
|
||||
)
|
||||
return self._stage
|
||||
|
||||
|
@@ -149,12 +149,12 @@ def current_repository(self, value):
|
||||
@contextlib.contextmanager
|
||||
def switch_repo(self, substitute: "RepoType"):
|
||||
"""Switch the current repository list for the duration of the context manager."""
|
||||
old = self._repo
|
||||
old = self.current_repository
|
||||
try:
|
||||
self._repo = substitute
|
||||
self.current_repository = substitute
|
||||
yield
|
||||
finally:
|
||||
self._repo = old
|
||||
self.current_repository = old
|
||||
|
||||
def find_spec(self, fullname, python_path, target=None):
|
||||
# "target" is not None only when calling importlib.reload()
|
||||
@@ -683,7 +683,7 @@ class RepoPath:
|
||||
def __init__(
|
||||
self,
|
||||
*repos: Union[str, "Repo"],
|
||||
cache: Optional["spack.caches.FileCacheType"],
|
||||
cache: "spack.caches.FileCacheType",
|
||||
overrides: Optional[Dict[str, Any]] = None,
|
||||
) -> None:
|
||||
self.repos: List[Repo] = []
|
||||
@@ -696,7 +696,6 @@ def __init__(
|
||||
for repo in repos:
|
||||
try:
|
||||
if isinstance(repo, str):
|
||||
assert cache is not None, "cache must hold a value, when repo is a string"
|
||||
repo = Repo(repo, cache=cache, overrides=overrides)
|
||||
repo.finder(self)
|
||||
self.put_last(repo)
|
||||
@@ -708,10 +707,6 @@ def __init__(
|
||||
f" spack repo rm {repo}",
|
||||
)
|
||||
|
||||
def ensure_unwrapped(self) -> "RepoPath":
|
||||
"""Ensure we unwrap this object from any dynamic wrapper (like Singleton)"""
|
||||
return self
|
||||
|
||||
def put_first(self, repo: "Repo") -> None:
|
||||
"""Add repo first in the search path."""
|
||||
if isinstance(repo, RepoPath):
|
||||
@@ -935,16 +930,6 @@ def is_virtual_safe(self, pkg_name: str) -> bool:
|
||||
def __contains__(self, pkg_name):
|
||||
return self.exists(pkg_name)
|
||||
|
||||
def marshal(self):
|
||||
return (self.repos,)
|
||||
|
||||
@staticmethod
|
||||
def unmarshal(repos):
|
||||
return RepoPath(*repos, cache=None)
|
||||
|
||||
def __reduce__(self):
|
||||
return RepoPath.unmarshal, self.marshal()
|
||||
|
||||
|
||||
class Repo:
|
||||
"""Class representing a package repository in the filesystem.
|
||||
@@ -1334,20 +1319,6 @@ def __repr__(self) -> str:
|
||||
def __contains__(self, pkg_name: str) -> bool:
|
||||
return self.exists(pkg_name)
|
||||
|
||||
@staticmethod
|
||||
def unmarshal(root, cache, overrides):
|
||||
"""Helper method to unmarshal keyword arguments"""
|
||||
return Repo(root, cache=cache, overrides=overrides)
|
||||
|
||||
def marshal(self):
|
||||
cache = self._cache
|
||||
if isinstance(cache, llnl.util.lang.Singleton):
|
||||
cache = cache.instance
|
||||
return self.root, cache, self.overrides
|
||||
|
||||
def __reduce__(self):
|
||||
return Repo.unmarshal, self.marshal()
|
||||
|
||||
|
||||
RepoType = Union[Repo, RepoPath]
|
||||
|
||||
|
@@ -52,7 +52,6 @@ def rewire_node(spec, explicit):
|
||||
its subgraph. Binaries, text, and links are all changed in accordance with
|
||||
the splice. The resulting package is then 'installed.'"""
|
||||
tempdir = tempfile.mkdtemp()
|
||||
|
||||
# copy anything installed to a temporary directory
|
||||
shutil.copytree(spec.build_spec.prefix, os.path.join(tempdir, spec.dag_hash()))
|
||||
|
||||
@@ -61,16 +60,7 @@ def rewire_node(spec, explicit):
|
||||
# spec
|
||||
prefix_to_prefix = OrderedDict({spec.build_spec.prefix: spec.prefix})
|
||||
for build_dep in spec.build_spec.traverse(root=False):
|
||||
if build_dep.name in spec:
|
||||
prefix_to_prefix[build_dep.prefix] = spec[build_dep.name].prefix
|
||||
else:
|
||||
virtuals = build_dep.package.virtuals_provided
|
||||
for virtual in virtuals:
|
||||
try:
|
||||
prefix_to_prefix[build_dep.prefix] = spec[virtual.name].prefix
|
||||
break
|
||||
except KeyError:
|
||||
continue
|
||||
prefix_to_prefix[build_dep.prefix] = spec[build_dep.name].prefix
|
||||
|
||||
manifest = bindist.get_buildfile_manifest(spec.build_spec)
|
||||
platform = spack.platforms.by_name(spec.platform)
|
||||
|
@@ -11,26 +11,6 @@
|
||||
|
||||
import spack.schema.environment
|
||||
|
||||
flags: Dict[str, Any] = {
"type": "object",
"additionalProperties": False,
"properties": {
"cflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"cxxflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"fflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"cppflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"ldflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"ldlibs": {"anyOf": [{"type": "string"}, {"type": "null"}]},
},
}


extra_rpaths: Dict[str, Any] = {"type": "array", "default": [], "items": {"type": "string"}}

implicit_rpaths: Dict[str, Any] = {
"anyOf": [{"type": "array", "items": {"type": "string"}}, {"type": "boolean"}]
}
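As a quick sanity check of what this shared ``flags`` fragment accepts, the following hedged sketch validates two candidate dictionaries against it using the jsonschema package (assumed to be installed); the schema dict below is copied from the fragment above, everything else is illustrative.

import jsonschema

flags = {
    "type": "object",
    "additionalProperties": False,
    "properties": {
        "cflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
        "cxxflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
        "fflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
        "cppflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
        "ldflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
        "ldlibs": {"anyOf": [{"type": "string"}, {"type": "null"}]},
    },
}

# Accepted: known keys, string or null values.
jsonschema.validate({"cflags": "-O2 -g", "ldflags": None}, flags)

# Rejected: unknown keys are not allowed because additionalProperties is False.
try:
    jsonschema.validate({"ldpaths": "-L/opt/lib"}, flags)
except jsonschema.ValidationError as e:
    print("rejected:", e.message)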
|
||||
|
||||
#: Properties for inclusion in other schemas
|
||||
properties: Dict[str, Any] = {
|
||||
"compilers": {
|
||||
@@ -55,7 +35,18 @@
|
||||
"fc": {"anyOf": [{"type": "string"}, {"type": "null"}]},
|
||||
},
|
||||
},
|
||||
"flags": flags,
|
||||
"flags": {
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"cflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
|
||||
"cxxflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
|
||||
"fflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
|
||||
"cppflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
|
||||
"ldflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
|
||||
"ldlibs": {"anyOf": [{"type": "string"}, {"type": "null"}]},
|
||||
},
|
||||
},
|
||||
"spec": {"type": "string"},
|
||||
"operating_system": {"type": "string"},
|
||||
"target": {"type": "string"},
|
||||
@@ -63,9 +54,18 @@
|
||||
"modules": {
|
||||
"anyOf": [{"type": "string"}, {"type": "null"}, {"type": "array"}]
|
||||
},
|
||||
"implicit_rpaths": implicit_rpaths,
|
||||
"implicit_rpaths": {
|
||||
"anyOf": [
|
||||
{"type": "array", "items": {"type": "string"}},
|
||||
{"type": "boolean"},
|
||||
]
|
||||
},
|
||||
"environment": spack.schema.environment.definition,
|
||||
"extra_rpaths": extra_rpaths,
|
||||
"extra_rpaths": {
|
||||
"type": "array",
|
||||
"default": [],
|
||||
"items": {"type": "string"},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
|
@@ -84,6 +84,7 @@
|
||||
"build_language": {"type": "string"},
|
||||
"build_jobs": {"type": "integer", "minimum": 1},
|
||||
"ccache": {"type": "boolean"},
|
||||
"concretizer": {"type": "string", "enum": ["original", "clingo"]},
|
||||
"db_lock_timeout": {"type": "integer", "minimum": 1},
|
||||
"package_lock_timeout": {
|
||||
"anyOf": [{"type": "integer", "minimum": 1}, {"type": "null"}]
|
||||
@@ -97,9 +98,9 @@
|
||||
"aliases": {"type": "object", "patternProperties": {r"\w[\w-]*": {"type": "string"}}},
|
||||
},
|
||||
"deprecatedProperties": {
|
||||
"properties": ["concretizer"],
|
||||
"message": "Spack supports only clingo as a concretizer from v0.23. "
|
||||
"The config:concretizer config option is ignored.",
|
||||
"properties": ["terminal_title"],
|
||||
"message": "config:terminal_title has been replaced by "
|
||||
"install_status and is ignored",
|
||||
"error": False,
|
||||
},
|
||||
}
|
||||
|
@@ -25,7 +25,6 @@
|
||||
import spack.schema.modules
|
||||
import spack.schema.packages
|
||||
import spack.schema.repos
|
||||
import spack.schema.splice
|
||||
import spack.schema.upstreams
|
||||
import spack.schema.view
|
||||
|
||||
@@ -44,7 +43,6 @@
|
||||
spack.schema.modules.properties,
|
||||
spack.schema.packages.properties,
|
||||
spack.schema.repos.properties,
|
||||
spack.schema.splice.properties,
|
||||
spack.schema.upstreams.properties,
|
||||
spack.schema.view.properties,
|
||||
)
|
||||
|
@@ -11,8 +11,6 @@
|
||||
|
||||
import spack.schema.environment
|
||||
|
||||
from .compilers import extra_rpaths, flags, implicit_rpaths
|
||||
|
||||
permissions = {
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
@@ -186,16 +184,7 @@
|
||||
"type": "object",
|
||||
"additionalProperties": True,
|
||||
"properties": {
|
||||
"compilers": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
r"(^\w[\w-]*)": {"type": "string"}
|
||||
},
|
||||
},
|
||||
"environment": spack.schema.environment.definition,
|
||||
"extra_rpaths": extra_rpaths,
|
||||
"implicit_rpaths": implicit_rpaths,
|
||||
"flags": flags,
|
||||
"environment": spack.schema.environment.definition
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@@ -1,36 +0,0 @@
|
||||
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""Schema for splice.yaml configuration file.
|
||||
"""
|
||||
|
||||
|
||||
#: Properties for inclusion in other schemas
|
||||
properties = {
|
||||
"splice": {
|
||||
"type": "array",
|
||||
"default": [],
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": ["target", "replacement"],
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"target": {"type": "string"},
|
||||
"replacement": {"type": "string"},
|
||||
"transitive": {"type": "boolean", "default": False},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#: Full schema with metadata
|
||||
schema = {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "Spack concretization splice configuration file schema",
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": properties,
|
||||
}
|
@@ -519,7 +519,7 @@ def _compute_specs_from_answer_set(self):
|
||||
node = SpecBuilder.make_node(pkg=providers[0])
|
||||
candidate = answer.get(node)
|
||||
|
||||
if candidate and candidate.build_spec.satisfies(input_spec):
|
||||
if candidate and candidate.satisfies(input_spec):
|
||||
self._concrete_specs.append(answer[node])
|
||||
self._concrete_specs_by_input[input_spec] = answer[node]
|
||||
else:
|
||||
@@ -3617,33 +3617,7 @@ def build_specs(self, function_tuples):
|
||||
spack.version.git_ref_lookup.GitRefLookup(spec.fullname)
|
||||
)
|
||||
|
||||
specs = self.execute_splices()
|
||||
|
||||
return specs
|
||||
|
||||
def execute_splices(self):
|
||||
splice_config = spack.config.CONFIG.get("splice", [])
|
||||
splice_triples = []
|
||||
for splice_set in splice_config:
|
||||
target = splice_set["target"]
|
||||
replacement = spack.spec.Spec(splice_set["replacement"])
|
||||
assert replacement.abstract_hash
|
||||
replacement.replace_hash()
|
||||
transitive = splice_set.get("transitive", False)
|
||||
splice_triples.append((target, replacement, transitive))
|
||||
|
||||
specs = {}
|
||||
for key, spec in self._specs.items():
|
||||
current_spec = spec
|
||||
for target, replacement, transitive in splice_triples:
|
||||
if target in current_spec:
|
||||
# matches root or non-root
|
||||
# e.g. mvapich2%gcc
|
||||
current_spec = current_spec.splice(replacement, transitive)
|
||||
new_key = NodeArgument(id=key.id, pkg=current_spec.name)
|
||||
specs[new_key] = current_spec
|
||||
|
||||
return specs
|
||||
return self._specs
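The splice handling removed in this hunk reads entries shaped like the splice.yaml schema deleted earlier in this diff (``target``, ``replacement``, optional ``transitive``). A small hedged sketch of that parsing step, using plain strings instead of real ``Spec`` objects, might look like this:

from typing import List, Tuple

def splice_triples(splice_config: List[dict]) -> List[Tuple[str, str, bool]]:
    """Turn splice config entries into (target, replacement, transitive) triples."""
    triples = []
    for splice_set in splice_config:
        target = splice_set["target"]
        # The real code builds a Spec from this string and resolves its hash.
        replacement = splice_set["replacement"]
        # Defaults to False, matching the schema above.
        transitive = splice_set.get("transitive", False)
        triples.append((target, replacement, transitive))
    return triples

# Hypothetical configuration, mirroring the schema's required keys:
config = [{"target": "mpi", "replacement": "mvapich2/abcdef", "transitive": True}]
print(splice_triples(config))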
|
||||
|
||||
|
||||
def _develop_specs_from_env(spec, env):
|
||||
|
@@ -70,6 +70,7 @@
|
||||
import spack.compiler
|
||||
import spack.compilers
|
||||
import spack.config
|
||||
import spack.dependency as dp
|
||||
import spack.deptypes as dt
|
||||
import spack.error
|
||||
import spack.hash_types as ht
|
||||
@@ -2614,6 +2615,294 @@ def validate_detection(self):
|
||||
validate_fn = getattr(pkg_cls, "validate_detected_spec", lambda x, y: None)
|
||||
validate_fn(self, self.extra_attributes)
|
||||
|
||||
def _concretize_helper(self, concretizer, presets=None, visited=None):
|
||||
"""Recursive helper function for concretize().
|
||||
This concretizes everything bottom-up. As things are
|
||||
concretized, they're added to the presets, and ancestors
|
||||
will prefer the settings of their children.
|
||||
"""
|
||||
if presets is None:
|
||||
presets = {}
|
||||
if visited is None:
|
||||
visited = set()
|
||||
|
||||
if self.name in visited:
|
||||
return False
|
||||
|
||||
if self.concrete:
|
||||
visited.add(self.name)
|
||||
return False
|
||||
|
||||
changed = False
|
||||
|
||||
# Concretize deps first -- this is a bottom-up process.
|
||||
for name in sorted(self._dependencies):
|
||||
# WARNING: This function is an implementation detail of the
|
||||
# WARNING: original concretizer. Since with that greedy
|
||||
# WARNING: algorithm we don't allow multiple nodes from
|
||||
# WARNING: the same package in a DAG, here we hard-code
|
||||
# WARNING: using index 0 i.e. we assume that we have only
|
||||
# WARNING: one edge from package "name"
|
||||
changed |= self._dependencies[name][0].spec._concretize_helper(
|
||||
concretizer, presets, visited
|
||||
)
|
||||
|
||||
if self.name in presets:
|
||||
changed |= self.constrain(presets[self.name])
|
||||
else:
|
||||
# Concretize virtual dependencies last. Because they're added
|
||||
# to presets below, their constraints will all be merged, but we'll
|
||||
# still need to select a concrete package later.
|
||||
if not self.virtual:
|
||||
changed |= any(
|
||||
(
|
||||
concretizer.concretize_develop(self), # special variant
|
||||
concretizer.concretize_architecture(self),
|
||||
concretizer.concretize_compiler(self),
|
||||
concretizer.adjust_target(self),
|
||||
# flags must be concretized after compiler
|
||||
concretizer.concretize_compiler_flags(self),
|
||||
concretizer.concretize_version(self),
|
||||
concretizer.concretize_variants(self),
|
||||
)
|
||||
)
|
||||
presets[self.name] = self
|
||||
|
||||
visited.add(self.name)
|
||||
return changed
|
||||
|
||||
def _replace_with(self, concrete):
|
||||
"""Replace this virtual spec with a concrete spec."""
|
||||
assert self.virtual
|
||||
virtuals = (self.name,)
|
||||
for dep_spec in itertools.chain.from_iterable(self._dependents.values()):
|
||||
dependent = dep_spec.parent
|
||||
depflag = dep_spec.depflag
|
||||
|
||||
# remove self from all dependents, unless it is already removed
|
||||
if self.name in dependent._dependencies:
|
||||
del dependent._dependencies.edges[self.name]
|
||||
|
||||
# add the replacement, unless it is already a dep of dependent.
|
||||
if concrete.name not in dependent._dependencies:
|
||||
dependent._add_dependency(concrete, depflag=depflag, virtuals=virtuals)
|
||||
else:
|
||||
dependent.edges_to_dependencies(name=concrete.name)[0].update_virtuals(
|
||||
virtuals=virtuals
|
||||
)
|
||||
|
||||
def _expand_virtual_packages(self, concretizer):
|
||||
"""Find virtual packages in this spec, replace them with providers,
|
||||
and normalize again to include the provider's (potentially virtual)
|
||||
dependencies. Repeat until there are no virtual deps.
|
||||
|
||||
Precondition: spec is normalized.
|
||||
|
||||
.. todo::
|
||||
|
||||
If a provider depends on something that conflicts with
|
||||
other dependencies in the spec being expanded, this can
|
||||
produce a conflicting spec. For example, if mpich depends
|
||||
on hwloc@:1.3 but something in the spec needs hwloc@1.4:,
then we should choose an MPI other than mpich. Cases like
this are infrequent, but we should implement this before it
becomes a problem.
|
||||
"""
|
||||
# Make an index of stuff this spec already provides
|
||||
self_index = spack.provider_index.ProviderIndex(
|
||||
repository=spack.repo.PATH, specs=self.traverse(), restrict=True
|
||||
)
|
||||
changed = False
|
||||
done = False
|
||||
|
||||
while not done:
|
||||
done = True
|
||||
for spec in list(self.traverse()):
|
||||
replacement = None
|
||||
if spec.external:
|
||||
continue
|
||||
if spec.virtual:
|
||||
replacement = self._find_provider(spec, self_index)
|
||||
if replacement:
|
||||
# TODO: may break if in-place on self but
|
||||
# shouldn't happen if root is traversed first.
|
||||
spec._replace_with(replacement)
|
||||
done = False
|
||||
break
|
||||
|
||||
if not replacement:
|
||||
# Get a list of possible replacements in order of
|
||||
# preference.
|
||||
candidates = concretizer.choose_virtual_or_external(spec)
|
||||
|
||||
# Try the replacements in order, skipping any that cause
|
||||
# satisfiability problems.
|
||||
for replacement in candidates:
|
||||
if replacement is spec:
|
||||
break
|
||||
|
||||
# Replace spec with the candidate and normalize
|
||||
copy = self.copy()
|
||||
copy[spec.name]._dup(replacement, deps=False)
|
||||
|
||||
try:
|
||||
# If there are duplicate providers or duplicate
|
||||
# provider deps, consolidate them and merge
|
||||
# constraints.
|
||||
copy.normalize(force=True)
|
||||
break
|
||||
except spack.error.SpecError:
|
||||
# On error, we'll try the next replacement.
|
||||
continue
|
||||
|
||||
# If replacement is external then trim the dependencies
|
||||
if replacement.external:
|
||||
if spec._dependencies:
|
||||
for dep in spec.dependencies():
|
||||
del dep._dependents.edges[spec.name]
|
||||
changed = True
|
||||
spec.clear_dependencies()
|
||||
replacement.clear_dependencies()
|
||||
replacement.architecture = self.architecture
|
||||
|
||||
# TODO: could this and the stuff in _dup be cleaned up?
|
||||
def feq(cfield, sfield):
|
||||
return (not cfield) or (cfield == sfield)
|
||||
|
||||
if replacement is spec or (
|
||||
feq(replacement.name, spec.name)
|
||||
and feq(replacement.versions, spec.versions)
|
||||
and feq(replacement.compiler, spec.compiler)
|
||||
and feq(replacement.architecture, spec.architecture)
|
||||
and feq(replacement._dependencies, spec._dependencies)
|
||||
and feq(replacement.variants, spec.variants)
|
||||
and feq(replacement.external_path, spec.external_path)
|
||||
and feq(replacement.external_modules, spec.external_modules)
|
||||
):
|
||||
continue
|
||||
# Refine this spec to the candidate. This uses
|
||||
# replace_with AND dup so that it can work in
|
||||
# place. TODO: make this more efficient.
|
||||
if spec.virtual:
|
||||
spec._replace_with(replacement)
|
||||
changed = True
|
||||
if spec._dup(replacement, deps=False, cleardeps=False):
|
||||
changed = True
|
||||
|
||||
self_index.update(spec)
|
||||
done = False
|
||||
break
|
||||
|
||||
return changed
|
||||
|
||||
def _old_concretize(self, tests=False, deprecation_warning=True):
|
||||
"""A spec is concrete if it describes one build of a package uniquely.
|
||||
This will ensure that this spec is concrete.
|
||||
|
||||
Args:
|
||||
tests (list or bool): list of packages that will need test
|
||||
dependencies, or True/False for test all/none
|
||||
deprecation_warning (bool): enable or disable the deprecation
|
||||
warning for the old concretizer
|
||||
|
||||
If this spec could describe more than one version, variant, or build
|
||||
of a package, this will add constraints to make it concrete.
|
||||
|
||||
Some rigorous validation and checks are also performed on the spec.
|
||||
Concretizing ensures that it is self-consistent and that it's
|
||||
consistent with requirements of its packages. See flatten() and
|
||||
normalize() for more details on this.
|
||||
"""
|
||||
import spack.concretize
|
||||
|
||||
# Add a warning message to inform users that the original concretizer
|
||||
# will be removed
|
||||
if deprecation_warning:
|
||||
msg = (
|
||||
"the original concretizer is currently being used.\n\tUpgrade to "
|
||||
'"clingo" at your earliest convenience. The original concretizer '
|
||||
"will be removed from Spack in a future version."
|
||||
)
|
||||
warnings.warn(msg)
|
||||
|
||||
self.replace_hash()
|
||||
|
||||
if not self.name:
|
||||
raise spack.error.SpecError("Attempting to concretize anonymous spec")
|
||||
|
||||
if self._concrete:
|
||||
return
|
||||
|
||||
# take the spec apart once before starting the main concretization loop and resolving
|
||||
# deps, but don't break dependencies during concretization as the spec is built.
|
||||
user_spec_deps = self.flat_dependencies(disconnect=True)
|
||||
|
||||
changed = True
|
||||
force = False
|
||||
concretizer = spack.concretize.Concretizer(self.copy())
|
||||
while changed:
|
||||
changes = (
|
||||
self.normalize(force, tests, user_spec_deps, disconnect=False),
|
||||
self._expand_virtual_packages(concretizer),
|
||||
self._concretize_helper(concretizer),
|
||||
)
|
||||
changed = any(changes)
|
||||
force = True
|
||||
|
||||
visited_user_specs = set()
|
||||
for dep in self.traverse():
|
||||
visited_user_specs.add(dep.name)
|
||||
pkg_cls = spack.repo.PATH.get_pkg_class(dep.name)
|
||||
visited_user_specs.update(pkg_cls(dep).provided_virtual_names())
|
||||
|
||||
extra = set(user_spec_deps.keys()).difference(visited_user_specs)
|
||||
if extra:
|
||||
raise InvalidDependencyError(self.name, extra)
|
||||
|
||||
Spec.inject_patches_variant(self)
|
||||
|
||||
for s in self.traverse():
|
||||
# TODO: Refactor this into a common method to build external specs
|
||||
# TODO: or turn external_path into a lazy property
|
||||
Spec.ensure_external_path_if_external(s)
|
||||
|
||||
# assign hashes and mark concrete
|
||||
self._finalize_concretization()
|
||||
|
||||
# If any spec in the DAG is deprecated, throw an error
|
||||
Spec.ensure_no_deprecated(self)
|
||||
|
||||
# Update externals as needed
|
||||
for dep in self.traverse():
|
||||
if dep.external:
|
||||
dep.package.update_external_dependencies()
|
||||
|
||||
# Now that the spec is concrete we should check if
|
||||
# there are declared conflicts
|
||||
#
|
||||
# TODO: this needs rethinking, as currently we can only express
|
||||
# TODO: internal configuration conflicts within one package.
|
||||
matches = []
|
||||
for x in self.traverse():
|
||||
if x.external:
|
||||
# external specs are already built, don't worry about whether
|
||||
# it's possible to build that configuration with Spack
|
||||
continue
|
||||
|
||||
for when_spec, conflict_list in x.package_class.conflicts.items():
|
||||
if x.satisfies(when_spec):
|
||||
for conflict_spec, msg in conflict_list:
|
||||
if x.satisfies(conflict_spec):
|
||||
when = when_spec.copy()
|
||||
when.name = x.name
|
||||
matches.append((x, conflict_spec, when, msg))
|
||||
if matches:
|
||||
raise ConflictsInSpecError(self, matches)
|
||||
|
||||
# Check if we can produce an optimized binary (will throw if
|
||||
# there are declared inconsistencies)
|
||||
self.architecture.target.optimization_flags(self.compiler)
|
||||
|
||||
def _patches_assigned(self):
|
||||
"""Whether patches have been assigned to this spec by the concretizer."""
|
||||
# FIXME: _patches_in_order_of_appearance is attached after concretization
|
||||
@@ -2743,13 +3032,7 @@ def ensure_no_deprecated(root):
|
||||
msg += " For each package listed, choose another spec\n"
|
||||
raise SpecDeprecatedError(msg)
|
||||
|
||||
def concretize(self, tests: Union[bool, List[str]] = False) -> None:
|
||||
"""Concretize the current spec.
|
||||
|
||||
Args:
|
||||
tests: if False disregard 'test' dependencies, if a list of names activate them for
|
||||
the packages in the list, if True activate 'test' dependencies for all packages.
|
||||
"""
|
||||
def _new_concretize(self, tests=False):
|
||||
import spack.solver.asp
|
||||
|
||||
self.replace_hash()
|
||||
@@ -2783,6 +3066,19 @@ def concretize(self, tests: Union[bool, List[str]] = False) -> None:
|
||||
concretized = answer[node]
|
||||
self._dup(concretized)
|
||||
|
||||
def concretize(self, tests=False):
|
||||
"""Concretize the current spec.
|
||||
|
||||
Args:
|
||||
tests (bool or list): if False disregard 'test' dependencies,
|
||||
if a list of names activate them for the packages in the list,
|
||||
if True activate 'test' dependencies for all packages.
|
||||
"""
|
||||
if spack.config.get("config:concretizer", "clingo") == "clingo":
|
||||
self._new_concretize(tests)
|
||||
else:
|
||||
self._old_concretize(tests)
|
||||
|
||||
def _mark_root_concrete(self, value=True):
|
||||
"""Mark just this spec (not dependencies) concrete."""
|
||||
if (not value) and self.concrete and self.installed:
|
||||
@@ -2886,6 +3182,34 @@ def concretized(self, tests=False):
|
||||
clone.concretize(tests=tests)
|
||||
return clone
|
||||
|
||||
def flat_dependencies(self, disconnect: bool = False):
|
||||
"""Build DependencyMap of all of this spec's dependencies with their constraints merged.
|
||||
|
||||
Arguments:
|
||||
disconnect: if True, disconnect all dependents and dependencies among nodes in this
|
||||
spec's DAG.
|
||||
"""
|
||||
flat_deps = {}
|
||||
deptree = self.traverse(root=False)
|
||||
|
||||
for spec in deptree:
|
||||
if spec.name not in flat_deps:
|
||||
flat_deps[spec.name] = spec
|
||||
else:
|
||||
try:
|
||||
flat_deps[spec.name].constrain(spec)
|
||||
except spack.error.UnsatisfiableSpecError as e:
|
||||
# DAG contains two instances of the same package with inconsistent constraints.
|
||||
raise InconsistentSpecError("Invalid Spec DAG: %s" % e.message) from e
|
||||
|
||||
if disconnect:
|
||||
for spec in flat_deps.values():
|
||||
if not spec.concrete:
|
||||
spec.clear_edges()
|
||||
self.clear_dependencies()
|
||||
|
||||
return flat_deps
|
||||
|
||||
def index(self, deptype="all"):
|
||||
"""Return a dictionary that points to all the dependencies in this
|
||||
spec.
|
||||
@@ -2895,6 +3219,312 @@ def index(self, deptype="all"):
|
||||
dm[spec.name].append(spec)
|
||||
return dm
|
||||
|
||||
def _evaluate_dependency_conditions(self, name):
|
||||
"""Evaluate all the conditions on a dependency with this name.
|
||||
|
||||
Args:
|
||||
name (str): name of dependency to evaluate conditions on.
|
||||
|
||||
Returns:
|
||||
(Dependency): new Dependency object combining all constraints.
|
||||
|
||||
If the package depends on <name> in the current spec
|
||||
configuration, return the constrained dependency and
|
||||
corresponding dependency types.
|
||||
|
||||
If no conditions are True (and we don't depend on it), return
|
||||
``(None, None)``.
|
||||
"""
|
||||
vt.substitute_abstract_variants(self)
|
||||
# evaluate when specs to figure out constraints on the dependency.
|
||||
dep = None
|
||||
for when_spec, deps_by_name in self.package_class.dependencies.items():
|
||||
if not self.satisfies(when_spec):
|
||||
continue
|
||||
|
||||
for dep_name, dependency in deps_by_name.items():
|
||||
if dep_name != name:
|
||||
continue
|
||||
|
||||
if dep is None:
|
||||
dep = dp.Dependency(Spec(self.name), Spec(name), depflag=0)
|
||||
try:
|
||||
dep.merge(dependency)
|
||||
except spack.error.UnsatisfiableSpecError as e:
|
||||
e.message = (
|
||||
"Conflicting conditional dependencies for spec"
|
||||
"\n\n\t{0}\n\n"
|
||||
"Cannot merge constraint"
|
||||
"\n\n\t{1}\n\n"
|
||||
"into"
|
||||
"\n\n\t{2}".format(self, dependency.spec, dep.spec)
|
||||
)
|
||||
raise e
|
||||
|
||||
return dep
|
||||
|
||||
def _find_provider(self, vdep, provider_index):
|
||||
"""Find provider for a virtual spec in the provider index.
|
||||
Raise an exception if there is a conflicting virtual
|
||||
dependency already in this spec.
|
||||
"""
|
||||
assert spack.repo.PATH.is_virtual_safe(vdep.name), vdep
|
||||
|
||||
# note that this defensively copies.
|
||||
providers = provider_index.providers_for(vdep)
|
||||
|
||||
# If there is a provider for the vpkg, then use that instead of
|
||||
# the virtual package.
|
||||
if providers:
|
||||
# Remove duplicate providers that can concretize to the same
|
||||
# result.
|
||||
for provider in providers:
|
||||
for spec in providers:
|
||||
if spec is not provider and provider.intersects(spec):
|
||||
providers.remove(spec)
|
||||
# Can't have multiple providers for the same thing in one spec.
|
||||
if len(providers) > 1:
|
||||
raise MultipleProviderError(vdep, providers)
|
||||
return providers[0]
|
||||
else:
|
||||
# The user might have required something insufficient for
|
||||
# pkg_dep -- so we'll get a conflict. e.g., user asked for
|
||||
# mpi@:1.1 but some package required mpi@2.1:.
|
||||
required = provider_index.providers_for(vdep.name)
|
||||
if len(required) > 1:
|
||||
raise MultipleProviderError(vdep, required)
|
||||
elif required:
|
||||
raise UnsatisfiableProviderSpecError(required[0], vdep)
|
||||
|
||||
def _merge_dependency(self, dependency, visited, spec_deps, provider_index, tests):
|
||||
"""Merge dependency information from a Package into this Spec.
|
||||
|
||||
Args:
|
||||
dependency (Dependency): dependency metadata from a package;
|
||||
this is typically the result of merging *all* matching
|
||||
dependency constraints from the package.
|
||||
visited (set): set of dependency nodes already visited by
|
||||
``normalize()``.
|
||||
spec_deps (dict): ``dict`` of all dependencies from the spec
|
||||
being normalized.
|
||||
provider_index (dict): ``provider_index`` of virtual dep
|
||||
providers in the ``Spec`` as normalized so far.
|
||||
|
||||
NOTE: Caller should assume that this routine owns the
|
||||
``dependency`` parameter, i.e., it needs to be a copy of any
|
||||
internal structures.
|
||||
|
||||
This is the core of ``normalize()``. There are some basic steps:
|
||||
|
||||
* If dep is virtual, evaluate whether it corresponds to an
|
||||
existing concrete dependency, and merge if so.
|
||||
|
||||
* If it's real and it provides some virtual dep, see if it provides
|
||||
what some virtual dependency wants and merge if so.
|
||||
|
||||
* Finally, if none of the above, merge dependency and its
|
||||
constraints into this spec.
|
||||
|
||||
This method returns True if the spec was changed, False otherwise.
|
||||
|
||||
"""
|
||||
changed = False
|
||||
dep = dependency.spec
|
||||
|
||||
# If it's a virtual dependency, try to find an existing
|
||||
# provider in the spec, and merge that.
|
||||
virtuals = ()
|
||||
if spack.repo.PATH.is_virtual_safe(dep.name):
|
||||
virtuals = (dep.name,)
|
||||
visited.add(dep.name)
|
||||
provider = self._find_provider(dep, provider_index)
|
||||
if provider:
|
||||
dep = provider
|
||||
else:
|
||||
index = spack.provider_index.ProviderIndex(
|
||||
repository=spack.repo.PATH, specs=[dep], restrict=True
|
||||
)
|
||||
items = list(spec_deps.items())
|
||||
for name, vspec in items:
|
||||
if not spack.repo.PATH.is_virtual_safe(vspec.name):
|
||||
continue
|
||||
|
||||
if index.providers_for(vspec):
|
||||
vspec._replace_with(dep)
|
||||
del spec_deps[vspec.name]
|
||||
changed = True
|
||||
else:
|
||||
required = index.providers_for(vspec.name)
|
||||
if required:
|
||||
raise UnsatisfiableProviderSpecError(required[0], dep)
|
||||
provider_index.update(dep)
|
||||
|
||||
# If the spec isn't already in the set of dependencies, add it.
|
||||
# Note: dep is always owned by this method. If it's from the
|
||||
# caller, it's a copy from _evaluate_dependency_conditions. If it
|
||||
# comes from a vdep, it's a defensive copy from _find_provider.
|
||||
if dep.name not in spec_deps:
|
||||
if self.concrete:
|
||||
return False
|
||||
|
||||
spec_deps[dep.name] = dep
|
||||
changed = True
|
||||
else:
|
||||
# merge package/vdep information into spec
|
||||
try:
|
||||
tty.debug("{0} applying constraint {1}".format(self.name, str(dep)))
|
||||
changed |= spec_deps[dep.name].constrain(dep)
|
||||
except spack.error.UnsatisfiableSpecError as e:
|
||||
fmt = "An unsatisfiable {0}".format(e.constraint_type)
|
||||
fmt += " constraint has been detected for spec:"
|
||||
fmt += "\n\n{0}\n\n".format(spec_deps[dep.name].tree(indent=4))
|
||||
fmt += "while trying to concretize the partial spec:"
|
||||
fmt += "\n\n{0}\n\n".format(self.tree(indent=4))
|
||||
fmt += "{0} requires {1} {2} {3}, but spec asked for {4}"
|
||||
|
||||
e.message = fmt.format(
|
||||
self.name, dep.name, e.constraint_type, e.required, e.provided
|
||||
)
|
||||
|
||||
raise
|
||||
|
||||
# Add merged spec to my deps and recurse
|
||||
spec_dependency = spec_deps[dep.name]
|
||||
if dep.name not in self._dependencies:
|
||||
self._add_dependency(spec_dependency, depflag=dependency.depflag, virtuals=virtuals)
|
||||
|
||||
changed |= spec_dependency._normalize_helper(visited, spec_deps, provider_index, tests)
|
||||
return changed
|
||||
|
||||
def _normalize_helper(self, visited, spec_deps, provider_index, tests):
|
||||
"""Recursive helper function for _normalize."""
|
||||
if self.name in visited:
|
||||
return False
|
||||
visited.add(self.name)
|
||||
|
||||
# If we descend into a virtual spec, there's nothing more
|
||||
# to normalize. Concretize will finish resolving it later.
|
||||
if self.virtual or self.external:
|
||||
return False
|
||||
|
||||
# Avoid recursively adding constraints for already-installed packages:
|
||||
# these may include build dependencies which are not needed for this
|
||||
# install (since this package is already installed).
|
||||
if self.concrete and self.installed:
|
||||
return False
|
||||
|
||||
# Combine constraints from package deps with constraints from
|
||||
# the spec, until nothing changes.
|
||||
any_change = False
|
||||
changed = True
|
||||
|
||||
while changed:
|
||||
changed = False
|
||||
for dep_name in self.package_class.dependency_names():
|
||||
# Do we depend on dep_name? If so pkg_dep is not None.
|
||||
dep = self._evaluate_dependency_conditions(dep_name)
|
||||
|
||||
# If dep is a needed dependency, merge it.
|
||||
if dep:
|
||||
merge = (
|
||||
# caller requested test dependencies
|
||||
tests is True
|
||||
or (tests and self.name in tests)
|
||||
or
|
||||
# this is not a test-only dependency
|
||||
(dep.depflag & ~dt.TEST)
|
||||
)
|
||||
|
||||
if merge:
|
||||
changed |= self._merge_dependency(
|
||||
dep, visited, spec_deps, provider_index, tests
|
||||
)
|
||||
any_change |= changed
|
||||
|
||||
return any_change
|
||||
|
||||
def normalize(self, force=False, tests=False, user_spec_deps=None, disconnect=True):
|
||||
"""When specs are parsed, any dependencies specified are hanging off
|
||||
the root, and ONLY the ones that were explicitly provided are there.
|
||||
Normalization turns a partial flat spec into a DAG, where:
|
||||
|
||||
1. Known dependencies of the root package are in the DAG.
|
||||
2. Each node's dependencies dict only contains its known direct
|
||||
deps.
|
||||
3. There is only ONE unique spec for each package in the DAG.
|
||||
|
||||
* This includes virtual packages. If there is a non-virtual
package that provides a virtual package that is in the spec,
then we replace the virtual package with the non-virtual one.
|
||||
|
||||
TODO: normalize should probably implement some form of cycle
|
||||
detection, to ensure that the spec is actually a DAG.
|
||||
"""
|
||||
if not self.name:
|
||||
raise spack.error.SpecError("Attempting to normalize anonymous spec")
|
||||
|
||||
# Set _normal and _concrete to False when forced
|
||||
if force and not self._concrete:
|
||||
self._normal = False
|
||||
|
||||
if self._normal:
|
||||
return False
|
||||
|
||||
# Ensure first that all packages & compilers in the DAG exist.
|
||||
self.validate_or_raise()
|
||||
# Clear the DAG and collect all dependencies in the DAG, which will be
|
||||
# reapplied as constraints. All dependencies collected this way will
|
||||
# have been created by a previous execution of 'normalize'.
|
||||
# A dependency extracted here will only be reintegrated if it is
|
||||
# discovered to apply according to _normalize_helper, so
|
||||
# user-specified dependencies are recorded separately in case they
|
||||
# refer to specs which take several normalization passes to
|
||||
# materialize.
|
||||
all_spec_deps = self.flat_dependencies(disconnect=disconnect)
|
||||
|
||||
if user_spec_deps:
|
||||
for name, spec in user_spec_deps.items():
|
||||
if not name:
|
||||
msg = "Attempted to normalize anonymous dependency spec"
|
||||
msg += " %s" % spec
|
||||
raise InvalidSpecDetected(msg)
|
||||
if name not in all_spec_deps:
|
||||
all_spec_deps[name] = spec
|
||||
else:
|
||||
all_spec_deps[name].constrain(spec)
|
||||
|
||||
# Initialize index of virtual dependency providers if
|
||||
# concretize didn't pass us one already
|
||||
provider_index = spack.provider_index.ProviderIndex(
|
||||
repository=spack.repo.PATH, specs=[s for s in all_spec_deps.values()], restrict=True
|
||||
)
|
||||
|
||||
# traverse the package DAG and fill out dependencies according
|
||||
# to package files & their 'when' specs
|
||||
visited = set()
|
||||
|
||||
any_change = self._normalize_helper(visited, all_spec_deps, provider_index, tests)
|
||||
|
||||
# remove any leftover dependents outside the spec from, e.g., pruning externals
|
||||
valid = {id(spec) for spec in all_spec_deps.values()} | {id(self)}
|
||||
for spec in all_spec_deps.values():
|
||||
remove = [dep for dep in spec.dependents() if id(dep) not in valid]
|
||||
for dep in remove:
|
||||
del spec._dependents.edges[dep.name]
|
||||
del dep._dependencies.edges[spec.name]
|
||||
|
||||
# Mark the spec as normal once done.
|
||||
self._normal = True
|
||||
return any_change
|
||||
|
||||
def normalized(self):
|
||||
"""
|
||||
Return a normalized copy of this spec without modifying this spec.
|
||||
"""
|
||||
clone = self.copy()
|
||||
clone.normalize()
|
||||
return clone
|
||||
|
||||
def validate_or_raise(self):
|
||||
"""Checks that names and values in this spec are real. If they're not,
|
||||
it will raise an appropriate exception.
|
||||
@@ -3838,16 +4468,9 @@ def safe_color(sigil: str, string: str, color_fmt: Optional[str]) -> str:
|
||||
return clr.colorize(f"{color_fmt}{sigil}{clr.cescape(string)}@.", color=color)
|
||||
|
||||
def format_attribute(match_object: Match) -> str:
|
||||
(
|
||||
esc,
|
||||
sig,
|
||||
dep,
|
||||
hash,
|
||||
hash_len,
|
||||
attribute,
|
||||
close_brace,
|
||||
unmatched_close_brace,
|
||||
) = match_object.groups()
|
||||
(esc, sig, dep, hash, hash_len, attribute, close_brace, unmatched_close_brace) = (
|
||||
match_object.groups()
|
||||
)
|
||||
if esc:
|
||||
return esc
|
||||
elif unmatched_close_brace:
|
||||
@@ -4182,59 +4805,52 @@ def splice(self, other, transitive):
|
||||
assert self.concrete
|
||||
assert other.concrete
|
||||
|
||||
virtuals_to_replace = [
|
||||
v.name
|
||||
for v in other.package.virtuals_provided
|
||||
if v in self or v in self.package.virtuals_provided
|
||||
]
|
||||
if transitive:
|
||||
virtuals_to_replace.extend(
|
||||
[
|
||||
v.name
|
||||
for od in other.traverse(root=False)
|
||||
for v in od.package.virtuals_provided
|
||||
if v in self or v in self.package.virtuals_provided
|
||||
]
|
||||
)
|
||||
|
||||
virtuals_to_replace = [v.name for v in other.package.virtuals_provided if v in self]
|
||||
if virtuals_to_replace:
|
||||
deps_to_replace = {
|
||||
self[v]: (other[v] if v in other else other) for v in virtuals_to_replace
|
||||
}
|
||||
deps_to_replace = dict((self[v], other) for v in virtuals_to_replace)
|
||||
# deps_to_replace = [self[v] for v in virtuals_to_replace]
|
||||
else:
|
||||
# TODO: sanity check and error raise here for other.name not in self
|
||||
deps_to_replace = {self[other.name]: other}
|
||||
# deps_to_replace = [self[other.name]]
|
||||
|
||||
for d, od in deps_to_replace.items():
|
||||
virtuals = []
|
||||
for e in d.edges_from_dependents():
|
||||
virtuals.extend(e.virtuals)
|
||||
|
||||
for v in virtuals:
|
||||
if not any(ov.satisfies(v) for ov in od.package.virtuals_provided):
|
||||
# There was something provided by the original that we don't
|
||||
# get from its replacement.
|
||||
for d in deps_to_replace:
|
||||
if not all(
|
||||
v in other.package.virtuals_provided or v not in self
|
||||
for v in d.package.virtuals_provided
|
||||
):
|
||||
# There was something provided by the original that we don't
|
||||
# get from its replacement.
|
||||
raise SpliceError(
|
||||
("Splice between {0} and {1} will not provide " "the same virtuals.").format(
|
||||
self.name, other.name
|
||||
)
|
||||
)
|
||||
for n in d.traverse(root=False):
|
||||
if not all(
|
||||
any(
|
||||
v in other_n.package.virtuals_provided
|
||||
for other_n in other.traverse(root=False)
|
||||
)
|
||||
or v not in self
|
||||
for v in n.package.virtuals_provided
|
||||
):
|
||||
raise SpliceError(
|
||||
(
|
||||
f"Splice between {self.name} and {other.name} will not provide "
|
||||
"the same virtuals."
|
||||
)
|
||||
"Splice between {0} and {1} will not provide " "the same virtuals."
|
||||
).format(self.name, other.name)
|
||||
)
|
||||
|
||||
# For now, check that we don't have DAG with multiple specs from the
|
||||
# same package
|
||||
def multiple_specs(root):
|
||||
counter = collections.Counter(
|
||||
[node.name for node in root.traverse(deptype=("link", "run"))]
|
||||
)
|
||||
counter = collections.Counter([node.name for node in root.traverse()])
|
||||
_, max_number = counter.most_common()[0]
|
||||
return max_number > 1
|
||||
|
||||
if multiple_specs(self) or multiple_specs(other):
|
||||
msg = (
|
||||
'Either "{0}"\n or "{1}"\n contain multiple specs from the same '
|
||||
'Either "{0}" or "{1}" contain multiple specs from the same '
|
||||
"package, which cannot be handled by splicing at the moment"
|
||||
)
|
||||
raise ValueError(msg.format(self, other))
|
||||
@@ -4256,7 +4872,7 @@ def from_self(name, transitive):
|
||||
else:
|
||||
if name == other.name:
|
||||
return False
|
||||
if any( # TODO: should this be all
|
||||
if any(
|
||||
v in other.package.virtuals_provided
|
||||
for v in self[name].package.virtuals_provided
|
||||
):
|
||||
@@ -4289,30 +4905,17 @@ def from_self(name, transitive):
|
||||
nodes[name].add_dependency_edge(
|
||||
nodes[dep_name], depflag=edge.depflag, virtuals=edge.virtuals
|
||||
)
|
||||
deps_to_check = []
|
||||
for dep_name, dep_specs in self[name]._dependencies.items():
|
||||
deps_to_check.append(dep_name)
|
||||
for dep_spec in dep_specs:
|
||||
deps_to_check.extend(dep_spec.virtuals)
|
||||
|
||||
if any(dep not in self_nodes for dep in deps_to_check):
|
||||
nodes[name].build_spec = self[name].build_spec.copy()
|
||||
if any(dep not in self_nodes for dep in self[name]._dependencies):
|
||||
nodes[name].build_spec = self[name].build_spec
|
||||
else:
|
||||
for edge in other[name].edges_to_dependencies():
|
||||
nodes[name].add_dependency_edge(
|
||||
nodes[edge.spec.name], depflag=edge.depflag, virtuals=edge.virtuals
|
||||
)
|
||||
deps_to_check = []
|
||||
for dep_name, dep_specs in other[name]._dependencies.items():
|
||||
deps_to_check.append(dep_name)
|
||||
for dep_spec in dep_specs:
|
||||
deps_to_check.extend(dep_spec.virtuals)
|
||||
if any(dep not in other_nodes for dep in other[name]._dependencies):
|
||||
nodes[name].build_spec = other[name].build_spec
|
||||
|
||||
if any(dep not in other_nodes for dep in deps_to_check):
|
||||
nodes[name].build_spec = other[name].build_spec.copy()
|
||||
|
||||
# If self.name not in nodes then we spliced the root with a different virtual provider
|
||||
ret = nodes[self.name] if self.name in nodes else nodes[other.name]
|
||||
ret = nodes[self.name]
|
||||
|
||||
# Clear cached hashes for all affected nodes
|
||||
# Do not touch unaffected nodes
|
||||
@@ -4324,7 +4927,7 @@ def from_self(name, transitive):
|
||||
|
||||
dep.dag_hash()
|
||||
|
||||
return ret
|
||||
return nodes[self.name]
|
||||
|
||||
def clear_cached_hashes(self, ignore=()):
|
||||
"""
|
||||
|
@@ -13,7 +13,7 @@
|
||||
import stat
|
||||
import sys
|
||||
import tempfile
|
||||
from typing import Callable, Dict, Generator, Iterable, List, Optional, Set
|
||||
from typing import Callable, Dict, Iterable, Optional, Set
|
||||
|
||||
import llnl.string
|
||||
import llnl.util.lang
|
||||
@@ -40,7 +40,6 @@
|
||||
import spack.resource
|
||||
import spack.spec
|
||||
import spack.stage
|
||||
import spack.util.crypto
|
||||
import spack.util.lock
|
||||
import spack.util.path as sup
|
||||
import spack.util.pattern as pattern
|
||||
@@ -352,10 +351,8 @@ class Stage(LockableStagingDir):
|
||||
def __init__(
|
||||
self,
|
||||
url_or_fetch_strategy,
|
||||
*,
|
||||
name=None,
|
||||
mirror_paths: Optional[spack.mirror.MirrorLayout] = None,
|
||||
mirrors: Optional[Iterable[spack.mirror.Mirror]] = None,
|
||||
mirror_paths=None,
|
||||
keep=False,
|
||||
path=None,
|
||||
lock=True,
|
||||
@@ -409,18 +406,12 @@ def __init__(
|
||||
# self.fetcher can change with mirrors.
|
||||
self.default_fetcher = self.fetcher
|
||||
self.search_fn = search_fn
|
||||
# If we fetch from a mirror, but the original data is from, say, git, we currently cannot
# prove that they are equal (we don't even have a tree hash in package.py). This bool is
# used to skip checksum verification and instead warn the user.
|
||||
if isinstance(self.default_fetcher, fs.URLFetchStrategy):
|
||||
self.skip_checksum_for_mirror = not bool(self.default_fetcher.digest)
|
||||
else:
|
||||
self.skip_checksum_for_mirror = True
|
||||
# used for mirrored archives of repositories.
|
||||
self.skip_checksum_for_mirror = True
|
||||
|
||||
self.srcdir = None
|
||||
|
||||
self.mirror_paths = mirror_paths
|
||||
self.mirrors = list(mirrors) if mirrors else []
|
||||
|
||||
@property
|
||||
def expected_archive_files(self):
|
||||
@@ -475,87 +466,100 @@ def disable_mirrors(self):
|
||||
"""The Stage will not attempt to look for the associated fetcher
|
||||
target in any of Spack's mirrors (including the local download cache).
|
||||
"""
|
||||
self.mirror_paths = None
|
||||
self.mirror_paths = []
|
||||
|
||||
def _generate_fetchers(self, mirror_only=False) -> Generator[fs.FetchStrategy, None, None]:
|
||||
def fetch(self, mirror_only=False, err_msg=None):
|
||||
"""Retrieves the code or archive
|
||||
|
||||
Args:
|
||||
mirror_only (bool): only fetch from a mirror
|
||||
err_msg (str or None): the error message to display if all fetchers
|
||||
fail or ``None`` for the default fetch failure message
|
||||
"""
|
||||
fetchers = []
|
||||
if not mirror_only:
|
||||
fetchers.append(self.default_fetcher)
|
||||
|
||||
# If this archive is normally fetched from a URL, then use the same digest.
|
||||
if isinstance(self.default_fetcher, fs.URLFetchStrategy):
|
||||
digest = self.default_fetcher.digest
|
||||
expand = self.default_fetcher.expand_archive
|
||||
extension = self.default_fetcher.extension
|
||||
else:
|
||||
digest = None
|
||||
expand = True
|
||||
extension = None
|
||||
|
||||
# TODO: move mirror logic out of here and clean it up!
|
||||
# TODO: Or @alalazo may have some ideas about how to use a
|
||||
# TODO: CompositeFetchStrategy here.
|
||||
if self.mirror_paths and self.mirrors:
|
||||
# Add URL strategies for all the mirrors with the digest
|
||||
# Insert fetchers in the order that the URLs are provided.
|
||||
fetchers[:0] = (
|
||||
fs.from_url_scheme(
|
||||
url_util.join(mirror.fetch_url, rel_path),
|
||||
checksum=digest,
|
||||
expand=expand,
|
||||
extension=extension,
|
||||
)
|
||||
for mirror in self.mirrors
|
||||
self.skip_checksum_for_mirror = True
|
||||
if self.mirror_paths:
|
||||
# Join URLs of mirror roots with mirror paths. Because
# urljoin() will strip everything past the final '/' in
# the root, we add a '/' if it is not present.
|
||||
mirror_urls = [
|
||||
url_util.join(mirror.fetch_url, rel_path)
|
||||
for mirror in spack.mirror.MirrorCollection(source=True).values()
|
||||
if not mirror.fetch_url.startswith("oci://")
|
||||
for rel_path in self.mirror_paths
|
||||
)
|
||||
]
|
||||
|
||||
if self.mirror_paths and self.default_fetcher.cachable:
|
||||
fetchers[:0] = (
|
||||
spack.caches.FETCH_CACHE.fetcher(
|
||||
rel_path, digest, expand=expand, extension=extension
|
||||
# If this archive is normally fetched from a tarball URL,
|
||||
# then use the same digest. `spack mirror` ensures that
|
||||
# the checksum will be the same.
|
||||
digest = None
|
||||
expand = True
|
||||
extension = None
|
||||
if isinstance(self.default_fetcher, fs.URLFetchStrategy):
|
||||
digest = self.default_fetcher.digest
|
||||
expand = self.default_fetcher.expand_archive
|
||||
extension = self.default_fetcher.extension
|
||||
|
||||
# Have to skip the checksum for things archived from
|
||||
# repositories. How can this be made safer?
|
||||
self.skip_checksum_for_mirror = not bool(digest)
|
||||
|
||||
# Add URL strategies for all the mirrors with the digest
|
||||
# Insert fetchers in the order that the URLs are provided.
|
||||
for url in reversed(mirror_urls):
|
||||
fetchers.insert(
|
||||
0, fs.from_url_scheme(url, digest, expand=expand, extension=extension)
|
||||
)
|
||||
for rel_path in self.mirror_paths
|
||||
)
|
||||
|
||||
yield from fetchers
|
||||
if self.default_fetcher.cachable:
|
||||
for rel_path in reversed(list(self.mirror_paths)):
|
||||
cache_fetcher = spack.caches.FETCH_CACHE.fetcher(
|
||||
rel_path, digest, expand=expand, extension=extension
|
||||
)
|
||||
fetchers.insert(0, cache_fetcher)
|
||||
|
||||
# The search function may be expensive, so wait until now to call it so the user can stop
|
||||
# if a prior fetcher succeeded
|
||||
if self.search_fn and not mirror_only:
|
||||
yield from self.search_fn()
|
||||
def generate_fetchers():
|
||||
for fetcher in fetchers:
|
||||
yield fetcher
|
||||
# The search function may be expensive, so wait until now to
|
||||
# call it so the user can stop if a prior fetcher succeeded
|
||||
if self.search_fn and not mirror_only:
|
||||
dynamic_fetchers = self.search_fn()
|
||||
for fetcher in dynamic_fetchers:
|
||||
yield fetcher
|
||||
|
||||
def fetch(self, mirror_only: bool = False, err_msg: Optional[str] = None) -> None:
|
||||
"""Retrieves the code or archive
|
||||
def print_errors(errors):
|
||||
for msg in errors:
|
||||
tty.debug(msg)
|
||||
|
||||
Args:
|
||||
mirror_only: only fetch from a mirror
|
||||
err_msg: the error message to display if all fetchers fail or ``None`` for the default
|
||||
fetch failure message
|
||||
"""
|
||||
errors: List[str] = []
|
||||
for fetcher in self._generate_fetchers(mirror_only):
|
||||
errors = []
|
||||
for fetcher in generate_fetchers():
|
||||
try:
|
||||
fetcher.stage = self
|
||||
self.fetcher = fetcher
|
||||
self.fetcher.fetch()
|
||||
break
|
||||
except fs.NoCacheError:
|
||||
except spack.fetch_strategy.NoCacheError:
|
||||
# Don't bother reporting when something is not cached.
|
||||
continue
|
||||
except fs.FailedDownloadError as f:
|
||||
errors.extend(f"{fetcher}: {e.__class__.__name__}: {e}" for e in f.exceptions)
|
||||
continue
|
||||
except spack.error.SpackError as e:
|
||||
errors.append(f"{fetcher}: {e.__class__.__name__}: {e}")
|
||||
errors.append("Fetching from {0} failed.".format(fetcher))
|
||||
tty.debug(e)
|
||||
continue
|
||||
else:
|
||||
print_errors(errors)
|
||||
|
||||
self.fetcher = self.default_fetcher
|
||||
if err_msg:
|
||||
raise spack.error.FetchError(err_msg)
|
||||
raise spack.error.FetchError(
|
||||
f"All fetchers failed for {self.name}", "\n".join(f" {e}" for e in errors)
|
||||
)
|
||||
default_msg = "All fetchers failed for {0}".format(self.name)
|
||||
raise spack.error.FetchError(err_msg or default_msg, None)
|
||||
|
||||
print_errors(errors)
|
||||
|
||||
def steal_source(self, dest):
|
||||
"""Copy the source_path directory in its entirety to directory dest
|
||||
@@ -593,19 +597,16 @@ def steal_source(self, dest):
|
||||
self.destroy()
|
||||
|
||||
def check(self):
|
||||
"""Check the downloaded archive against a checksum digest."""
|
||||
"""Check the downloaded archive against a checksum digest.
|
||||
No-op if this stage checks code out of a repository."""
|
||||
if self.fetcher is not self.default_fetcher and self.skip_checksum_for_mirror:
|
||||
cache = isinstance(self.fetcher, fs.CacheURLFetchStrategy)
|
||||
if cache:
|
||||
secure_msg = "your download cache is in a secure location"
|
||||
else:
|
||||
secure_msg = "you trust this mirror and have a secure connection"
|
||||
tty.warn(
|
||||
f"Using {'download cache' if cache else 'a mirror'} instead of version control",
|
||||
"The required sources are normally checked out from a version control system, "
|
||||
f"but have been archived {'in download cache' if cache else 'on a mirror'}: "
|
||||
f"{self.fetcher}. Spack lacks a tree hash to verify the integrity of this "
|
||||
f"archive. Make sure {secure_msg}.",
|
||||
"Fetching from mirror without a checksum!",
|
||||
"This package is normally checked out from a version "
|
||||
"control system, but it has been archived on a spack "
|
||||
"mirror. This means we cannot know a checksum for the "
|
||||
"tarball in advance. Be sure that your connection to "
|
||||
"this mirror is secure!",
|
||||
)
|
||||
elif spack.config.get("config:checksum"):
|
||||
self.fetcher.check()
|
||||
@@ -1174,7 +1175,7 @@ def _fetch_and_checksum(url, options, keep_stage, action_fn=None):
|
||||
try:
|
||||
url_or_fs = url
|
||||
if options:
|
||||
url_or_fs = fs.URLFetchStrategy(url=url, fetch_options=options)
|
||||
url_or_fs = fs.URLFetchStrategy(url, fetch_options=options)
|
||||
|
||||
with Stage(url_or_fs, keep=keep_stage) as stage:
|
||||
# Fetch the archive
|
||||
@@ -1187,7 +1188,7 @@ def _fetch_and_checksum(url, options, keep_stage, action_fn=None):
|
||||
# Checksum the archive and add it to the list
|
||||
checksum = spack.util.crypto.checksum(hashlib.sha256, stage.archive_file)
|
||||
return checksum, None
|
||||
except fs.FailedDownloadError:
|
||||
except FailedDownloadError:
|
||||
return None, f"[WORKER] Failed to fetch {url}"
|
||||
except Exception as e:
|
||||
return None, f"[WORKER] Something failed on {url}, skipping. ({e})"
|
||||
@@ -1207,3 +1208,7 @@ class RestageError(StageError):
|
||||
|
||||
class VersionFetchError(StageError):
|
||||
"""Raised when we can't determine a URL to fetch a package."""
|
||||
|
||||
|
||||
# Keep this in namespace for convenience
|
||||
FailedDownloadError = fs.FailedDownloadError
|
||||
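The rewritten fetch logic above is easiest to see in isolation: the package's default fetcher is tried first (unless mirror_only is set), mirror URL fetchers are inserted ahead of it, the download-cache fetcher ahead of those, and the expensive search_fn fetchers are only generated after everything else has failed; the comment about urljoin() also explains why mirror roots need a trailing slash. Below is a minimal, self-contained sketch of that ordering, in plain Python rather than Spack code; the names Fetcher and make_fetchers are hypothetical and exist only for illustration.

```python
from typing import Callable, Iterable, Iterator, List, Optional
from urllib.parse import urljoin

# Toy stand-in for a fetch strategy: just a label we can print.
Fetcher = str

def make_fetchers(
    default: Fetcher,
    mirror_urls: Iterable[str],
    cache_paths: Iterable[str],
    search_fn: Optional[Callable[[], Iterable[Fetcher]]] = None,
    mirror_only: bool = False,
) -> Iterator[Fetcher]:
    """Yield fetchers in the order the diff describes: cache, mirrors, default, then search."""
    fetchers: List[Fetcher] = []
    if not mirror_only:
        fetchers.append(f"default:{default}")
    # Mirror fetchers are inserted *before* the default fetcher.
    fetchers[:0] = [f"mirror:{url}" for url in mirror_urls]
    # Cache fetchers are inserted before everything else.
    fetchers[:0] = [f"cache:{path}" for path in cache_paths]
    yield from fetchers
    # The (possibly expensive) dynamic search runs only if nothing above succeeded.
    if search_fn is not None and not mirror_only:
        yield from search_fn()

# Why the trailing '/' matters: urljoin drops everything after the last '/' in the base.
assert urljoin("https://mirror.example/cache", "pkg/x.tar.gz") == "https://mirror.example/pkg/x.tar.gz"
assert urljoin("https://mirror.example/cache/", "pkg/x.tar.gz") == "https://mirror.example/cache/pkg/x.tar.gz"

for f in make_fetchers(
    "https://example.com/x.tar.gz",
    ["https://mirror.example/cache/pkg/x.tar.gz"],
    ["pkg/x.tar.gz"],
):
    print(f)  # prints cache:..., then mirror:..., then default:...
```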
|
@@ -208,6 +208,7 @@ def test_satisfy_strict_constraint_when_not_concrete(architecture_tuple, constra
],
)
@pytest.mark.usefixtures("mock_packages", "config")
@pytest.mark.only_clingo("Fixing the parser broke this test for the original concretizer.")
@pytest.mark.skipif(
str(archspec.cpu.host().family) != "x86_64", reason="tests are for x86_64 uarch ranges"
)

|
@@ -64,6 +64,22 @@ def cache_directory(tmpdir):
spack.config.caches = old_cache_path


@pytest.fixture(scope="module")
def mirror_dir(tmpdir_factory):
dir = tmpdir_factory.mktemp("mirror")
dir.ensure("build_cache", dir=True)
yield str(dir)
dir.join("build_cache").remove()


@pytest.fixture(scope="function")
def test_mirror(mirror_dir):
mirror_url = url_util.path_to_file_url(mirror_dir)
mirror_cmd("add", "--scope", "site", "test-mirror-func", mirror_url)
yield mirror_dir
mirror_cmd("rm", "--scope=site", "test-mirror-func")


@pytest.fixture(scope="module")
def config_directory(tmpdir_factory):
tmpdir = tmpdir_factory.mktemp("test_configs")
@@ -207,9 +223,9 @@ def dummy_prefix(tmpdir):
@pytest.mark.requires_executables(*args)
@pytest.mark.maybeslow
@pytest.mark.usefixtures(
"default_config", "cache_directory", "install_dir_default_layout", "temporary_mirror"
"default_config", "cache_directory", "install_dir_default_layout", "test_mirror"
)
def test_default_rpaths_create_install_default_layout(temporary_mirror_dir):
def test_default_rpaths_create_install_default_layout(mirror_dir):
"""
Test the creation and installation of buildcaches with default rpaths
into the default directory layout scheme.
@@ -222,12 +238,13 @@ def test_default_rpaths_create_install_default_layout(temporary_mirror_dir):
install_cmd("--no-cache", sy_spec.name)

# Create a buildache
buildcache_cmd("push", "-u", temporary_mirror_dir, cspec.name, sy_spec.name)
buildcache_cmd("push", "-u", mirror_dir, cspec.name, sy_spec.name)

# Test force overwrite create buildcache (-f option)
buildcache_cmd("push", "-uf", temporary_mirror_dir, cspec.name)
buildcache_cmd("push", "-uf", mirror_dir, cspec.name)

# Create mirror index
buildcache_cmd("update-index", temporary_mirror_dir)
buildcache_cmd("update-index", mirror_dir)

# List the buildcaches in the mirror
buildcache_cmd("list", "-alv")
@@ -255,9 +272,9 @@ def test_default_rpaths_create_install_default_layout(temporary_mirror_dir):
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@pytest.mark.usefixtures(
"default_config", "cache_directory", "install_dir_non_default_layout", "temporary_mirror"
"default_config", "cache_directory", "install_dir_non_default_layout", "test_mirror"
)
def test_default_rpaths_install_nondefault_layout(temporary_mirror_dir):
def test_default_rpaths_install_nondefault_layout(mirror_dir):
"""
Test the creation and installation of buildcaches with default rpaths
into the non-default directory layout scheme.
@@ -278,9 +295,9 @@ def test_default_rpaths_install_nondefault_layout(temporary_mirror_dir):
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@pytest.mark.usefixtures(
"default_config", "cache_directory", "install_dir_default_layout", "temporary_mirror"
"default_config", "cache_directory", "install_dir_default_layout", "test_mirror"
)
def test_relative_rpaths_install_default_layout(temporary_mirror_dir):
def test_relative_rpaths_install_default_layout(mirror_dir):
"""
Test the creation and installation of buildcaches with relative
rpaths into the default directory layout scheme.
@@ -307,9 +324,9 @@ def test_relative_rpaths_install_default_layout(temporary_mirror_dir):
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@pytest.mark.usefixtures(
"default_config", "cache_directory", "install_dir_non_default_layout", "temporary_mirror"
"default_config", "cache_directory", "install_dir_non_default_layout", "test_mirror"
)
def test_relative_rpaths_install_nondefault(temporary_mirror_dir):
def test_relative_rpaths_install_nondefault(mirror_dir):
"""
Test the installation of buildcaches with relativized rpaths
into the non-default directory layout scheme.
@@ -320,7 +337,7 @@ def test_relative_rpaths_install_nondefault(temporary_mirror_dir):
buildcache_cmd("install", "-uf", cspec.name)


def test_push_and_fetch_keys(mock_gnupghome, tmp_path):
def test_push_and_fetch_keys(mock_gnupghome):
testpath = str(mock_gnupghome)

mirror = os.path.join(testpath, "mirror")
@@ -340,7 +357,7 @@ def test_push_and_fetch_keys(mock_gnupghome, tmp_path):
assert len(keys) == 1
fpr = keys[0]

bindist.push_keys(mirror, keys=[fpr], tmpdir=str(tmp_path), update_index=True)
bindist.push_keys(mirror, keys=[fpr], regenerate_index=True)

# dir 2: import the key from the mirror, and confirm that its fingerprint
# matches the one created above
@@ -358,9 +375,9 @@ def test_push_and_fetch_keys(mock_gnupghome, tmp_path):
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@pytest.mark.usefixtures(
"default_config", "cache_directory", "install_dir_non_default_layout", "temporary_mirror"
"default_config", "cache_directory", "install_dir_non_default_layout", "test_mirror"
)
def test_built_spec_cache(temporary_mirror_dir):
def test_built_spec_cache(mirror_dir):
"""Because the buildcache list command fetches the buildcache index
and uses it to populate the binary_distribution built spec cache, when
this test calls get_mirrors_for_spec, it is testing the popluation of
@@ -381,9 +398,7 @@ def fake_dag_hash(spec, length=None):
return "tal4c7h4z0gqmixb1eqa92mjoybxn5l6"[:length]


@pytest.mark.usefixtures(
"install_mockery_mutable_config", "mock_packages", "mock_fetch", "temporary_mirror"
)
@pytest.mark.usefixtures("install_mockery", "mock_packages", "mock_fetch", "test_mirror")
def test_spec_needs_rebuild(monkeypatch, tmpdir):
"""Make sure needs_rebuild properly compares remote hash
against locally computed one, avoiding unnecessary rebuilds"""
@@ -449,7 +464,7 @@ def test_generate_index_missing(monkeypatch, tmpdir, mutable_config):
assert "libelf" not in cache_list


def test_generate_key_index_failure(monkeypatch, tmp_path):
def test_generate_key_index_failure(monkeypatch):
def list_url(url, recursive=False):
if "fails-listing" in url:
raise Exception("Couldn't list the directory")
@@ -462,13 +477,13 @@ def push_to_url(*args, **kwargs):
monkeypatch.setattr(web_util, "push_to_url", push_to_url)

with pytest.raises(CannotListKeys, match="Encountered problem listing keys"):
bindist.generate_key_index("s3://non-existent/fails-listing", str(tmp_path))
bindist.generate_key_index("s3://non-existent/fails-listing")

with pytest.raises(GenerateIndexError, match="problem pushing .* Couldn't upload"):
bindist.generate_key_index("s3://non-existent/fails-uploading", str(tmp_path))
bindist.generate_key_index("s3://non-existent/fails-uploading")


def test_generate_package_index_failure(monkeypatch, tmp_path, capfd):
def test_generate_package_index_failure(monkeypatch, capfd):
def mock_list_url(url, recursive=False):
raise Exception("Some HTTP error")

@@ -477,16 +492,15 @@ def mock_list_url(url, recursive=False):
test_url = "file:///fake/keys/dir"

with pytest.raises(GenerateIndexError, match="Unable to generate package index"):
bindist.generate_package_index(test_url, str(tmp_path))
bindist.generate_package_index(test_url)

assert (
"Warning: Encountered problem listing packages at "
f"{test_url}/{bindist.BUILD_CACHE_RELATIVE_PATH}: Some HTTP error"
f"Warning: Encountered problem listing packages at {test_url}: Some HTTP error"
in capfd.readouterr().err
)


def test_generate_indices_exception(monkeypatch, tmp_path, capfd):
def test_generate_indices_exception(monkeypatch, capfd):
def mock_list_url(url, recursive=False):
raise Exception("Test Exception handling")

@@ -495,16 +509,16 @@ def mock_list_url(url, recursive=False):
url = "file:///fake/keys/dir"

with pytest.raises(GenerateIndexError, match=f"Encountered problem listing keys at {url}"):
bindist.generate_key_index(url, str(tmp_path))
bindist.generate_key_index(url)

with pytest.raises(GenerateIndexError, match="Unable to generate package index"):
bindist.generate_package_index(url, str(tmp_path))
bindist.generate_package_index(url)

assert f"Encountered problem listing packages at {url}" in capfd.readouterr().err


@pytest.mark.usefixtures("mock_fetch", "install_mockery")
def test_update_sbang(tmpdir, temporary_mirror):
def test_update_sbang(tmpdir, test_mirror):
"""Test the creation and installation of buildcaches with default rpaths
into the non-default directory layout scheme, triggering an update of the
sbang.
@@ -515,7 +529,7 @@ def test_update_sbang(tmpdir, temporary_mirror):
old_spec_hash_str = "/{0}".format(old_spec.dag_hash())

# Need a fake mirror with *function* scope.
mirror_dir = temporary_mirror
mirror_dir = test_mirror

# Assume all commands will concretize old_spec the same way.
install_cmd("--no-cache", old_spec.name)

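The fixtures and tests above exercise the full mirror/buildcache round trip through SpackCommand wrappers. A condensed sketch of that same flow follows, assuming the wrappers are created as the tests do; populate_test_mirror, its arguments, and the mirror name are hypothetical, while the CLI arguments are the ones the tests pass.

```python
import spack.main
import spack.util.url as url_util

# Command wrappers as used throughout these tests.
install_cmd = spack.main.SpackCommand("install")
mirror_cmd = spack.main.SpackCommand("mirror")
buildcache_cmd = spack.main.SpackCommand("buildcache")

def populate_test_mirror(mirror_dir: str, pkg_name: str) -> None:
    """Install a package, push it to a file:// mirror, and index the mirror."""
    mirror_url = url_util.path_to_file_url(mirror_dir)
    mirror_cmd("add", "--scope", "site", "test-mirror-func", mirror_url)
    try:
        install_cmd("--no-cache", pkg_name)
        # "-u" pushes unsigned binaries, matching the unsigned pushes in the tests above.
        buildcache_cmd("push", "-u", mirror_dir, pkg_name)
        buildcache_cmd("update-index", mirror_dir)
        buildcache_cmd("list", "-alv")
    finally:
        mirror_cmd("rm", "--scope=site", "test-mirror-func")
```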
@@ -13,34 +13,34 @@

import spack.spec
import spack.util.url

install = spack.main.SpackCommand("install")

pytestmark = pytest.mark.not_on_windows("does not run on windows")


def test_build_tarball_overwrite(install_mockery, mock_fetch, monkeypatch, tmp_path):
spec = spack.spec.Spec("trivial-install-test-package").concretized()
spec.package.do_install(fake=True)
def test_build_tarball_overwrite(install_mockery, mock_fetch, monkeypatch, tmpdir):
with tmpdir.as_cwd():
spec = spack.spec.Spec("trivial-install-test-package").concretized()
install(str(spec))

specs = [spec]
# Runs fine the first time, throws the second time
out_url = spack.util.url.path_to_file_url(str(tmpdir))
bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))
with pytest.raises(bd.NoOverwriteException):
bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))

# Runs fine the first time, second time it's a no-op
out_url = spack.util.url.path_to_file_url(str(tmp_path))
skipped = bd.push_or_raise(specs, out_url, signing_key=None)
assert not skipped
# Should work fine with force=True
bd.push_or_raise(spec, out_url, bd.PushOptions(force=True, unsigned=True))

skipped = bd.push_or_raise(specs, out_url, signing_key=None)
assert skipped == specs
# Remove the tarball and try again.
# This must *also* throw, because of the existing .spec.json file
os.remove(
os.path.join(
bd.build_cache_prefix("."),
bd.tarball_directory_name(spec),
bd.tarball_name(spec, ".spack"),
)
)

# Should work fine with force=True
skipped = bd.push_or_raise(specs, out_url, signing_key=None, force=True)
assert not skipped

# Remove the tarball, which should cause push to push.
os.remove(
tmp_path
/ bd.BUILD_CACHE_RELATIVE_PATH
/ bd.tarball_directory_name(spec)
/ bd.tarball_name(spec, ".spack")
)

skipped = bd.push_or_raise(specs, out_url, signing_key=None)
assert not skipped
with pytest.raises(bd.NoOverwriteException):
bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))

|
@@ -25,7 +25,7 @@ def test_build_task_errors(install_mockery):
inst.BuildTask(spec.package, None, False, 0, 0, 0, set())

request = inst.BuildRequest(spec.package, {})
with pytest.raises(inst.InstallError, match="Cannot create a task"):
with pytest.raises(inst.InstallError, match="Cannot create a build task"):
inst.BuildTask(spec.package, request, False, 0, 0, inst.STATUS_REMOVED, set())

||||
|
||||
|
||||
|
@@ -286,7 +286,7 @@ def _fail(self, args):
def test_ci_create_buildcache(tmpdir, working_env, config, mock_packages, monkeypatch):
"""Test that create_buildcache returns a list of objects with the correct
keys and types."""
monkeypatch.setattr(ci, "push_to_build_cache", lambda a, b, c: True)
monkeypatch.setattr(spack.ci, "_push_to_build_cache", lambda a, b, c: True)

results = ci.create_buildcache(
None, destination_mirror_urls=["file:///fake-url-one", "file:///fake-url-two"]

||||
|
@@ -12,9 +12,7 @@

import spack.binary_distribution
import spack.cmd.buildcache
import spack.deptypes
import spack.environment as ev
import spack.error
import spack.main
import spack.spec
import spack.util.url
@@ -384,14 +382,11 @@ def test_correct_specs_are_pushed(

packages_to_push = []

def fake_push(specs, *args, **kwargs):
assert all(isinstance(s, Spec) for s in specs)
packages_to_push.extend(s.name for s in specs)
skipped = []
errors = []
return skipped, errors
def fake_push(node, push_url, options):
assert isinstance(node, Spec)
packages_to_push.append(node.name)

monkeypatch.setattr(spack.binary_distribution, "_push", fake_push)
monkeypatch.setattr(spack.binary_distribution, "push_or_raise", fake_push)

buildcache_create_args = ["create", "--unsigned"]

@@ -448,54 +443,3 @@ def test_skip_no_redistribute(mock_packages, config):
filtered = spack.cmd.buildcache._skip_no_redistribute_for_public(specs)
assert not any(s.name == "no-redistribute" for s in filtered)
assert any(s.name == "no-redistribute-dependent" for s in filtered)


def test_best_effort_vs_fail_fast_when_dep_not_installed(tmp_path, mutable_database):
"""When --fail-fast is passed, the push command should fail if it immediately finds an
uninstalled dependency. Otherwise, failure to push one dependency shouldn't prevent the
others from being pushed."""

mirror("add", "--unsigned", "my-mirror", str(tmp_path))

# Uninstall mpich so that its dependent mpileaks can't be pushed
for s in mutable_database.query_local("mpich"):
s.package.do_uninstall(force=True)

with pytest.raises(spack.cmd.buildcache.PackagesAreNotInstalledError, match="mpich"):
buildcache("push", "--update-index", "--fail-fast", "my-mirror", "mpileaks^mpich")

# nothing should be pushed due to --fail-fast.
assert not os.listdir(tmp_path)
assert not spack.binary_distribution.update_cache_and_get_specs()

with pytest.raises(spack.cmd.buildcache.PackageNotInstalledError):
buildcache("push", "--update-index", "my-mirror", "mpileaks^mpich")

specs = spack.binary_distribution.update_cache_and_get_specs()

# everything but mpich should be pushed
mpileaks = mutable_database.query_local("mpileaks^mpich")[0]
assert set(specs) == {s for s in mpileaks.traverse() if s.name != "mpich"}


def test_push_without_build_deps(tmp_path, temporary_store, mock_packages, mutable_config):
"""Spack should not error when build deps are uninstalled and --without-build-dependenies is
passed."""

mirror("add", "--unsigned", "my-mirror", str(tmp_path))

s = spack.spec.Spec("dtrun3").concretized()
s.package.do_install(fake=True)
s["dtbuild3"].package.do_uninstall()

# fails when build deps are required
with pytest.raises(spack.error.SpackError, match="package not installed"):
buildcache(
"push", "--update-index", "--with-build-dependencies", "my-mirror", f"/{s.dag_hash()}"
)

# succeeds when build deps are not required
buildcache(
"push", "--update-index", "--without-build-dependencies", "my-mirror", f"/{s.dag_hash()}"
)
assert spack.binary_distribution.update_cache_and_get_specs() == [s]

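The last two tests above describe the push semantics in prose: with --fail-fast the push aborts as soon as an uninstalled dependency is found, otherwise whatever is installed still gets pushed. A small sketch of driving that choice through the same SpackCommand wrappers; push_tree and the mirror name "my-mirror" are hypothetical, while the flags are the ones the tests pass.

```python
import spack.main

buildcache = spack.main.SpackCommand("buildcache")
mirror = spack.main.SpackCommand("mirror")

def push_tree(mirror_dir: str, spec: str, fail_fast: bool = False) -> str:
    """Push a spec and its installed dependencies to an unsigned file mirror.

    With fail_fast=True the push aborts on the first uninstalled dependency,
    as the test above expects; without it, a best-effort push is attempted.
    """
    mirror("add", "--unsigned", "my-mirror", mirror_dir)
    args = ["push", "--update-index"]
    if fail_fast:
        args.append("--fail-fast")
    args += ["my-mirror", spec]
    try:
        return buildcache(*args)
    finally:
        mirror("rm", "my-mirror")
```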
File diff suppressed because it is too large
@@ -6,6 +6,7 @@
import filecmp
import os
import shutil
import subprocess

import pytest

@@ -155,6 +156,22 @@ def test_update_with_header(tmpdir):
commands("--update", str(update_file), "--header", str(filename))


@pytest.mark.xfail
def test_no_pipe_error():
"""Make sure we don't see any pipe errors when piping output."""

proc = subprocess.Popen(
["spack", "commands", "--format=rst"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)

# Call close() on stdout to cause a broken pipe
proc.stdout.close()
proc.wait()
stderr = proc.stderr.read().decode("utf-8")

assert "Broken pipe" not in stderr


def test_bash_completion():
"""Test the bash completion writer."""
out1 = commands("--format=bash")

||||
|
@@ -16,6 +16,8 @@

debug = SpackCommand("debug")

pytestmark = pytest.mark.not_on_windows("does not run on windows")


@pytest.mark.db
def test_create_db_tarball(tmpdir, database):
@@ -58,3 +60,4 @@ def test_report():
assert get_version() in out
assert platform.python_version() in out
assert str(architecture) in out
assert spack.config.get("config:concretizer") in out

||||
|
@@ -1057,6 +1057,7 @@ def test_env_with_included_config_file(mutable_mock_env_path, packages_file):
|
||||
assert any(x.satisfies("mpileaks@2.2") for x in e._get_environment_specs())
|
||||
|
||||
|
||||
@pytest.mark.only_clingo("original concretizer does not support requirements")
|
||||
def test_config_change_existing(mutable_mock_env_path, tmp_path, mock_packages, mutable_config):
|
||||
"""Test ``config change`` with config in the ``spack.yaml`` as well as an
|
||||
included file scope.
|
||||
@@ -1132,6 +1133,7 @@ def test_config_change_existing(mutable_mock_env_path, tmp_path, mock_packages,
|
||||
spack.spec.Spec("bowtie@1.2.2").concretized()
|
||||
|
||||
|
||||
@pytest.mark.only_clingo("original concretizer does not support requirements")
|
||||
def test_config_change_new(mutable_mock_env_path, tmp_path, mock_packages, mutable_config):
|
||||
spack_yaml = tmp_path / ev.manifest_name
|
||||
spack_yaml.write_text(
|
||||
@@ -1734,17 +1736,6 @@ def test_env_include_concrete_env_yaml(env_name):
|
||||
assert test.path in combined_yaml["include_concrete"]
|
||||
|
||||
|
||||
@pytest.mark.regression("45766")
|
||||
@pytest.mark.parametrize("format", ["v1", "v2", "v3"])
|
||||
def test_env_include_concrete_old_env(format, tmpdir):
|
||||
lockfile = os.path.join(spack.paths.test_path, "data", "legacy_env", f"{format}.lock")
|
||||
# create an env from old .lock file -- this does not update the format
|
||||
env("create", "old-env", lockfile)
|
||||
env("create", "--include-concrete", "old-env", "test")
|
||||
|
||||
assert ev.read("old-env").all_specs() == ev.read("test").all_specs()
|
||||
|
||||
|
||||
def test_env_bad_include_concrete_env():
|
||||
with pytest.raises(ev.SpackEnvironmentError):
|
||||
env("create", "--include-concrete", "nonexistant_env", "combined_env")
|
||||
@@ -2341,6 +2332,8 @@ def test_stack_concretize_extraneous_deps(tmpdir, mock_packages):
|
||||
# FIXME: constraints for stacks
|
||||
# FIXME: This now works for statically-determinable invalid deps
|
||||
# FIXME: But it still does not work for dynamically determined invalid deps
|
||||
# if spack.config.get('config:concretizer') == 'clingo':
|
||||
# pytest.skip('Clingo concretizer does not support soft constraints')
|
||||
|
||||
filename = str(tmpdir.join("spack.yaml"))
|
||||
with open(filename, "w") as f:
|
||||
@@ -3187,7 +3180,9 @@ def test_concretize_user_specs_together():
|
||||
e.remove("mpich")
|
||||
e.add("mpich2")
|
||||
|
||||
exc_cls = spack.error.UnsatisfiableSpecError
|
||||
exc_cls = spack.error.SpackError
|
||||
if spack.config.get("config:concretizer") == "clingo":
|
||||
exc_cls = spack.error.UnsatisfiableSpecError
|
||||
|
||||
# Concretizing without invalidating the concrete spec for mpileaks fails
|
||||
with pytest.raises(exc_cls):
|
||||
@@ -3213,8 +3208,10 @@ def test_duplicate_packages_raise_when_concretizing_together():
|
||||
e.add("mpileaks~opt")
|
||||
e.add("mpich")
|
||||
|
||||
exc_cls = spack.error.UnsatisfiableSpecError
|
||||
match = r"You could consider setting `concretizer:unify`"
|
||||
exc_cls, match = spack.error.SpackError, None
|
||||
if spack.config.get("config:concretizer") == "clingo":
|
||||
exc_cls = spack.error.UnsatisfiableSpecError
|
||||
match = r"You could consider setting `concretizer:unify`"
|
||||
|
||||
with pytest.raises(exc_cls, match=match):
|
||||
e.concretize()
|
||||
|
@@ -14,7 +14,6 @@
|
||||
import spack.cmd.external
|
||||
import spack.detection
|
||||
import spack.detection.path
|
||||
import spack.repo
|
||||
from spack.main import SpackCommand
|
||||
from spack.spec import Spec
|
||||
|
||||
@@ -56,9 +55,7 @@ def test_find_external_two_instances_same_package(mock_executable):
|
||||
search_paths = [str(cmake1.parent.parent), str(cmake2.parent.parent)]
|
||||
|
||||
finder = spack.detection.path.ExecutablesFinder()
|
||||
detected_specs = finder.find(
|
||||
pkg_name="cmake", initial_guess=search_paths, repository=spack.repo.PATH
|
||||
)
|
||||
detected_specs = finder.find(pkg_name="cmake", initial_guess=search_paths)
|
||||
|
||||
assert len(detected_specs) == 2
|
||||
spec_to_path = {e.spec: e.prefix for e in detected_specs}
|
||||
@@ -100,7 +97,7 @@ def test_get_executables(working_env, mock_executable):
|
||||
|
||||
# TODO: this test should be made to work, but in the meantime it is
|
||||
# causing intermittent (spurious) CI failures on all PRs
|
||||
@pytest.mark.not_on_windows("Test fails intermittently on Windows")
|
||||
@pytest.mark.skipif(sys.platform == "win32", reason="Test fails intermittently on Windows")
|
||||
def test_find_external_cmd_not_buildable(mutable_config, working_env, mock_executable):
|
||||
"""When the user invokes 'spack external find --not-buildable', the config
|
||||
for any package where Spack finds an external version should be marked as
|
||||
@@ -117,31 +114,11 @@ def test_find_external_cmd_not_buildable(mutable_config, working_env, mock_execu
|
||||
@pytest.mark.parametrize(
|
||||
"names,tags,exclude,expected",
|
||||
[
|
||||
# find -all
|
||||
(
|
||||
None,
|
||||
["detectable"],
|
||||
[],
|
||||
[
|
||||
"builtin.mock.find-externals1",
|
||||
"builtin.mock.gcc",
|
||||
"builtin.mock.llvm",
|
||||
"builtin.mock.intel-oneapi-compilers",
|
||||
],
|
||||
),
|
||||
# find --all
|
||||
(None, ["detectable"], [], ["builtin.mock.find-externals1"]),
|
||||
# find --all --exclude find-externals1
|
||||
(
|
||||
None,
|
||||
["detectable"],
|
||||
["builtin.mock.find-externals1"],
|
||||
["builtin.mock.gcc", "builtin.mock.llvm", "builtin.mock.intel-oneapi-compilers"],
|
||||
),
|
||||
(
|
||||
None,
|
||||
["detectable"],
|
||||
["find-externals1"],
|
||||
["builtin.mock.gcc", "builtin.mock.llvm", "builtin.mock.intel-oneapi-compilers"],
|
||||
),
|
||||
(None, ["detectable"], ["builtin.mock.find-externals1"], []),
|
||||
(None, ["detectable"], ["find-externals1"], []),
|
||||
# find cmake (and cmake is not detectable)
|
||||
(["cmake"], ["detectable"], [], []),
|
||||
],
|
||||
@@ -266,9 +243,7 @@ def _determine_variants(cls, exes, version_str):
|
||||
monkeypatch.setattr(gcc_cls, "determine_variants", _determine_variants)
|
||||
|
||||
finder = spack.detection.path.ExecutablesFinder()
|
||||
detected_specs = finder.find(
|
||||
pkg_name="gcc", initial_guess=[str(search_dir)], repository=spack.repo.PATH
|
||||
)
|
||||
detected_specs = finder.find(pkg_name="gcc", initial_guess=[str(search_dir)])
|
||||
|
||||
assert len(detected_specs) == 1
|
||||
|
||||
|
@@ -2,13 +2,29 @@
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import pytest
|
||||
|
||||
from spack.main import SpackCommand
|
||||
|
||||
|
||||
@pytest.mark.xfail
|
||||
def test_reuse_after_help():
|
||||
"""Test `spack help` can be called twice with the same SpackCommand."""
|
||||
help_cmd = SpackCommand("help", subprocess=True)
|
||||
help_cmd()
|
||||
|
||||
# This second invocation will somehow fail because the parser no
|
||||
# longer works after add_all_commands() is called in
|
||||
# SpackArgumentParser.format_help_sections().
|
||||
#
|
||||
# TODO: figure out why this doesn't work properly and change this
|
||||
# test to use a single SpackCommand.
|
||||
#
|
||||
# It seems that parse_known_args() finds "too few arguments" the
|
||||
# second time through b/c add_all_commands() ends up leaving extra
|
||||
# positionals in the parser. But this used to work before we loaded
|
||||
# commands lazily.
|
||||
help_cmd()
|
||||
|
||||
|
||||
|
@@ -310,7 +310,7 @@ def test_pkg_grep(mock_packages, capfd):
|
||||
output, _ = capfd.readouterr()
|
||||
assert output.strip() == "\n".join(
|
||||
spack.repo.PATH.get_pkg_class(name).module.__file__
|
||||
for name in ["splice-a", "splice-h", "splice-t", "splice-vh", "splice-vt", "splice-z"]
|
||||
for name in ["splice-a", "splice-h", "splice-t", "splice-vh", "splice-z"]
|
||||
)
|
||||
|
||||
# ensure that this string isn't fouhnd
|
||||
|
@@ -30,6 +30,7 @@ def test_spec():
|
||||
assert "mpich@3.0.4" in output
|
||||
|
||||
|
||||
@pytest.mark.only_clingo("Known failure of the original concretizer")
|
||||
def test_spec_concretizer_args(mutable_database, do_not_check_runtimes_on_reuse):
|
||||
"""End-to-end test of CLI concretizer prefs.
|
||||
|
||||
|
@@ -24,12 +24,6 @@
|
||||
style = spack.main.SpackCommand("style")
|
||||
|
||||
|
||||
ISORT = which("isort")
|
||||
BLACK = which("black")
|
||||
FLAKE8 = which("flake8")
|
||||
MYPY = which("mypy")
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def has_develop_branch(git):
|
||||
"""spack style requires git and a develop branch to run -- skip if we're missing either."""
|
||||
@@ -196,8 +190,8 @@ def external_style_root(git, flake8_package_with_errors, tmpdir):
|
||||
yield tmpdir, py_file
|
||||
|
||||
|
||||
@pytest.mark.skipif(not ISORT, reason="isort is not installed.")
|
||||
@pytest.mark.skipif(not BLACK, reason="black is not installed.")
|
||||
@pytest.mark.skipif(not which("isort"), reason="isort is not installed.")
|
||||
@pytest.mark.skipif(not which("black"), reason="black is not installed.")
|
||||
def test_fix_style(external_style_root):
|
||||
"""Make sure spack style --fix works."""
|
||||
tmpdir, py_file = external_style_root
|
||||
@@ -215,10 +209,10 @@ def test_fix_style(external_style_root):
|
||||
assert filecmp.cmp(broken_py, fixed_py)
|
||||
|
||||
|
||||
@pytest.mark.skipif(not FLAKE8, reason="flake8 is not installed.")
|
||||
@pytest.mark.skipif(not ISORT, reason="isort is not installed.")
|
||||
@pytest.mark.skipif(not MYPY, reason="mypy is not installed.")
|
||||
@pytest.mark.skipif(not BLACK, reason="black is not installed.")
|
||||
@pytest.mark.skipif(not which("flake8"), reason="flake8 is not installed.")
|
||||
@pytest.mark.skipif(not which("isort"), reason="isort is not installed.")
|
||||
@pytest.mark.skipif(not which("mypy"), reason="mypy is not installed.")
|
||||
@pytest.mark.skipif(not which("black"), reason="black is not installed.")
|
||||
def test_external_root(external_style_root, capfd):
|
||||
"""Ensure we can run in a separate root directory w/o configuration files."""
|
||||
tmpdir, py_file = external_style_root
|
||||
@@ -244,7 +238,7 @@ def test_external_root(external_style_root, capfd):
|
||||
assert "lib/spack/spack/dummy.py:7: [F401] 'os' imported but unused" in output
|
||||
|
||||
|
||||
@pytest.mark.skipif(not FLAKE8, reason="flake8 is not installed.")
|
||||
@pytest.mark.skipif(not which("flake8"), reason="flake8 is not installed.")
|
||||
def test_style(flake8_package, tmpdir):
|
||||
root_relative = os.path.relpath(flake8_package, spack.paths.prefix)
|
||||
|
||||
@@ -270,7 +264,7 @@ def test_style(flake8_package, tmpdir):
|
||||
assert "spack style checks were clean" in output
|
||||
|
||||
|
||||
@pytest.mark.skipif(not FLAKE8, reason="flake8 is not installed.")
|
||||
@pytest.mark.skipif(not which("flake8"), reason="flake8 is not installed.")
|
||||
def test_style_with_errors(flake8_package_with_errors):
|
||||
root_relative = os.path.relpath(flake8_package_with_errors, spack.paths.prefix)
|
||||
output = style(
|
||||
@@ -281,8 +275,8 @@ def test_style_with_errors(flake8_package_with_errors):
|
||||
assert "spack style found errors" in output
|
||||
|
||||
|
||||
@pytest.mark.skipif(not BLACK, reason="black is not installed.")
|
||||
@pytest.mark.skipif(not FLAKE8, reason="flake8 is not installed.")
|
||||
@pytest.mark.skipif(not which("black"), reason="black is not installed.")
|
||||
@pytest.mark.skipif(not which("flake8"), reason="flake8 is not installed.")
|
||||
def test_style_with_black(flake8_package_with_errors):
|
||||
output = style("--tool", "black,flake8", flake8_package_with_errors, fail_on_error=False)
|
||||
assert "black found errors" in output
|
||||
|
@@ -11,6 +11,13 @@
|
||||
versions = SpackCommand("versions")
|
||||
|
||||
|
||||
def test_safe_only_versions():
|
||||
"""Only test the safe versions of a package.
|
||||
(Using the deprecated command line argument)
|
||||
"""
|
||||
versions("--safe-only", "zlib")
|
||||
|
||||
|
||||
def test_safe_versions():
|
||||
"""Only test the safe versions of a package."""
|
||||
|
||||
|
@@ -425,6 +425,9 @@ def test_compiler_flags_differ_identical_compilers(self, mutable_config, clang12
|
||||
spec.concretize()
|
||||
assert spec.satisfies("cflags=-O2")
|
||||
|
||||
@pytest.mark.only_clingo(
|
||||
"Optional compiler propagation isn't deprecated for original concretizer"
|
||||
)
|
||||
@pytest.mark.parametrize(
|
||||
"spec_str,expected,not_expected",
|
||||
[
|
||||
@@ -469,6 +472,7 @@ def test_compiler_inherited_upwards(self):
|
||||
for dep in spec.traverse():
|
||||
assert "%clang" in dep
|
||||
|
||||
@pytest.mark.only_clingo("Fixing the parser broke this test for the original concretizer")
|
||||
def test_architecture_deep_inheritance(self, mock_targets, compiler_factory):
|
||||
"""Make sure that indirect dependencies receive architecture
|
||||
information from the root even when partial architecture information
|
||||
@@ -533,6 +537,9 @@ def test_concretize_two_virtuals_with_dual_provider_and_a_conflict(self):
|
||||
with pytest.raises(spack.error.SpackError):
|
||||
s.concretize()
|
||||
|
||||
@pytest.mark.only_clingo(
|
||||
"Optional compiler propagation isn't deprecated for original concretizer"
|
||||
)
|
||||
@pytest.mark.parametrize(
|
||||
"spec_str,expected_propagation",
|
||||
[
|
||||
@@ -560,6 +567,9 @@ def test_concretize_propagate_disabled_variant(self, spec_str, expected_propagat
|
||||
for key, expected_satisfies in expected_propagation:
|
||||
spec[key].satisfies(expected_satisfies)
|
||||
|
||||
@pytest.mark.only_clingo(
|
||||
"Optional compiler propagation isn't deprecated for original concretizer"
|
||||
)
|
||||
def test_concretize_propagated_variant_is_not_passed_to_dependent(self):
|
||||
"""Test a package variant value was passed from its parent."""
|
||||
spec = Spec("ascent~~shared +adios2 ^adios2+shared")
|
||||
@@ -568,6 +578,9 @@ def test_concretize_propagated_variant_is_not_passed_to_dependent(self):
|
||||
assert spec.satisfies("^adios2+shared")
|
||||
assert spec.satisfies("^bzip2~shared")
|
||||
|
||||
@pytest.mark.only_clingo(
|
||||
"Optional compiler propagation isn't deprecated for original concretizer"
|
||||
)
|
||||
def test_concretize_propagate_specified_variant(self):
|
||||
"""Test that only the specified variant is propagated to the dependencies"""
|
||||
spec = Spec("parent-foo-bar ~~foo")
|
||||
@@ -576,6 +589,7 @@ def test_concretize_propagate_specified_variant(self):
|
||||
assert spec.satisfies("~foo") and spec.satisfies("^dependency-foo-bar~foo")
|
||||
assert spec.satisfies("+bar") and not spec.satisfies("^dependency-foo-bar+bar")
|
||||
|
||||
@pytest.mark.only_clingo("Original concretizer is allowed to forego variant propagation")
|
||||
def test_concretize_propagate_multivalue_variant(self):
|
||||
"""Test that multivalue variants are propagating the specified value(s)
|
||||
to their dependecies. The dependencies should not have the default value"""
|
||||
@@ -632,6 +646,11 @@ def test_virtual_is_fully_expanded_for_mpileaks(self):
|
||||
assert all(not d.dependencies(name="mpi") for d in spec.traverse())
|
||||
assert all(x in spec for x in ("zmpi", "mpi"))
|
||||
|
||||
def test_my_dep_depends_on_provider_of_my_virtual_dep(self):
|
||||
spec = Spec("indirect-mpich")
|
||||
spec.normalize()
|
||||
spec.concretize()
|
||||
|
||||
@pytest.mark.parametrize("compiler_str", ["clang", "gcc", "gcc@10.2.1", "clang@:15.0.0"])
|
||||
def test_compiler_inheritance(self, compiler_str):
|
||||
spec_str = "mpileaks %{0}".format(compiler_str)
|
||||
@@ -727,6 +746,7 @@ def test_conflicts_in_spec(self, conflict_spec):
|
||||
with pytest.raises(spack.error.SpackError):
|
||||
s.concretize()
|
||||
|
||||
@pytest.mark.only_clingo("Testing debug statements specific to new concretizer")
|
||||
def test_conflicts_show_cores(self, conflict_spec, monkeypatch):
|
||||
s = Spec(conflict_spec)
|
||||
with pytest.raises(spack.error.SpackError) as e:
|
||||
@@ -900,6 +920,7 @@ def test_concretize_anonymous_dep(self, spec_str):
|
||||
("bowtie@1.2.2 os=redhat6", "%gcc@11.1.0"),
|
||||
],
|
||||
)
|
||||
@pytest.mark.only_clingo("Original concretizer cannot work around conflicts")
|
||||
def test_compiler_conflicts_in_package_py(
|
||||
self, spec_str, expected_str, clang12_with_flags, gcc11_with_flags
|
||||
):
|
||||
@@ -1015,6 +1036,7 @@ def test_patching_dependencies(self, spec_str, patched_deps):
|
||||
("quantum-espresso~veritas", ["^libelf@0.8.13"]),
|
||||
],
|
||||
)
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_working_around_conflicting_defaults(self, spec_str, expected):
|
||||
s = Spec(spec_str).concretized()
|
||||
|
||||
@@ -1027,6 +1049,7 @@ def test_working_around_conflicting_defaults(self, spec_str, expected):
|
||||
"spec_str,expected",
|
||||
[("cmake", ["%clang"]), ("cmake %gcc", ["%gcc"]), ("cmake %clang", ["%clang"])],
|
||||
)
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_external_package_and_compiler_preferences(self, spec_str, expected, mutable_config):
|
||||
packages_yaml = {
|
||||
"all": {"compiler": ["clang", "gcc"]},
|
||||
@@ -1043,6 +1066,7 @@ def test_external_package_and_compiler_preferences(self, spec_str, expected, mut
|
||||
assert s.satisfies(condition)
|
||||
|
||||
@pytest.mark.regression("5651")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_package_with_constraint_not_met_by_external(self):
|
||||
"""Check that if we have an external package A at version X.Y in
|
||||
packages.yaml, but our spec doesn't allow X.Y as a version, then
|
||||
@@ -1057,6 +1081,7 @@ def test_package_with_constraint_not_met_by_external(self):
|
||||
assert not s["libelf"].external
|
||||
|
||||
@pytest.mark.regression("9744")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_cumulative_version_ranges_with_different_length(self):
|
||||
s = Spec("cumulative-vrange-root").concretized()
|
||||
assert s.concrete
|
||||
@@ -1084,6 +1109,7 @@ def test_dependency_conditional_on_another_dependency_state(self):
|
||||
@pytest.mark.parametrize(
|
||||
"spec_str,expected", [("cmake %gcc", "%gcc"), ("cmake %clang", "%clang")]
|
||||
)
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_compiler_constraint_with_external_package(self, spec_str, expected):
|
||||
packages_yaml = {
|
||||
"cmake": {"externals": [{"spec": "cmake@3.4.3", "prefix": "/usr"}], "buildable": False}
|
||||
@@ -1128,9 +1154,18 @@ def test_compiler_in_nonbuildable_external_package(
|
||||
spack.config.set("packages", packages_yaml)
|
||||
|
||||
s = Spec(spec_str).concretized()
|
||||
if xfailold and spack.config.get("config:concretizer") == "original":
|
||||
pytest.xfail("This only works on the ASP-based concretizer")
|
||||
assert s.satisfies(expected)
|
||||
assert "external-common-perl" not in [d.name for d in s.dependencies()]
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_external_packages_have_consistent_hash(self):
|
||||
s, t = Spec("externaltool"), Spec("externaltool")
|
||||
s._old_concretize(), t._new_concretize()
|
||||
|
||||
assert s.dag_hash() == t.dag_hash()
|
||||
|
||||
def test_external_that_would_require_a_virtual_dependency(self):
|
||||
s = Spec("requires-virtual").concretized()
|
||||
|
||||
@@ -1147,6 +1182,7 @@ def test_transitive_conditional_virtual_dependency(self, mutable_config):
|
||||
assert s.satisfies("^[virtuals=stuff] externalvirtual")
|
||||
|
||||
@pytest.mark.regression("20040")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_conditional_provides_or_depends_on(self):
|
||||
# Check that we can concretize correctly a spec that can either
|
||||
# provide a virtual or depend on it based on the value of a variant
|
||||
@@ -1185,6 +1221,7 @@ def test_activating_test_dependencies(self, spec_str, tests_arg, with_dep, witho
|
||||
assert not node.dependencies(deptype="test"), msg.format(pkg_name)
|
||||
|
||||
@pytest.mark.regression("20019")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_compiler_match_is_preferred_to_newer_version(self, compiler_factory):
|
||||
# This spec depends on openblas. Openblas has a conflict
|
||||
# that doesn't allow newer versions with gcc@4.4.0. Check
|
||||
@@ -1203,6 +1240,7 @@ def test_target_ranges_in_conflicts(self):
|
||||
with pytest.raises(spack.error.SpackError):
|
||||
Spec("impossible-concretization").concretized()
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_target_compatibility(self):
|
||||
with pytest.raises(spack.error.SpackError):
|
||||
Spec("libdwarf target=x86_64 ^libelf target=x86_64_v2").concretized()
|
||||
@@ -1219,6 +1257,7 @@ def test_variant_not_default(self):
|
||||
assert "+foo+bar+baz" in d
|
||||
|
||||
@pytest.mark.regression("20055")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_custom_compiler_version(self, mutable_config, compiler_factory, monkeypatch):
|
||||
mutable_config.set(
|
||||
"compilers", [compiler_factory(spec="gcc@10foo", operating_system="redhat6")]
|
||||
@@ -1319,6 +1358,7 @@ def mock_fn(*args, **kwargs):
|
||||
{"add_variant": True, "delete_variant": True},
|
||||
],
|
||||
)
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_reuse_installed_packages_when_package_def_changes(
|
||||
self, context, mutable_database, repo_with_changing_recipe
|
||||
):
|
||||
@@ -1348,6 +1388,7 @@ def test_reuse_installed_packages_when_package_def_changes(
|
||||
# Structure and package hash will be different without reuse
|
||||
assert root.dag_hash() != new_root_without_reuse.dag_hash()
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
@pytest.mark.regression("43663")
|
||||
def test_no_reuse_when_variant_condition_does_not_hold(self, mutable_database, mock_packages):
|
||||
spack.config.set("concretizer:reuse", True)
|
||||
@@ -1363,6 +1404,7 @@ def test_no_reuse_when_variant_condition_does_not_hold(self, mutable_database, m
|
||||
new2 = Spec("conditional-variant-pkg +two_whens").concretized()
|
||||
assert new2.satisfies("@2 +two_whens +version_based")
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_reuse_with_flags(self, mutable_database, mutable_config):
|
||||
spack.config.set("concretizer:reuse", True)
|
||||
spec = Spec("pkg-a cflags=-g cxxflags=-g").concretized()
|
||||
@@ -1383,6 +1425,7 @@ def test_concretization_of_test_dependencies(self):
|
||||
@pytest.mark.parametrize(
|
||||
"spec_str", ["wrong-variant-in-conflicts", "wrong-variant-in-depends-on"]
|
||||
)
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_error_message_for_inconsistent_variants(self, spec_str):
|
||||
s = Spec(spec_str)
|
||||
with pytest.raises(RuntimeError, match="not found in package"):
|
||||
@@ -1487,6 +1530,7 @@ def test_multivalued_variants_from_cli(self, spec_str, expected_dict):
|
||||
("deprecated-versions@=1.1.0", "deprecated-versions@1.1.0"),
|
||||
],
|
||||
)
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_deprecated_versions_not_selected(self, spec_str, expected):
|
||||
with spack.config.override("config:deprecated", True):
|
||||
s = Spec(spec_str).concretized()
|
||||
@@ -1547,6 +1591,7 @@ def test_non_default_provider_of_multiple_virtuals(self):
|
||||
"spec_str,expect_installed",
|
||||
[("mpich", True), ("mpich+debug", False), ("mpich~debug", True)],
|
||||
)
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_concrete_specs_are_not_modified_on_reuse(
|
||||
self, mutable_database, spec_str, expect_installed
|
||||
):
|
||||
@@ -1560,6 +1605,7 @@ def test_concrete_specs_are_not_modified_on_reuse(
|
||||
assert s.satisfies(spec_str)
|
||||
|
||||
@pytest.mark.regression("26721,19736")
|
||||
@pytest.mark.only_clingo("Original concretizer cannot use sticky variants")
|
||||
def test_sticky_variant_in_package(self):
|
||||
# Here we test that a sticky variant cannot be changed from its default value
|
||||
# by the ASP solver if not set explicitly. The package used in the test needs
|
||||
@@ -1575,6 +1621,7 @@ def test_sticky_variant_in_package(self):
|
||||
assert s.satisfies("%clang") and s.satisfies("~allow-gcc")
|
||||
|
||||
@pytest.mark.regression("42172")
|
||||
@pytest.mark.only_clingo("Original concretizer cannot use sticky variants")
|
||||
@pytest.mark.parametrize(
|
||||
"spec,allow_gcc",
|
||||
[
|
||||
@@ -1597,6 +1644,7 @@ def test_sticky_variant_in_external(self, spec, allow_gcc):
|
||||
assert s["sticky-variant"].satisfies("+allow-gcc")
|
||||
assert s["sticky-variant"].external
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_do_not_invent_new_concrete_versions_unless_necessary(self):
|
||||
# ensure we select a known satisfying version rather than creating
|
||||
# a new '2.7' version.
|
||||
@@ -1618,12 +1666,14 @@ def test_do_not_invent_new_concrete_versions_unless_necessary(self):
|
||||
("conditional-values-in-variant foo=foo", True),
|
||||
],
|
||||
)
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_conditional_values_in_variants(self, spec_str, valid):
|
||||
s = Spec(spec_str)
|
||||
raises = pytest.raises((RuntimeError, spack.error.UnsatisfiableSpecError))
|
||||
with llnl.util.lang.nullcontext() if valid else raises:
|
||||
s.concretize()
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_conditional_values_in_conditional_variant(self):
|
||||
"""Test that conditional variants play well with conditional possible values"""
|
||||
s = Spec("conditional-values-in-variant@1.50.0").concretized()
|
||||
@@ -1632,6 +1682,7 @@ def test_conditional_values_in_conditional_variant(self):
|
||||
s = Spec("conditional-values-in-variant@1.60.0").concretized()
|
||||
assert "cxxstd" in s.variants
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_target_granularity(self):
|
||||
# The test architecture uses core2 as the default target. Check that when
|
||||
# we configure Spack for "generic" granularity we concretize for x86_64
|
||||
@@ -1642,6 +1693,7 @@ def test_target_granularity(self):
|
||||
with spack.config.override("concretizer:targets", {"granularity": "generic"}):
|
||||
assert s.concretized().satisfies("target=%s" % generic_target)
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_host_compatible_concretization(self):
|
||||
# Check that after setting "host_compatible" to false we cannot concretize.
|
||||
# Here we use "k10" to set a target non-compatible with the current host
|
||||
@@ -1654,6 +1706,7 @@ def test_host_compatible_concretization(self):
|
||||
with pytest.raises(spack.error.SpackError):
|
||||
s.concretized()
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_add_microarchitectures_on_explicit_request(self):
|
||||
# Check that if we consider only "generic" targets, we can still solve for
|
||||
# specific microarchitectures on explicit requests
|
||||
@@ -1662,6 +1715,7 @@ def test_add_microarchitectures_on_explicit_request(self):
|
||||
assert s.satisfies("target=k10")
|
||||
|
||||
@pytest.mark.regression("29201")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_delete_version_and_reuse(self, mutable_database, repo_with_changing_recipe):
|
||||
"""Test that we can reuse installed specs with versions not
|
||||
declared in package.py
|
||||
@@ -1676,6 +1730,7 @@ def test_delete_version_and_reuse(self, mutable_database, repo_with_changing_rec
|
||||
assert root.dag_hash() == new_root.dag_hash()
|
||||
|
||||
@pytest.mark.regression("29201")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_installed_version_is_selected_only_for_reuse(
|
||||
self, mutable_database, repo_with_changing_recipe
|
||||
):
|
||||
@@ -1753,6 +1808,7 @@ def test_reuse_with_unknown_package_dont_raise(self, tmpdir, temporary_store, mo
|
||||
(["mpi", "mpich"], 1, 1),
|
||||
],
|
||||
)
|
||||
@pytest.mark.only_clingo("Original concretizer cannot concretize in rounds")
|
||||
def test_best_effort_coconcretize(self, specs, expected, libc_offset):
|
||||
specs = [Spec(s) for s in specs]
|
||||
solver = spack.solver.asp.Solver()
|
||||
@@ -1797,6 +1853,7 @@ def test_best_effort_coconcretize(self, specs, expected, libc_offset):
|
||||
(["hdf5+mpi", "zmpi", "mpich"], "mpich", 2),
|
||||
],
|
||||
)
|
||||
@pytest.mark.only_clingo("Original concretizer cannot concretize in rounds")
|
||||
def test_best_effort_coconcretize_preferences(self, specs, expected_spec, occurances):
|
||||
"""Test package preferences during coconcretization."""
|
||||
specs = [Spec(s) for s in specs]
|
||||
@@ -1812,6 +1869,7 @@ def test_best_effort_coconcretize_preferences(self, specs, expected_spec, occura
|
||||
counter += 1
|
||||
assert counter == occurances, concrete_specs
|
||||
|
||||
@pytest.mark.only_clingo("Original concretizer cannot concretize in rounds")
|
||||
def test_solve_in_rounds_all_unsolved(self, monkeypatch, mock_packages):
|
||||
specs = [Spec(x) for x in ["libdwarf%gcc", "libdwarf%clang"]]
|
||||
solver = spack.solver.asp.Solver()
|
||||
@@ -1827,6 +1885,7 @@ def test_solve_in_rounds_all_unsolved(self, monkeypatch, mock_packages):
|
||||
):
|
||||
list(solver.solve_in_rounds(specs))
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_coconcretize_reuse_and_virtuals(self):
|
||||
reusable_specs = []
|
||||
for s in ["mpileaks ^mpich", "zmpi"]:
|
||||
@@ -1843,6 +1902,7 @@ def test_coconcretize_reuse_and_virtuals(self):
|
||||
assert "zmpi" in spec
|
||||
|
||||
@pytest.mark.regression("30864")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_misleading_error_message_on_version(self, mutable_database):
|
||||
# For this bug to be triggered we need a reusable dependency
|
||||
# that is not optimal in terms of optimization scores.
|
||||
@@ -1859,6 +1919,7 @@ def test_misleading_error_message_on_version(self, mutable_database):
|
||||
solver.driver.solve(setup, [root_spec], reuse=reusable_specs)
|
||||
|
||||
@pytest.mark.regression("31148")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_version_weight_and_provenance(self):
|
||||
"""Test package preferences during coconcretization."""
|
||||
reusable_specs = [Spec(spec_str).concretized() for spec_str in ("pkg-b@0.9", "pkg-b@1.0")]
|
||||
@@ -1890,6 +1951,7 @@ def test_version_weight_and_provenance(self):
|
||||
assert criterion in result.criteria, criterion
|
||||
assert result_spec.satisfies("^pkg-b@1.0")
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_reuse_succeeds_with_config_compatible_os(self):
|
||||
root_spec = Spec("pkg-b")
|
||||
s = root_spec.concretized()
|
||||
@@ -1915,6 +1977,7 @@ def test_git_hash_assigned_version_is_preferred(self):
|
||||
assert hash in str(c)
|
||||
|
||||
@pytest.mark.parametrize("git_ref", ("a" * 40, "0.2.15", "main"))
|
||||
@pytest.mark.only_clingo("Original concretizer cannot account for git hashes")
|
||||
def test_git_ref_version_is_equivalent_to_specified_version(self, git_ref):
|
||||
s = Spec("develop-branch-version@git.%s=develop" % git_ref)
|
||||
c = s.concretized()
|
||||
@@ -1924,6 +1987,7 @@ def test_git_ref_version_is_equivalent_to_specified_version(self, git_ref):
|
||||
assert s.satisfies("@0.1:")
|
||||
|
||||
@pytest.mark.parametrize("git_ref", ("a" * 40, "0.2.15", "fbranch"))
|
||||
@pytest.mark.only_clingo("Original concretizer cannot account for git hashes")
|
||||
def test_git_ref_version_succeeds_with_unknown_version(self, git_ref):
|
||||
# main is not defined in the package.py for this file
|
||||
s = Spec("develop-branch-version@git.%s=main" % git_ref)
|
||||
@@ -1931,6 +1995,7 @@ def test_git_ref_version_succeeds_with_unknown_version(self, git_ref):
|
||||
assert s.satisfies("develop-branch-version@main")
|
||||
|
||||
@pytest.mark.regression("31484")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_installed_externals_are_reused(
|
||||
self, mutable_database, repo_with_changing_recipe, tmp_path
|
||||
):
|
||||
@@ -1962,6 +2027,7 @@ def test_installed_externals_are_reused(
|
||||
assert external3.dag_hash() == external1.dag_hash()
|
||||
|
||||
@pytest.mark.regression("31484")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_user_can_select_externals_with_require(self, mutable_database, tmp_path):
|
||||
"""Test that users have means to select an external even in presence of reusable specs."""
|
||||
external_conf = {
|
||||
@@ -1990,6 +2056,7 @@ def test_user_can_select_externals_with_require(self, mutable_database, tmp_path
|
||||
assert mpi_spec.name == "multi-provider-mpi"
|
||||
|
||||
@pytest.mark.regression("31484")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_installed_specs_disregard_conflicts(self, mutable_database, monkeypatch):
|
||||
"""Test that installed specs do not trigger conflicts. This covers for the rare case
|
||||
where a conflict is added on a package after a spec matching the conflict was installed.
|
||||
@@ -2010,6 +2077,7 @@ def test_installed_specs_disregard_conflicts(self, mutable_database, monkeypatch
|
||||
assert s.satisfies("~debug"), s
|
||||
|
||||
@pytest.mark.regression("32471")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_require_targets_are_allowed(self, mutable_database):
|
||||
"""Test that users can set target constraints under the require attribute."""
|
||||
# Configuration to be added to packages.yaml
|
||||
@@ -2184,6 +2252,7 @@ def test_unsolved_specs_raises_error(self, monkeypatch, mock_packages):
|
||||
solver.driver.solve(setup, specs, reuse=[])
|
||||
|
||||
@pytest.mark.regression("43141")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_clear_error_when_unknown_compiler_requested(self, mock_packages):
|
||||
"""Tests that the solver can report a case where the compiler cannot be set"""
|
||||
with pytest.raises(
|
||||
@@ -2286,25 +2355,7 @@ def test_virtuals_are_annotated_on_edges(self, spec_str):
|
||||
edges = spec.edges_to_dependencies(name="callpath")
|
||||
assert len(edges) == 1 and edges[0].virtuals == ()
|
||||
|
||||
@pytest.mark.parametrize("transitive", [True, False])
|
||||
def test_explicit_splices(
|
||||
self, mutable_config, database_mutable_config, mock_packages, transitive
|
||||
):
|
||||
mpich_spec = database_mutable_config.query("mpich")[0]
|
||||
splice_info = {
|
||||
"target": "mpi",
|
||||
"replacement": f"/{mpich_spec.dag_hash()}",
|
||||
"transitive": transitive,
|
||||
}
|
||||
spack.config.CONFIG.set("splice", [splice_info])
|
||||
|
||||
spec = spack.spec.Spec("hdf5 ^zmpi").concretized()
|
||||
|
||||
assert spec.satisfies(f"^mpich/{mpich_spec.dag_hash()}")
|
||||
assert spec.build_spec.satisfies("^zmpi")
|
||||
assert not spec.build_spec.satisfies(f"^mpich/{mpich_spec.dag_hash()}")
|
||||
assert not spec.satisfies("^zmpi")
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
@pytest.mark.db
|
||||
@pytest.mark.parametrize(
|
||||
"spec_str,mpi_name",
|
||||
@@ -2319,6 +2370,7 @@ def test_virtuals_are_reconstructed_on_reuse(self, spec_str, mpi_name, mutable_d
|
||||
assert len(mpi_edges) == 1
|
||||
assert "mpi" in mpi_edges[0].virtuals
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_dont_define_new_version_from_input_if_checksum_required(self, working_env):
|
||||
os.environ["SPACK_CONCRETIZER_REQUIRE_CHECKSUM"] = "yes"
|
||||
with pytest.raises(spack.error.UnsatisfiableSpecError):
|
||||
@@ -2355,6 +2407,7 @@ def test_reuse_python_from_cli_and_extension_from_db(self, mutable_database):
|
||||
("hdf5 ^gmake", {"gmake": "duplicates.test", "hdf5": "duplicates.test"}),
|
||||
],
|
||||
)
|
||||
@pytest.mark.only_clingo("Uses specs requiring multiple gmake specs")
|
||||
def test_select_lower_priority_package_from_repository_stack(
|
||||
self, spec_str, expected_namespaces
|
||||
):
|
||||
@@ -2370,6 +2423,7 @@ def test_select_lower_priority_package_from_repository_stack(
|
||||
assert s[name].concrete
|
||||
assert s[name].namespace == namespace
|
||||
|
||||
@pytest.mark.only_clingo("Old concretizer cannot reuse")
|
||||
def test_reuse_specs_from_non_available_compilers(self, mutable_config, mutable_database):
|
||||
"""Tests that we can reuse specs with compilers that are not configured locally."""
|
||||
# All the specs in the mutable DB have been compiled with %gcc@=10.2.1
|
||||
@@ -2440,6 +2494,7 @@ def test_spec_with_build_dep_from_json(self, tmp_path):
|
||||
assert s["dttop"].dag_hash() == build_dep.dag_hash()
|
||||
|
||||
@pytest.mark.regression("44040")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_exclude_specs_from_reuse(self, monkeypatch):
|
||||
"""Tests that we can exclude a spec from reuse when concretizing, and that the spec
|
||||
is not added back to the solve as a dependency of another reusable spec.
|
||||
@@ -2489,6 +2544,7 @@ def test_exclude_specs_from_reuse(self, monkeypatch):
|
||||
[],
|
||||
],
|
||||
)
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_include_specs_from_externals_and_libcs(
|
||||
self, included_externals, mutable_config, tmp_path
|
||||
):
|
||||
@@ -2521,6 +2577,7 @@ def test_include_specs_from_externals_and_libcs(
|
||||
assert result["deprecated-versions"].satisfies("@1.0.0")
|
||||
|
||||
@pytest.mark.regression("44085")
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_can_reuse_concrete_externals_for_dependents(self, mutable_config, tmp_path):
|
||||
"""Test that external specs that are in the DB can be reused. This means they are
|
||||
preferred to concretizing another external from packages.yaml
|
||||
@@ -2543,6 +2600,7 @@ def test_can_reuse_concrete_externals_for_dependents(self, mutable_config, tmp_p
|
||||
sombrero = result.specs[0]
|
||||
assert sombrero["externaltool"].dag_hash() == external_spec.dag_hash()
|
||||
|
||||
@pytest.mark.only_clingo("Original concretizer cannot reuse")
|
||||
def test_cannot_reuse_host_incompatible_libc(self):
|
||||
"""Test whether reuse concretization correctly fails to reuse a spec with a host
|
||||
incompatible libc."""
|
||||
@@ -2623,6 +2681,7 @@ def duplicates_test_repository():
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("mutable_config", "duplicates_test_repository")
|
||||
@pytest.mark.only_clingo("Not supported by the original concretizer")
|
||||
class TestConcretizeSeparately:
|
||||
"""Collects test on separate concretization"""
|
||||
|
||||
@@ -2841,6 +2900,7 @@ def edges_test_repository():
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("mutable_config", "edges_test_repository")
|
||||
@pytest.mark.only_clingo("Edge properties not supported by the original concretizer")
|
||||
class TestConcretizeEdges:
|
||||
"""Collects tests on edge properties"""
|
||||
|
||||
@@ -2991,6 +3051,7 @@ def test_concretization_version_order():
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.only_clingo("Original concretizer cannot reuse specs")
|
||||
@pytest.mark.parametrize(
|
||||
"roots,reuse_yaml,expected,not_expected,expected_length",
|
||||
[
|
||||
@@ -3065,6 +3126,7 @@ def test_spec_filters(specs, include, exclude, expected):
|
||||
assert f.selected_specs() == expected
|
||||
|
||||
|
||||
@pytest.mark.only_clingo("clingo only reuse feature being tested")
|
||||
@pytest.mark.regression("38484")
|
||||
def test_git_ref_version_can_be_reused(install_mockery, do_not_check_runtimes_on_reuse):
|
||||
first_spec = spack.spec.Spec("git-ref-package@git.2.1.5=2.1.5~opt").concretized()
|
||||
@@ -3082,6 +3144,7 @@ def test_git_ref_version_can_be_reused(install_mockery, do_not_check_runtimes_on
|
||||
assert third_spec.dag_hash() == first_spec.dag_hash()
|
||||
|
||||
|
||||
@pytest.mark.only_clingo("clingo only reuse feature being tested")
|
||||
@pytest.mark.parametrize("standard_version", ["2.0.0", "2.1.5", "2.1.6"])
|
||||
def test_reuse_prefers_standard_over_git_versions(
|
||||
standard_version, install_mockery, do_not_check_runtimes_on_reuse
|
||||
|
@@ -17,7 +17,10 @@
from spack.environment.environment import ViewDescriptor
from spack.version import Version

pytestmark = [pytest.mark.usefixtures("enable_runtimes")]
pytestmark = [
pytest.mark.only_clingo("Original concretizer does not support compiler runtimes"),
pytest.mark.usefixtures("enable_runtimes"),
]


def _concretize_with_reuse(*, root_str, reused_str):
@@ -8,7 +8,10 @@
import spack.solver.asp
import spack.spec

pytestmark = [pytest.mark.not_on_windows("Windows uses old concretizer")]
pytestmark = [
pytest.mark.not_on_windows("Windows uses old concretizer"),
pytest.mark.only_clingo("Original concretizer does not support configuration requirements"),
]

version_error_messages = [
"Cannot satisfy 'fftw@:1.0' and 'fftw@1.1:",
@@ -113,6 +113,7 @@ def test_preferred_compilers(self, compiler_str, spec_str):
|
||||
spec = spack.spec.Spec(spec_str).concretized()
|
||||
assert spec.compiler == CompilerSpec(compiler_str)
|
||||
|
||||
@pytest.mark.only_clingo("Use case not supported by the original concretizer")
|
||||
def test_preferred_target(self, mutable_mock_repo):
|
||||
"""Test preferred targets are applied correctly"""
|
||||
spec = concretize("mpich")
|
||||
@@ -142,6 +143,7 @@ def test_preferred_versions(self):
|
||||
spec = concretize("mpileaks")
|
||||
assert spec.version == Version("2.2")
|
||||
|
||||
@pytest.mark.only_clingo("This behavior is not enforced for the old concretizer")
|
||||
def test_preferred_versions_mixed_version_types(self):
|
||||
update_packages("mixedversions", "version", ["=2.0"])
|
||||
spec = concretize("mixedversions")
|
||||
@@ -223,6 +225,7 @@ def test_preferred(self):
|
||||
spec.concretize()
|
||||
assert spec.version == Version("3.5.0")
|
||||
|
||||
@pytest.mark.only_clingo("This behavior is not enforced for the old concretizer")
|
||||
def test_preferred_undefined_raises(self):
|
||||
"""Preference should not specify an undefined version"""
|
||||
update_packages("python", "version", ["3.5.0.1"])
|
||||
@@ -230,6 +233,7 @@ def test_preferred_undefined_raises(self):
|
||||
with pytest.raises(spack.config.ConfigError):
|
||||
spec.concretize()
|
||||
|
||||
@pytest.mark.only_clingo("This behavior is not enforced for the old concretizer")
|
||||
def test_preferred_truncated(self):
|
||||
"""Versions without "=" are treated as version ranges: if there is
|
||||
a satisfying version defined in the package.py, we should use that
|
||||
@@ -506,6 +510,7 @@ def test_sticky_variant_accounts_for_packages_yaml(self):
|
||||
assert s.satisfies("%gcc") and s.satisfies("+allow-gcc")
|
||||
|
||||
@pytest.mark.regression("41134")
|
||||
@pytest.mark.only_clingo("Not backporting the fix to the old concretizer")
|
||||
def test_default_preference_variant_different_type_does_not_error(self):
|
||||
"""Tests that a different type for an existing variant in the 'all:' section of
|
||||
packages.yaml doesn't fail with an error.
|
||||
|
@@ -19,7 +19,10 @@
from spack.test.conftest import create_test_repo
from spack.util.url import path_to_file_url

pytestmark = [pytest.mark.not_on_windows("Windows uses old concretizer")]
pytestmark = [
pytest.mark.not_on_windows("Windows uses old concretizer"),
pytest.mark.only_clingo("Original concretizer does not support configuration requirements"),
]


def update_packages_config(conf_str):
@@ -18,7 +18,6 @@
|
||||
import spack.config
|
||||
import spack.directory_layout
|
||||
import spack.environment as ev
|
||||
import spack.fetch_strategy
|
||||
import spack.main
|
||||
import spack.package_base
|
||||
import spack.paths
|
||||
@@ -307,14 +306,14 @@ def test_add_config_path(mutable_config):
|
||||
|
||||
@pytest.mark.regression("17543,23259")
|
||||
def test_add_config_path_with_enumerated_type(mutable_config):
|
||||
spack.config.add("config:flags:keep_werror:all")
|
||||
assert spack.config.get("config")["flags"]["keep_werror"] == "all"
|
||||
spack.config.add("config:concretizer:clingo")
|
||||
assert spack.config.get("config")["concretizer"] == "clingo"
|
||||
|
||||
spack.config.add("config:flags:keep_werror:specific")
|
||||
assert spack.config.get("config")["flags"]["keep_werror"] == "specific"
|
||||
spack.config.add("config:concretizer:original")
|
||||
assert spack.config.get("config")["concretizer"] == "original"
|
||||
|
||||
with pytest.raises(spack.config.ConfigError):
|
||||
spack.config.add("config:flags:keep_werror:foo")
|
||||
spack.config.add("config:concretizer:foo")
|
||||
|
||||
|
||||
def test_add_config_filename(mock_low_high_config, tmpdir):
|
||||
|
@@ -56,17 +56,12 @@
|
||||
import spack.util.executable
|
||||
import spack.util.git
|
||||
import spack.util.gpg
|
||||
import spack.util.parallel
|
||||
import spack.util.spack_yaml as syaml
|
||||
import spack.util.url as url_util
|
||||
import spack.util.web
|
||||
import spack.version
|
||||
from spack.fetch_strategy import URLFetchStrategy
|
||||
from spack.main import SpackCommand
|
||||
from spack.util.pattern import Bunch
|
||||
|
||||
mirror_cmd = SpackCommand("mirror")
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def check_config_fixture(request):
|
||||
@@ -709,10 +704,11 @@ def configuration_dir(tmpdir_factory, linux_os):
|
||||
tmpdir.ensure("user", dir=True)
|
||||
|
||||
# Fill out config.yaml, compilers.yaml and modules.yaml templates.
|
||||
solver = os.environ.get("SPACK_TEST_SOLVER", "clingo")
|
||||
locks = sys.platform != "win32"
|
||||
config = tmpdir.join("site", "config.yaml")
|
||||
config_template = test_config / "config.yaml"
|
||||
config.write(config_template.read_text().format(install_tree_root, locks))
|
||||
config.write(config_template.read_text().format(install_tree_root, solver, locks))
|
||||
|
||||
target = str(archspec.cpu.host().family)
|
||||
compilers = tmpdir.join("site", "compilers.yaml")
|
||||
@@ -992,38 +988,6 @@ def install_mockery(temporary_store: spack.store.Store, mutable_config, mock_pac
|
||||
temporary_store.failure_tracker.clear_all()
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def temporary_mirror_dir(tmpdir_factory):
|
||||
dir = tmpdir_factory.mktemp("mirror")
|
||||
dir.ensure("build_cache", dir=True)
|
||||
yield str(dir)
|
||||
dir.join("build_cache").remove()
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def temporary_mirror(temporary_mirror_dir):
|
||||
mirror_url = url_util.path_to_file_url(temporary_mirror_dir)
|
||||
mirror_cmd("add", "--scope", "site", "test-mirror-func", mirror_url)
|
||||
yield temporary_mirror_dir
|
||||
mirror_cmd("rm", "--scope=site", "test-mirror-func")
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def mutable_temporary_mirror_dir(tmpdir_factory):
|
||||
dir = tmpdir_factory.mktemp("mirror")
|
||||
dir.ensure("build_cache", dir=True)
|
||||
yield str(dir)
|
||||
dir.join("build_cache").remove()
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def mutable_temporary_mirror(mutable_temporary_mirror_dir):
|
||||
mirror_url = url_util.path_to_file_url(mutable_temporary_mirror_dir)
|
||||
mirror_cmd("add", "--scope", "site", "test-mirror-func", mirror_url)
|
||||
yield mutable_temporary_mirror_dir
|
||||
mirror_cmd("rm", "--scope=site", "test-mirror-func")
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def temporary_store(tmpdir, request):
|
||||
"""Hooks a temporary empty store for the test function."""
|
||||
@@ -1038,7 +1002,7 @@ def temporary_store(tmpdir, request):
|
||||
def mock_fetch(mock_archive, monkeypatch):
|
||||
"""Fake the URL for a package so it downloads from a file."""
|
||||
monkeypatch.setattr(
|
||||
spack.package_base.PackageBase, "fetcher", URLFetchStrategy(url=mock_archive.url)
|
||||
spack.package_base.PackageBase, "fetcher", URLFetchStrategy(mock_archive.url)
|
||||
)
|
||||
|
||||
|
||||
@@ -1453,24 +1417,6 @@ def mock_git_repository(git, tmpdir_factory):
|
||||
r1 = rev_hash(branch)
|
||||
r1_file = branch_file
|
||||
|
||||
multiple_directories_branch = "many_dirs"
|
||||
num_dirs = 3
|
||||
num_files = 2
|
||||
dir_files = []
|
||||
for i in range(num_dirs):
|
||||
for j in range(num_files):
|
||||
dir_files.append(f"dir{i}/file{j}")
|
||||
|
||||
git("checkout", "-b", multiple_directories_branch)
|
||||
for f in dir_files:
|
||||
repodir.ensure(f, file=True)
|
||||
git("add", f)
|
||||
|
||||
git("-c", "commit.gpgsign=false", "commit", "-m", "many_dirs add files")
|
||||
|
||||
# restore default
|
||||
git("checkout", default_branch)
|
||||
|
||||
# Map of version -> bunch. Each bunch includes; all the args
|
||||
# that must be specified as part of a version() declaration (used to
|
||||
# manufacture a version for the 'git-test' package); the associated
|
||||
@@ -1490,11 +1436,6 @@ def mock_git_repository(git, tmpdir_factory):
|
||||
"default-no-per-version-git": Bunch(
|
||||
revision=default_branch, file=r0_file, args={"branch": default_branch}
|
||||
),
|
||||
"many-directories": Bunch(
|
||||
revision=multiple_directories_branch,
|
||||
file=dir_files[0],
|
||||
args={"git": url, "branch": multiple_directories_branch},
|
||||
),
|
||||
}
|
||||
|
||||
t = Bunch(
|
||||
@@ -1871,7 +1812,12 @@ def __call__(self, *args, **kwargs):
|
||||
tty.msg("curl: (22) The requested URL returned error: 404")
|
||||
self.returncode = 22
|
||||
|
||||
monkeypatch.setattr(spack.util.web, "require_curl", MockCurl)
|
||||
def mock_curl(*args):
|
||||
return MockCurl()
|
||||
|
||||
monkeypatch.setattr(spack.util.web, "_curl", mock_curl)
|
||||
|
||||
yield
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
@@ -2010,18 +1956,26 @@ def nullify_globals(request, monkeypatch):
|
||||
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
# Skip tests if they are marked only clingo and are run with the original concretizer
|
||||
only_clingo_marker = item.get_closest_marker(name="only_clingo")
|
||||
if only_clingo_marker and os.environ.get("SPACK_TEST_SOLVER") == "original":
|
||||
pytest.skip(*only_clingo_marker.args)
|
||||
|
||||
# Skip tests if they are marked only original and are run with clingo
|
||||
only_original_marker = item.get_closest_marker(name="only_original")
|
||||
if only_original_marker and os.environ.get("SPACK_TEST_SOLVER", "clingo") == "clingo":
|
||||
pytest.skip(*only_original_marker.args)
|
||||
|
||||
# Skip test marked "not_on_windows" if they're run on Windows
|
||||
not_on_windows_marker = item.get_closest_marker(name="not_on_windows")
|
||||
if not_on_windows_marker and sys.platform == "win32":
|
||||
pytest.skip(*not_on_windows_marker.args)
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
@pytest.fixture(scope="function")
|
||||
def disable_parallel_buildcache_push(monkeypatch):
|
||||
"""Disable process pools in tests."""
|
||||
monkeypatch.setattr(
|
||||
spack.util.parallel, "make_concurrent_executor", spack.util.parallel.SequentialExecutor
|
||||
)
|
||||
monkeypatch.setattr(spack.cmd.buildcache, "_make_pool", spack.cmd.buildcache.NoPool)
|
||||
|
||||
|
||||
def _root_path(x, y, *, path):
|
||||
|
@@ -27,7 +27,7 @@ def test_listing_possible_os():
assert expected_os in output


@pytest.mark.not_on_windows("test unsupported on Windows")
@pytest.mark.skipif(str(spack.platforms.host()) == "windows", reason="test unsupported on Windows")
@pytest.mark.maybeslow
@pytest.mark.requires_executables("git")
def test_bootstrap_phase(minimal_configuration, config_dumper, capsys):
@@ -13,4 +13,5 @@ config:
ssl_certs: $SSL_CERT_FILE
checksum: true
dirty: false
locks: {1}
concretizer: {1}
locks: {2}
@@ -1,10 +0,0 @@
<html>
<head>
This is the root page.
</head>
<body>
This is a page with a Vue javascript drop down with links as used in GitLab.

<div class="js-source-code-dropdown" data-css-class="" data-download-artifacts="[]" data-download-links="[{"text":"tar.gz","path":"/foo-5.0.0.tar.gz"}]"></div>
</body>
</html>
@@ -573,6 +573,9 @@ def test_conflicts_with_packages_that_are_not_dependencies(
|
||||
"""Tests that we cannot concretize two specs together, if one conflicts with the other,
|
||||
even though they don't have a dependency relation.
|
||||
"""
|
||||
if spack.config.get("config:concretizer") == "original":
|
||||
pytest.xfail("Known failure of the original concretizer")
|
||||
|
||||
manifest = tmp_path / "spack.yaml"
|
||||
manifest.write_text(
|
||||
f"""\
|
||||
@@ -594,6 +597,7 @@ def test_conflicts_with_packages_that_are_not_dependencies(
|
||||
|
||||
|
||||
@pytest.mark.regression("39455")
|
||||
@pytest.mark.only_clingo("Known failure of the original concretizer")
|
||||
@pytest.mark.parametrize(
|
||||
"possible_mpi_spec,unify", [("mpich", False), ("mpich", True), ("zmpi", False), ("zmpi", True)]
|
||||
)
|
||||
@@ -694,6 +698,7 @@ def test_removing_spec_from_manifest_with_exact_duplicates(
|
||||
|
||||
|
||||
@pytest.mark.regression("35298")
|
||||
@pytest.mark.only_clingo("Propagation not supported in the original concretizer")
|
||||
def test_variant_propagation_with_unify_false(tmp_path, mock_packages, config):
|
||||
"""Spack distributes concretizations to different processes, when unify:false is selected and
|
||||
the number of roots is 2 or more. When that happens, the specs to be concretized need to be
|
||||
@@ -809,6 +814,7 @@ def test_deconcretize_then_concretize_does_not_error(mutable_mock_env_path, mock
|
||||
|
||||
|
||||
@pytest.mark.regression("44216")
|
||||
@pytest.mark.only_clingo()
|
||||
def test_root_version_weights_for_old_versions(mutable_mock_env_path, mock_packages):
|
||||
"""Tests that, when we select two old versions of root specs that have the same version
|
||||
optimization penalty, both are considered.
|
||||
|
@@ -3,21 +3,54 @@
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
import spack.config
|
||||
import spack.error
|
||||
import spack.fetch_strategy
|
||||
import spack.stage
|
||||
|
||||
|
||||
def test_gcsfetchstrategy_downloaded(tmp_path):
|
||||
@pytest.mark.parametrize("_fetch_method", ["curl", "urllib"])
|
||||
def test_gcsfetchstrategy_without_url(_fetch_method):
|
||||
"""Ensure constructor with no URL fails."""
|
||||
with spack.config.override("config:url_fetch_method", _fetch_method):
|
||||
with pytest.raises(ValueError):
|
||||
spack.fetch_strategy.GCSFetchStrategy(None)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("_fetch_method", ["curl", "urllib"])
|
||||
def test_gcsfetchstrategy_bad_url(tmpdir, _fetch_method):
|
||||
"""Ensure fetch with bad URL fails as expected."""
|
||||
testpath = str(tmpdir)
|
||||
|
||||
with spack.config.override("config:url_fetch_method", _fetch_method):
|
||||
fetcher = spack.fetch_strategy.GCSFetchStrategy(url="file:///does-not-exist")
|
||||
assert fetcher is not None
|
||||
|
||||
with spack.stage.Stage(fetcher, path=testpath) as stage:
|
||||
assert stage is not None
|
||||
assert fetcher.archive_file is None
|
||||
with pytest.raises(spack.error.FetchError):
|
||||
fetcher.fetch()
|
||||
|
||||
|
||||
@pytest.mark.parametrize("_fetch_method", ["curl", "urllib"])
|
||||
def test_gcsfetchstrategy_downloaded(tmpdir, _fetch_method):
|
||||
"""Ensure fetch with archive file already downloaded is a noop."""
|
||||
archive = tmp_path / "gcs.tar.gz"
|
||||
testpath = str(tmpdir)
|
||||
archive = os.path.join(testpath, "gcs.tar.gz")
|
||||
|
||||
class Archived_GCSFS(spack.fetch_strategy.GCSFetchStrategy):
|
||||
@property
|
||||
def archive_file(self):
|
||||
return str(archive)
|
||||
with spack.config.override("config:url_fetch_method", _fetch_method):
|
||||
|
||||
fetcher = Archived_GCSFS(url="gs://example/gcs.tar.gz")
|
||||
with spack.stage.Stage(fetcher, path=str(tmp_path)):
|
||||
fetcher.fetch()
|
||||
class Archived_GCSFS(spack.fetch_strategy.GCSFetchStrategy):
|
||||
@property
|
||||
def archive_file(self):
|
||||
return archive
|
||||
|
||||
url = "gcs:///{0}".format(archive)
|
||||
fetcher = Archived_GCSFS(url=url)
|
||||
with spack.stage.Stage(fetcher, path=testpath):
|
||||
fetcher.fetch()
|
||||
|
@@ -390,38 +390,3 @@ def submodules_callback(package):
|
||||
assert not os.path.isfile(file_path)
|
||||
file_path = os.path.join(s.package.stage.source_path, "third_party/submodule1/r0_file_1")
|
||||
assert not os.path.isfile(file_path)
|
||||
|
||||
|
||||
@pytest.mark.disable_clean_stage_check
|
||||
def test_git_sparse_paths_partial_clone(
|
||||
mock_git_repository, git_version, default_mock_concretization, mutable_mock_repo, monkeypatch
|
||||
):
|
||||
"""
|
||||
Test partial clone of repository when using git_sparse_paths property
|
||||
"""
|
||||
type_of_test = "many-directories"
|
||||
sparse_paths = ["dir0"]
|
||||
omitted_paths = ["dir1", "dir2"]
|
||||
t = mock_git_repository.checks[type_of_test]
|
||||
args = copy.copy(t.args)
|
||||
args["git_sparse_paths"] = sparse_paths
|
||||
s = default_mock_concretization("git-test")
|
||||
monkeypatch.setitem(s.package.versions, Version("git"), args)
|
||||
s.package.do_stage()
|
||||
with working_dir(s.package.stage.source_path):
|
||||
# top level directory files are cloned via sparse-checkout
|
||||
assert os.path.isfile("r0_file")
|
||||
|
||||
for p in sparse_paths:
|
||||
assert os.path.isdir(p)
|
||||
|
||||
if git_version < Version("2.25.0.0"):
|
||||
# older versions of git should fall back to a full clone
|
||||
for p in omitted_paths:
|
||||
assert os.path.isdir(p)
|
||||
else:
|
||||
for p in omitted_paths:
|
||||
assert not os.path.isdir(p)
|
||||
|
||||
# fixture file is in the sparse-path expansion tree
|
||||
assert os.path.isfile(t.file)
|
||||
|
@@ -9,6 +9,33 @@
|
||||
import spack.spec
|
||||
|
||||
|
||||
def test_static_graph_mpileaks(config, mock_packages):
|
||||
"""Test a static spack graph for a simple package."""
|
||||
s = spack.spec.Spec("mpileaks").normalized()
|
||||
|
||||
stream = io.StringIO()
|
||||
spack.graph.static_graph_dot([s], out=stream)
|
||||
|
||||
dot = stream.getvalue()
|
||||
|
||||
assert ' "mpileaks" [label="mpileaks"]\n' in dot
|
||||
assert ' "dyninst" [label="dyninst"]\n' in dot
|
||||
assert ' "callpath" [label="callpath"]\n' in dot
|
||||
assert ' "libelf" [label="libelf"]\n' in dot
|
||||
assert ' "libdwarf" [label="libdwarf"]\n' in dot
|
||||
|
||||
mpi_providers = spack.repo.PATH.providers_for("mpi")
|
||||
for spec in mpi_providers:
|
||||
assert ('"mpileaks" -> "%s"' % spec.name) in dot
|
||||
assert ('"callpath" -> "%s"' % spec.name) in dot
|
||||
|
||||
assert ' "dyninst" -> "libdwarf"\n' in dot
|
||||
assert ' "callpath" -> "dyninst"\n' in dot
|
||||
assert ' "libdwarf" -> "libelf"\n' in dot
|
||||
assert ' "mpileaks" -> "callpath"\n' in dot
|
||||
assert ' "dyninst" -> "libelf"\n' in dot
|
||||
|
||||
|
||||
def test_dynamic_dot_graph_mpileaks(default_mock_concretization):
|
||||
"""Test dynamically graphing the mpileaks package."""
|
||||
s = default_mock_concretization("mpileaks")
|
||||
|
@@ -612,7 +612,9 @@ def test_install_from_binary_with_missing_patch_succeeds(
|
||||
# Push it to a binary cache
|
||||
build_cache = tmp_path / "my_build_cache"
|
||||
binary_distribution.push_or_raise(
|
||||
[s], out_url=build_cache.as_uri(), signing_key=None, force=False
|
||||
s,
|
||||
build_cache.as_uri(),
|
||||
binary_distribution.PushOptions(unsigned=True, regenerate_index=True),
|
||||
)
|
||||
|
||||
# Now re-install it.
|
||||
|
@@ -32,7 +32,6 @@
|
||||
import spack.store
|
||||
import spack.util.lock as lk
|
||||
import spack.version
|
||||
from spack.main import SpackCommand
|
||||
|
||||
|
||||
def _mock_repo(root, namespace):
|
||||
@@ -583,7 +582,7 @@ def test_clear_failures_success(tmpdir):
|
||||
assert os.path.isfile(failures.locker.lock_path)
|
||||
|
||||
|
||||
@pytest.mark.not_on_windows("chmod does not prevent removal on Win")
|
||||
@pytest.mark.xfail(sys.platform == "win32", reason="chmod does not prevent removal on Win")
|
||||
def test_clear_failures_errs(tmpdir, capsys):
|
||||
"""Test the clear_failures exception paths."""
|
||||
failures = spack.database.FailureTracker(str(tmpdir), default_timeout=0.1)
|
||||
@@ -740,85 +739,6 @@ def test_installer_init_requests(install_mockery):
|
||||
assert request.pkg.name == spec_name
|
||||
|
||||
|
||||
@pytest.mark.parametrize("transitive", [True, False])
|
||||
def test_install_spliced(
|
||||
install_mockery, mock_fetch, default_mock_concretization, monkeypatch, capsys, transitive
|
||||
):
|
||||
"""TODO: description"""
|
||||
spec = default_mock_concretization("splice-t")
|
||||
dep = default_mock_concretization("splice-h+foo")
|
||||
|
||||
# Do the splice.
|
||||
out = spec.splice(dep, transitive)
|
||||
installer = create_installer([out], {"vebose": True, "fail_fast": True})
|
||||
installer.install()
|
||||
for node in out.traverse():
|
||||
assert node.installed
|
||||
assert node.build_spec.installed
|
||||
|
||||
|
||||
@pytest.mark.parametrize("transitive", [True, False])
|
||||
def test_install_spliced_build_spec_installed(
|
||||
install_mockery, default_mock_concretization, capfd, mock_fetch, transitive
|
||||
):
|
||||
"""TODO: description"""
|
||||
spec = default_mock_concretization("splice-t")
|
||||
dep = default_mock_concretization("splice-h+foo")
|
||||
|
||||
# Do the splice.
|
||||
out = spec.splice(dep, transitive)
|
||||
out.build_spec.package.do_install()
|
||||
installer = create_installer([out], {"vebose": True, "fail_fast": True})
|
||||
installer._init_queue()
|
||||
for _, task in installer.build_pq:
|
||||
assert isinstance(task, inst.RewireTask if task.pkg.spec.spliced else inst.BuildTask)
|
||||
assert installer.build_pq[-1][0][0] == 2
|
||||
installer.install()
|
||||
for node in out.traverse():
|
||||
assert node.installed
|
||||
assert node.build_spec.installed
|
||||
|
||||
|
||||
@pytest.mark.not_on_windows("lacking windows support for binary installs")
|
||||
@pytest.mark.parametrize("transitive", [True, False])
|
||||
@pytest.mark.parametrize("root_str", ["splice-t^splice-h~foo", "splice-h~foo"])
|
||||
def test_install_splice_root_from_binary(
|
||||
install_mockery,
|
||||
default_mock_concretization,
|
||||
mock_fetch,
|
||||
mutable_temporary_mirror,
|
||||
transitive,
|
||||
root_str,
|
||||
):
|
||||
"""TODO: Docstring"""
|
||||
# Test splicing and rewiring a spec with the same name, different hash.
|
||||
original_spec = spack.spec.Spec(root_str).concretized()
|
||||
spec_to_splice = spack.spec.Spec("splice-h+foo").concretized()
|
||||
|
||||
original_spec.package.do_install()
|
||||
spec_to_splice.package.do_install()
|
||||
|
||||
out = original_spec.splice(spec_to_splice, transitive)
|
||||
|
||||
buildcache = SpackCommand("buildcache")
|
||||
buildcache(
|
||||
"push",
|
||||
"--allow-root",
|
||||
"--unsigned",
|
||||
"--update-index",
|
||||
mutable_temporary_mirror,
|
||||
str(original_spec),
|
||||
str(spec_to_splice),
|
||||
)
|
||||
|
||||
uninstall = SpackCommand("uninstall")
|
||||
uninstall("-ay")
|
||||
|
||||
out.package.do_install(unsigned=True)
|
||||
|
||||
assert len(spack.store.STORE.db.query()) == len(list(out.traverse()))
|
||||
|
||||
|
||||
def test_install_task_use_cache(install_mockery, monkeypatch):
|
||||
installer = create_installer(["trivial-install-test-package"], {})
|
||||
request = installer.build_requests[0]
|
||||
@@ -841,7 +761,7 @@ def _add(_compilers):
|
||||
|
||||
# Preclude any meaningful side-effects
|
||||
monkeypatch.setattr(spack.package_base.PackageBase, "unit_test_check", _true)
|
||||
monkeypatch.setattr(inst.BuildTask, "_setup_install_dir", _noop)
|
||||
monkeypatch.setattr(inst.PackageInstaller, "_setup_install_dir", _noop)
|
||||
monkeypatch.setattr(spack.build_environment, "start_build_process", _noop)
|
||||
monkeypatch.setattr(spack.database.Database, "add", _noop)
|
||||
monkeypatch.setattr(spack.compilers, "add_compilers_to_config", _add)
|
||||
@@ -947,10 +867,8 @@ def _chgrp(path, group, follow_symlinks=True):
|
||||
monkeypatch.setattr(prefs, "get_package_group", _get_group)
|
||||
monkeypatch.setattr(fs, "chgrp", _chgrp)
|
||||
|
||||
build_task = create_build_task(
|
||||
spack.spec.Spec("trivial-install-test-package").concretized().package
|
||||
)
|
||||
spec = build_task.request.pkg.spec
|
||||
installer = create_installer(["trivial-install-test-package"], {})
|
||||
spec = installer.build_requests[0].pkg.spec
|
||||
|
||||
fs.touchp(spec.prefix)
|
||||
metadatadir = spack.store.STORE.layout.metadata_path(spec)
|
||||
@@ -960,7 +878,7 @@ def _chgrp(path, group, follow_symlinks=True):
|
||||
metadatadir = None
|
||||
# Should fail with a "not a directory" error
|
||||
with pytest.raises(OSError, match=metadatadir):
|
||||
build_task._setup_install_dir(spec.package)
|
||||
installer._setup_install_dir(spec.package)
|
||||
|
||||
out = str(capfd.readouterr()[0])
|
||||
|
||||
@@ -1047,76 +965,79 @@ def test_install_failed_not_fast(install_mockery, monkeypatch, capsys):
|
||||
assert "Skipping build of pkg-a" in out
|
||||
|
||||
|
||||
def _interrupt(installer, task, install_status, **kwargs):
|
||||
if task.pkg.name == "a":
|
||||
raise KeyboardInterrupt("mock keyboard interrupt for a")
|
||||
else:
|
||||
return installer._real_install_task(task, None)
|
||||
# installer.installed.add(task.pkg.name)
|
||||
|
||||
|
||||
def test_install_fail_on_interrupt(install_mockery, mock_fetch, monkeypatch):
|
||||
def test_install_fail_on_interrupt(install_mockery, monkeypatch):
|
||||
"""Test ctrl-c interrupted install."""
|
||||
spec_name = "pkg-a"
|
||||
err_msg = "mock keyboard interrupt for {0}".format(spec_name)
|
||||
|
||||
def _interrupt(installer, task, install_status, **kwargs):
|
||||
if task.pkg.name == spec_name:
|
||||
raise KeyboardInterrupt(err_msg)
|
||||
else:
|
||||
installer.installed.add(task.pkg.name)
|
||||
|
||||
installer = create_installer([spec_name], {})
|
||||
# TODO: Clean this up in fixture with delattr.
|
||||
setattr(inst.PackageInstaller, "_real_install_task", inst.PackageInstaller._install_task)
|
||||
|
||||
# Raise a KeyboardInterrupt error to trigger early termination
|
||||
monkeypatch.setattr(inst.PackageInstaller, "_install_task", _interrupt)
|
||||
|
||||
with pytest.raises(KeyboardInterrupt, match=err_msg):
|
||||
installer.install()
|
||||
|
||||
assert not any(i.startswith("pkg-a-") for i in installer.installed)
|
||||
assert any(
|
||||
i.startswith("pkg-b-") for i in installer.installed
|
||||
) # ensure dependency of a is 'installed'
|
||||
# assert spec_name not in installer.installed
|
||||
assert "pkg-b" in installer.installed # ensure dependency of pkg-a is 'installed'
|
||||
assert spec_name not in installer.installed
|
||||
|
||||
|
||||
class MyBuildException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def _install_fail_my_build_exception(installer, task, install_status, **kwargs):
|
||||
print(task, task.pkg.name)
|
||||
if task.pkg.name == "pkg-a":
|
||||
raise MyBuildException("mock internal package build error for pkg-a")
|
||||
else:
|
||||
# No need for more complex logic here because no splices
|
||||
task.execute(install_status)
|
||||
installer._update_installed(task)
|
||||
|
||||
|
||||
def test_install_fail_single(install_mockery, mock_fetch, monkeypatch):
|
||||
def test_install_fail_single(install_mockery, monkeypatch):
|
||||
"""Test expected results for failure of single package."""
|
||||
installer = create_installer(["pkg-a"], {})
|
||||
spec_name = "pkg-a"
|
||||
err_msg = "mock internal package build error for {0}".format(spec_name)
|
||||
|
||||
class MyBuildException(Exception):
|
||||
pass
|
||||
|
||||
def _install(installer, task, install_status, **kwargs):
|
||||
if task.pkg.name == spec_name:
|
||||
raise MyBuildException(err_msg)
|
||||
else:
|
||||
installer.installed.add(task.pkg.name)
|
||||
|
||||
installer = create_installer([spec_name], {})
|
||||
|
||||
# Raise a KeyboardInterrupt error to trigger early termination
|
||||
monkeypatch.setattr(inst.PackageInstaller, "_install_task", _install_fail_my_build_exception)
|
||||
monkeypatch.setattr(inst.PackageInstaller, "_install_task", _install)
|
||||
|
||||
with pytest.raises(MyBuildException, match="mock internal package build error for a"):
|
||||
with pytest.raises(MyBuildException, match=err_msg):
|
||||
installer.install()
|
||||
|
||||
# ensure dependency of a is 'installed' and a is not
|
||||
assert any(pkg_id.startswith("pkg-b-") for pkg_id in installer.installed)
|
||||
assert not any(pkg_id.startswith("pkg-a-") for pkg_id in installer.installed)
|
||||
assert "pkg-b" in installer.installed # ensure dependency of a is 'installed'
|
||||
assert spec_name not in installer.installed
|
||||
|
||||
|
||||
def test_install_fail_multi(install_mockery, mock_fetch, monkeypatch):
|
||||
def test_install_fail_multi(install_mockery, monkeypatch):
|
||||
"""Test expected results for failure of multiple packages."""
|
||||
installer = create_installer(["pkg-a", "pkg-c"], {})
|
||||
spec_name = "pkg-c"
|
||||
err_msg = "mock internal package build error"
|
||||
|
||||
class MyBuildException(Exception):
|
||||
pass
|
||||
|
||||
def _install(installer, task, install_status, **kwargs):
|
||||
if task.pkg.name == spec_name:
|
||||
raise MyBuildException(err_msg)
|
||||
else:
|
||||
installer.installed.add(task.pkg.name)
|
||||
|
||||
installer = create_installer([spec_name, "pkg-a"], {})
|
||||
|
||||
# Raise a KeyboardInterrupt error to trigger early termination
|
||||
monkeypatch.setattr(inst.PackageInstaller, "_install_task", _install_fail_my_build_exception)
|
||||
monkeypatch.setattr(inst.PackageInstaller, "_install_task", _install)
|
||||
|
||||
with pytest.raises(inst.InstallError, match="Installation request failed"):
|
||||
installer.install()
|
||||
|
||||
# ensure the second spec installed but not the first
|
||||
assert any(pkg_id.startswith("pkg-c-") for pkg_id in installer.installed)
|
||||
assert not any(pkg_id.startswith("pkg-a-") for pkg_id in installer.installed)
|
||||
assert "pkg-a" in installer.installed # ensure the the second spec installed
|
||||
assert spec_name not in installer.installed
|
||||
|
||||
|
||||
def test_install_fail_fast_on_detect(install_mockery, monkeypatch, capsys):
|
||||
|
@@ -274,7 +274,7 @@ def test_symlinks_false(self, stage):
assert not os.path.islink("dest/2")
check_added_exe_permissions("source/2", "dest/2")

@pytest.mark.not_on_windows("Broken symlinks not allowed on Windows")
@pytest.mark.skipif(sys.platform == "win32", reason="Broken symlinks not allowed on Windows")
def test_allow_broken_symlinks(self, stage):
"""Test installing with a broken symlink."""
with fs.working_dir(str(stage)):
@@ -4,13 +4,19 @@
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import contextlib
|
||||
import multiprocessing
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
from types import ModuleType
|
||||
from typing import Optional
|
||||
|
||||
import pytest
|
||||
|
||||
import llnl.util.lang as lang
|
||||
import llnl.util.tty.log as log
|
||||
import llnl.util.tty.pty as pty
|
||||
|
||||
from spack.util.executable import which
|
||||
|
||||
@@ -167,3 +173,342 @@ def test_log_subproc_and_echo_output_capfd(capfd, tmpdir):
|
||||
print("logged")
|
||||
|
||||
assert capfd.readouterr()[0] == "echo\n"
|
||||
|
||||
|
||||
#
|
||||
# Tests below use a pseudoterminal to test llnl.util.tty.log
|
||||
#
|
||||
def simple_logger(**kwargs):
|
||||
"""Mock logger (minion) process for testing log.keyboard_input."""
|
||||
running = [True]
|
||||
|
||||
def handler(signum, frame):
|
||||
running[0] = False
|
||||
|
||||
signal.signal(signal.SIGUSR1, handler)
|
||||
|
||||
log_path = kwargs["log_path"]
|
||||
with log.log_output(log_path):
|
||||
while running[0]:
|
||||
print("line")
|
||||
time.sleep(1e-3)
|
||||
|
||||
|
||||
def mock_shell_fg(proc, ctl, **kwargs):
|
||||
"""PseudoShell controller function for test_foreground_background."""
|
||||
ctl.fg()
|
||||
ctl.status()
|
||||
ctl.wait_enabled()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_fg_no_termios(proc, ctl, **kwargs):
|
||||
"""PseudoShell controller function for test_foreground_background."""
|
||||
ctl.fg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled_fg()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_bg(proc, ctl, **kwargs):
|
||||
"""PseudoShell controller function for test_foreground_background."""
|
||||
ctl.bg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_tstp_cont(proc, ctl, **kwargs):
|
||||
"""PseudoShell controller function for test_foreground_background."""
|
||||
ctl.tstp()
|
||||
ctl.wait_stopped()
|
||||
|
||||
ctl.cont()
|
||||
ctl.wait_running()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_tstp_tstp_cont(proc, ctl, **kwargs):
|
||||
"""PseudoShell controller function for test_foreground_background."""
|
||||
ctl.tstp()
|
||||
ctl.wait_stopped()
|
||||
|
||||
ctl.tstp()
|
||||
ctl.wait_stopped()
|
||||
|
||||
ctl.cont()
|
||||
ctl.wait_running()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_tstp_tstp_cont_cont(proc, ctl, **kwargs):
|
||||
"""PseudoShell controller function for test_foreground_background."""
|
||||
ctl.tstp()
|
||||
ctl.wait_stopped()
|
||||
|
||||
ctl.tstp()
|
||||
ctl.wait_stopped()
|
||||
|
||||
ctl.cont()
|
||||
ctl.wait_running()
|
||||
|
||||
ctl.cont()
|
||||
ctl.wait_running()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_bg_fg(proc, ctl, **kwargs):
|
||||
"""PseudoShell controller function for test_foreground_background."""
|
||||
ctl.bg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled()
|
||||
|
||||
ctl.fg()
|
||||
ctl.status()
|
||||
ctl.wait_enabled()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_bg_fg_no_termios(proc, ctl, **kwargs):
|
||||
"""PseudoShell controller function for test_foreground_background."""
|
||||
ctl.bg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled()
|
||||
|
||||
ctl.fg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled_fg()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_fg_bg(proc, ctl, **kwargs):
|
||||
"""PseudoShell controller function for test_foreground_background."""
|
||||
ctl.fg()
|
||||
ctl.status()
|
||||
ctl.wait_enabled()
|
||||
|
||||
ctl.bg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_fg_bg_no_termios(proc, ctl, **kwargs):
|
||||
"""PseudoShell controller function for test_foreground_background."""
|
||||
ctl.fg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled_fg()
|
||||
|
||||
ctl.bg()
|
||||
ctl.status()
|
||||
ctl.wait_disabled()
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def no_termios():
|
||||
saved = log.termios
|
||||
log.termios = None
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
log.termios = saved
|
||||
|
||||
|
||||
@pytest.mark.skipif(not which("ps"), reason="requires ps utility")
|
||||
@pytest.mark.skipif(not termios, reason="requires termios support")
|
||||
@pytest.mark.parametrize(
|
||||
"test_fn,termios_on_or_off",
|
||||
[
|
||||
# tests with termios
|
||||
(mock_shell_fg, lang.nullcontext),
|
||||
(mock_shell_bg, lang.nullcontext),
|
||||
(mock_shell_bg_fg, lang.nullcontext),
|
||||
(mock_shell_fg_bg, lang.nullcontext),
|
||||
(mock_shell_tstp_cont, lang.nullcontext),
|
||||
(mock_shell_tstp_tstp_cont, lang.nullcontext),
|
||||
(mock_shell_tstp_tstp_cont_cont, lang.nullcontext),
|
||||
# tests without termios
|
||||
(mock_shell_fg_no_termios, no_termios),
|
||||
(mock_shell_bg, no_termios),
|
||||
(mock_shell_bg_fg_no_termios, no_termios),
|
||||
(mock_shell_fg_bg_no_termios, no_termios),
|
||||
(mock_shell_tstp_cont, no_termios),
|
||||
(mock_shell_tstp_tstp_cont, no_termios),
|
||||
(mock_shell_tstp_tstp_cont_cont, no_termios),
|
||||
],
|
||||
)
|
||||
@pytest.mark.xfail(reason="Fails almost consistently when run with coverage and xdist")
|
||||
def test_foreground_background(test_fn, termios_on_or_off, tmpdir):
|
||||
"""Functional tests for foregrounding and backgrounding a logged process.
|
||||
|
||||
This ensures that things like SIGTTOU are not raised and that
|
||||
terminal settings are corrected on foreground/background and on
|
||||
process stop and start.
|
||||
|
||||
"""
|
||||
shell = pty.PseudoShell(test_fn, simple_logger)
|
||||
log_path = str(tmpdir.join("log.txt"))
|
||||
|
||||
# run the shell test
|
||||
with termios_on_or_off():
|
||||
shell.start(log_path=log_path, debug=True)
|
||||
exitcode = shell.join()
|
||||
|
||||
# processes completed successfully
|
||||
assert exitcode == 0
|
||||
|
||||
# assert log was created
|
||||
assert os.path.exists(log_path)
|
||||
|
||||
|
||||
def synchronized_logger(**kwargs):
|
||||
"""Mock logger (minion) process for testing log.keyboard_input.
|
||||
|
||||
This logger synchronizes with the parent process to test that 'v' can
|
||||
toggle output. It is used in ``test_foreground_background_output`` below.
|
||||
|
||||
"""
|
||||
running = [True]
|
||||
|
||||
def handler(signum, frame):
|
||||
running[0] = False
|
||||
|
||||
signal.signal(signal.SIGUSR1, handler)
|
||||
|
||||
log_path = kwargs["log_path"]
|
||||
write_lock = kwargs["write_lock"]
|
||||
v_lock = kwargs["v_lock"]
|
||||
|
||||
sys.stderr.write(os.getcwd() + "\n")
|
||||
with log.log_output(log_path) as logger:
|
||||
with logger.force_echo():
|
||||
print("forced output")
|
||||
|
||||
while running[0]:
|
||||
with write_lock:
|
||||
if v_lock.acquire(False): # non-blocking acquire
|
||||
print("off")
|
||||
v_lock.release()
|
||||
else:
|
||||
print("on") # lock held; v is toggled on
|
||||
time.sleep(1e-2)
|
||||
|
||||
|
||||
def mock_shell_v_v(proc, ctl, **kwargs):
|
||||
"""Controller function for test_foreground_background_output."""
|
||||
write_lock = kwargs["write_lock"]
|
||||
v_lock = kwargs["v_lock"]
|
||||
|
||||
ctl.fg()
|
||||
ctl.wait_enabled()
|
||||
time.sleep(0.1)
|
||||
|
||||
write_lock.acquire() # suspend writing
|
||||
v_lock.acquire() # enable v lock
|
||||
ctl.write(b"v") # toggle v on stdin
|
||||
time.sleep(0.1)
|
||||
write_lock.release() # resume writing
|
||||
|
||||
time.sleep(0.1)
|
||||
|
||||
write_lock.acquire() # suspend writing
|
||||
ctl.write(b"v") # toggle v on stdin
|
||||
time.sleep(0.1)
|
||||
v_lock.release() # disable v lock
|
||||
write_lock.release() # resume writing
|
||||
time.sleep(0.1)
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
def mock_shell_v_v_no_termios(proc, ctl, **kwargs):
|
||||
"""Controller function for test_foreground_background_output."""
|
||||
write_lock = kwargs["write_lock"]
|
||||
v_lock = kwargs["v_lock"]
|
||||
|
||||
ctl.fg()
|
||||
ctl.wait_disabled_fg()
|
||||
time.sleep(0.1)
|
||||
|
||||
write_lock.acquire() # suspend writing
|
||||
v_lock.acquire() # enable v lock
|
||||
ctl.write(b"v\n") # toggle v on stdin
|
||||
time.sleep(0.1)
|
||||
write_lock.release() # resume writing
|
||||
|
||||
time.sleep(0.1)
|
||||
|
||||
write_lock.acquire() # suspend writing
|
||||
ctl.write(b"v\n") # toggle v on stdin
|
||||
time.sleep(0.1)
|
||||
v_lock.release() # disable v lock
|
||||
write_lock.release() # resume writing
|
||||
time.sleep(0.1)
|
||||
|
||||
os.kill(proc.pid, signal.SIGUSR1)
|
||||
|
||||
|
||||
@pytest.mark.skipif(not which("ps"), reason="requires ps utility")
|
||||
@pytest.mark.skipif(not termios, reason="requires termios support")
|
||||
@pytest.mark.parametrize(
|
||||
"test_fn,termios_on_or_off",
|
||||
[(mock_shell_v_v, lang.nullcontext), (mock_shell_v_v_no_termios, no_termios)],
|
||||
)
|
||||
@pytest.mark.xfail(reason="Fails almost consistently when run with coverage and xdist")
|
||||
def test_foreground_background_output(test_fn, capfd, termios_on_or_off, tmpdir):
|
||||
"""Tests hitting 'v' toggles output, and that force_echo works."""
|
||||
if sys.version_info >= (3, 8) and sys.platform == "darwin" and termios_on_or_off == no_termios:
|
||||
return
|
||||
|
||||
shell = pty.PseudoShell(test_fn, synchronized_logger)
|
||||
log_path = str(tmpdir.join("log.txt"))
|
||||
|
||||
# Locks for synchronizing with minion
|
||||
write_lock = multiprocessing.Lock() # must be held by minion to write
|
||||
v_lock = multiprocessing.Lock() # held while controller is in v mode
|
||||
|
||||
with termios_on_or_off():
|
||||
shell.start(write_lock=write_lock, v_lock=v_lock, debug=True, log_path=log_path)
|
||||
|
||||
exitcode = shell.join()
|
||||
out, err = capfd.readouterr()
|
||||
print(err) # will be shown if something goes wrong
|
||||
print(out)
|
||||
|
||||
# processes completed successfully
|
||||
assert exitcode == 0
|
||||
|
||||
# split output into lines
|
||||
output = out.strip().split("\n")
|
||||
|
||||
# also get lines of log file
|
||||
assert os.path.exists(log_path)
|
||||
with open(log_path) as logfile:
|
||||
log_data = logfile.read().strip().split("\n")
|
||||
|
||||
# Controller and minion process coordinate with locks such that the
|
||||
# minion writes "off" when echo is off, and "on" when echo is on. The
|
||||
# output should contain mostly "on" lines, but may contain "off"
|
||||
# lines if the controller is slow. The important thing to observe
|
||||
# here is that we started seeing 'on' in the end.
|
||||
assert ["forced output", "on"] == lang.uniq(output) or [
|
||||
"forced output",
|
||||
"off",
|
||||
"on",
|
||||
] == lang.uniq(output)
|
||||
|
||||
# log should be off for a while, then on, then off
|
||||
assert ["forced output", "off", "on", "off"] == lang.uniq(log_data) and log_data.count(
|
||||
"off"
|
||||
) > 2 # ensure some "off" lines were omitted
|
||||
|
@@ -9,13 +9,16 @@
This just tests whether the right args are getting passed to make.
"""
import os
import sys

import pytest

from spack.build_environment import MakeExecutable
from spack.util.environment import path_put_first

pytestmark = pytest.mark.not_on_windows("MakeExecutable not supported on Windows")
pytestmark = pytest.mark.skipif(
sys.platform == "win32", reason="MakeExecutable not supported on Windows"
)


@pytest.fixture(autouse=True)
@@ -10,10 +10,7 @@
|
||||
|
||||
from llnl.util.symlink import resolve_link_target_relative_to_the_link
|
||||
|
||||
import spack.caches
|
||||
import spack.fetch_strategy
|
||||
import spack.mirror
|
||||
import spack.patch
|
||||
import spack.repo
|
||||
import spack.util.executable
|
||||
import spack.util.spack_json as sjson
|
||||
@@ -205,7 +202,7 @@ def test_invalid_json_mirror_collection(invalid_json, error_message):
|
||||
|
||||
def test_mirror_archive_paths_no_version(mock_packages, mock_archive):
|
||||
spec = Spec("trivial-install-test-package@=nonexistingversion").concretized()
|
||||
fetcher = spack.fetch_strategy.URLFetchStrategy(url=mock_archive.url)
|
||||
fetcher = spack.fetch_strategy.URLFetchStrategy(mock_archive.url)
|
||||
spack.mirror.mirror_archive_paths(fetcher, "per-package-ref", spec)
|
||||
|
||||
|
||||
@@ -276,7 +273,7 @@ def test_mirror_cache_symlinks(tmpdir):
|
||||
cosmetic_path = "zlib/zlib-1.2.11.tar.gz"
|
||||
global_path = "_source-cache/archive/c3/c3e5.tar.gz"
|
||||
cache = spack.caches.MirrorCache(str(tmpdir), False)
|
||||
reference = spack.mirror.DefaultLayout(cosmetic_path, global_path)
|
||||
reference = spack.mirror.MirrorReference(cosmetic_path, global_path)
|
||||
|
||||
cache.store(MockFetcher(), reference.storage_path)
|
||||
cache.symlink(reference)
|
||||
|
@@ -14,6 +14,7 @@

pytestmark = [
pytest.mark.usefixtures("mock_packages", "config"),
pytest.mark.only_clingo("The original concretizer cannot concretize most of the specs"),
pytest.mark.not_on_windows("Not running on windows"),
]

@@ -10,21 +10,12 @@
|
||||
import json
|
||||
import os
|
||||
import pathlib
|
||||
import re
|
||||
import urllib.error
|
||||
from contextlib import contextmanager
|
||||
|
||||
import pytest
|
||||
|
||||
import spack.binary_distribution
|
||||
import spack.cmd.buildcache
|
||||
import spack.database
|
||||
import spack.environment as ev
|
||||
import spack.error
|
||||
import spack.oci.opener
|
||||
import spack.spec
|
||||
from spack.main import SpackCommand
|
||||
from spack.oci.image import Digest, ImageReference, default_config, default_manifest, default_tag
|
||||
from spack.oci.image import Digest, ImageReference, default_config, default_manifest
|
||||
from spack.oci.oci import blob_exists, get_manifest_and_config, upload_blob, upload_manifest
|
||||
from spack.test.oci.mock_registry import DummyServer, InMemoryOCIRegistry, create_opener
|
||||
from spack.util.archive import gzip_compressed_tarfile
|
||||
@@ -43,7 +34,7 @@ def oci_servers(*servers: DummyServer):
|
||||
spack.oci.opener.urlopen = old_opener
|
||||
|
||||
|
||||
def test_buildcache_push_command(mutable_database):
|
||||
def test_buildcache_push_command(mutable_database, disable_parallel_buildcache_push):
|
||||
with oci_servers(InMemoryOCIRegistry("example.com")):
|
||||
mirror("add", "oci-test", "oci://example.com/image")
|
||||
|
||||
@@ -66,7 +57,9 @@ def test_buildcache_push_command(mutable_database):
|
||||
assert os.path.exists(os.path.join(spec.prefix, "bin", "mpileaks"))
|
||||
|
||||
|
||||
def test_buildcache_tag(install_mockery, mock_fetch, mutable_mock_env_path):
|
||||
def test_buildcache_tag(
|
||||
install_mockery, mock_fetch, mutable_mock_env_path, disable_parallel_buildcache_push
|
||||
):
|
||||
"""Tests whether we can create an OCI image from a full environment with multiple roots."""
|
||||
env("create", "test")
|
||||
with ev.read("test"):
|
||||
@@ -104,7 +97,9 @@ def test_buildcache_tag(install_mockery, mock_fetch, mutable_mock_env_path):
|
||||
assert len(manifest["layers"]) == 1
|
||||
|
||||
|
||||
def test_buildcache_push_with_base_image_command(mutable_database, tmpdir):
|
||||
def test_buildcache_push_with_base_image_command(
|
||||
mutable_database, tmpdir, disable_parallel_buildcache_push
|
||||
):
|
||||
"""Test that we can push a package with a base image to an OCI registry.
|
||||
|
||||
This test is a bit involved, because we have to create a small base image."""
|
||||
@@ -205,7 +200,7 @@ def test_buildcache_push_with_base_image_command(mutable_database, tmpdir):
|
||||
|
||||
|
||||
def test_uploading_with_base_image_in_docker_image_manifest_v2_format(
|
||||
tmp_path: pathlib.Path, mutable_database
|
||||
tmp_path: pathlib.Path, mutable_database, disable_parallel_buildcache_push
|
||||
):
|
||||
"""If the base image uses an old manifest schema, Spack should also use that.
|
||||
That is necessary for container images to work with Apptainer, which is rather strict about
|
||||
@@ -291,79 +286,3 @@ def test_uploading_with_base_image_in_docker_image_manifest_v2_format(
        for layer in m["layers"]:
            assert layer["mediaType"] == "application/vnd.docker.image.rootfs.diff.tar.gzip"
        assert "annotations" not in m


def test_best_effort_upload(mutable_database: spack.database.Database, monkeypatch):
    """Failure to upload a blob or manifest should not prevent others from being uploaded -- it
    should be a best-effort operation. If any runtime dep fails to upload, it results in a missing
    layer for dependents. But we do still create manifests for dependents, so that the build cache
    is maximally useful. (The downside is that container images are not runnable)."""

    _push_blob = spack.binary_distribution._oci_push_pkg_blob
    _push_manifest = spack.binary_distribution._oci_put_manifest

    def push_blob(image_ref, spec, tmpdir):
        # fail to upload the blob of mpich
        if spec.name == "mpich":
            raise Exception("Blob Server Error")
        return _push_blob(image_ref, spec, tmpdir)

    def put_manifest(base_images, checksums, image_ref, tmpdir, extra_config, annotations, *specs):
        # fail to upload the manifest of libdwarf
        if "libdwarf" in (s.name for s in specs):
            raise Exception("Manifest Server Error")
        return _push_manifest(
            base_images, checksums, image_ref, tmpdir, extra_config, annotations, *specs
        )

    monkeypatch.setattr(spack.binary_distribution, "_oci_push_pkg_blob", push_blob)
    monkeypatch.setattr(spack.binary_distribution, "_oci_put_manifest", put_manifest)

    mirror("add", "oci-test", "oci://example.com/image")
    registry = InMemoryOCIRegistry("example.com")
    image = ImageReference.from_string("example.com/image")

    with oci_servers(registry):
        with pytest.raises(spack.error.SpackError, match="The following 2 errors occurred") as e:
            buildcache("push", "--update-index", "oci-test", "mpileaks^mpich")

        # mpich's blob failed to upload and libdwarf's manifest failed to upload
        assert re.search("mpich.+: Exception: Blob Server Error", e.value)
        assert re.search("libdwarf.+: Exception: Manifest Server Error", e.value)

        mpileaks: spack.spec.Spec = mutable_database.query_local("mpileaks^mpich")[0]

        without_manifest = ("mpich", "libdwarf")

        # Verify that manifests of mpich/libdwarf are missing due to upload failure.
        for name in without_manifest:
            tagged_img = image.with_tag(default_tag(mpileaks[name]))
            with pytest.raises(urllib.error.HTTPError, match="404"):
                get_manifest_and_config(tagged_img)

        # Collect the layer digests of successfully uploaded packages. Every package should refer
        # to its own tarballs and those of its runtime deps that were uploaded.
        pkg_to_all_digests = {}
        pkg_to_own_digest = {}
        for s in mpileaks.traverse():
            if s.name in without_manifest:
                continue
            # This should not raise a 404.
            manifest, _ = get_manifest_and_config(image.with_tag(default_tag(s)))

            # Collect layer digests
            pkg_to_all_digests[s.name] = {layer["digest"] for layer in manifest["layers"]}
            pkg_to_own_digest[s.name] = manifest["layers"][-1]["digest"]

        # Verify that all packages reference blobs of their runtime deps that uploaded fine.
        for s in mpileaks.traverse():
            if s.name in without_manifest:
                continue
            expected_digests = {
                pkg_to_own_digest[t.name]
                for t in s.traverse(deptype=("link", "run"), root=True)
                if t.name not in without_manifest
            }

            # Test with issubset, cause we don't have the blob of libdwarf as it has no manifest.
            assert expected_digests and expected_digests.issubset(pkg_to_all_digests[s.name])
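test_best_effort_upload above injects failures by wrapping spack.binary_distribution's private upload helpers with monkeypatch and re-raising for selected specs, then asserts that the push remains best-effort. A self-contained sketch of that failure-injection idiom, with a hypothetical upload() helper standing in for the real blob/manifest functions:

import sys


class FakeServerError(Exception):
    """Stand-in for a registry-side failure (hypothetical)."""


def upload(name):
    # hypothetical upload helper; the test above wraps
    # spack.binary_distribution._oci_push_pkg_blob instead
    return "uploaded:" + name


def test_partial_failure(monkeypatch):
    real_upload = upload  # keep a handle on the original

    def flaky_upload(name):
        # fail for one selected item, delegate everything else
        if name == "mpich":
            raise FakeServerError("Blob Server Error")
        return real_upload(name)

    monkeypatch.setattr(sys.modules[__name__], "upload", flaky_upload)

    results, errors = {}, {}
    for name in ("mpileaks", "mpich", "libdwarf"):
        try:
            results[name] = upload(name)
        except FakeServerError as exc:
            errors[name] = exc

    # best effort: one failure does not prevent the other uploads
    assert set(errors) == {"mpich"}
    assert set(results) == {"mpileaks", "libdwarf"}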

@@ -72,6 +72,13 @@ def spec_and_expected(request):
    return spec, Spec.from_literal(d)


def test_normalize(spec_and_expected, config, mock_packages):
    spec, expected = spec_and_expected
    spec = Spec(spec)
    spec.normalize()
    assert spec.eq_dag(expected, deptypes=False)


def test_default_variant(config, mock_packages):
    spec = Spec("optional-dep-test-3")
    spec.concretize()
@@ -259,7 +259,6 @@ def test_git_url_top_level_git_versions(version_str, tag, commit, branch):
    assert fetcher.tag == tag
    assert fetcher.commit == commit
    assert fetcher.branch == branch
    assert fetcher.url == pkg_factory("git-url-top-level").git


@pytest.mark.usefixtures("mock_packages", "config")
@@ -320,14 +319,3 @@ def test_package_deprecated_version(mock_packages, mock_fetch, mock_stage):

    assert spack.package_base.deprecated_version(pkg_cls, "1.1.0")
    assert not spack.package_base.deprecated_version(pkg_cls, "1.0.0")


def test_package_can_have_sparse_checkout_properties(mock_packages, mock_fetch, mock_stage):
    spec = Spec("git-sparsepaths-pkg")
    pkg_cls = spack.repo.PATH.get_pkg_class(spec.name)
    assert hasattr(pkg_cls, "git_sparse_paths")

    fetcher = spack.fetch_strategy.for_package_version(pkg_cls(spec), "1.0")
    assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
    assert hasattr(fetcher, "git_sparse_paths")
    assert fetcher.git_sparse_paths == pkg_cls.git_sparse_paths
@@ -11,7 +11,6 @@
import pathlib
import platform
import shutil
import urllib.error
from collections import OrderedDict

import pytest
@@ -22,7 +21,6 @@
import spack.binary_distribution as bindist
import spack.cmd.buildcache as buildcache
import spack.error
import spack.fetch_strategy
import spack.package_base
import spack.repo
import spack.store
@@ -48,7 +46,7 @@
def test_buildcache(mock_archive, tmp_path, monkeypatch, mutable_config):
    # Install a test package
    spec = Spec("trivial-install-test-package").concretized()
    monkeypatch.setattr(spec.package, "fetcher", URLFetchStrategy(url=mock_archive.url))
    monkeypatch.setattr(spec.package, "fetcher", URLFetchStrategy(mock_archive.url))
    spec.package.do_install()
    pkghash = "/" + str(spec.dag_hash(7))
@@ -480,7 +478,7 @@ def test_macho_make_paths():


@pytest.fixture()
def mock_download(monkeypatch):
def mock_download():
    """Mock a failing download strategy."""

    class FailedDownloadStrategy(spack.fetch_strategy.FetchStrategy):
@@ -489,14 +487,19 @@ def mirror_id(self):

        def fetch(self):
            raise spack.fetch_strategy.FailedDownloadError(
                urllib.error.URLError("This FetchStrategy always fails")
                "<non-existent URL>", "This FetchStrategy always fails"
            )

    fetcher = FailedDownloadStrategy()

    @property
    def fake_fn(self):
        return FailedDownloadStrategy()
        return fetcher

    monkeypatch.setattr(spack.package_base.PackageBase, "fetcher", fake_fn)
    orig_fn = spack.package_base.PackageBase.fetcher
    spack.package_base.PackageBase.fetcher = fake_fn
    yield
    spack.package_base.PackageBase.fetcher = orig_fn


@pytest.mark.parametrize(
@@ -3,19 +3,54 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import os

import pytest

import spack.config as spack_config
import spack.error
import spack.fetch_strategy as spack_fs
import spack.stage as spack_stage


def test_s3fetchstrategy_downloaded(tmp_path):
@pytest.mark.parametrize("_fetch_method", ["curl", "urllib"])
def test_s3fetchstrategy_sans_url(_fetch_method):
    """Ensure constructor with no URL fails."""
    with spack_config.override("config:url_fetch_method", _fetch_method):
        with pytest.raises(ValueError):
            spack_fs.S3FetchStrategy(None)


@pytest.mark.parametrize("_fetch_method", ["curl", "urllib"])
def test_s3fetchstrategy_bad_url(tmpdir, _fetch_method):
    """Ensure fetch with bad URL fails as expected."""
    testpath = str(tmpdir)

    with spack_config.override("config:url_fetch_method", _fetch_method):
        fetcher = spack_fs.S3FetchStrategy(url="file:///does-not-exist")
        assert fetcher is not None

        with spack_stage.Stage(fetcher, path=testpath) as stage:
            assert stage is not None
            assert fetcher.archive_file is None
            with pytest.raises(spack.error.FetchError):
                fetcher.fetch()


@pytest.mark.parametrize("_fetch_method", ["curl", "urllib"])
def test_s3fetchstrategy_downloaded(tmpdir, _fetch_method):
    """Ensure fetch with archive file already downloaded is a noop."""
    archive = tmp_path / "s3.tar.gz"
    testpath = str(tmpdir)
    archive = os.path.join(testpath, "s3.tar.gz")

    class Archived_S3FS(spack_fs.S3FetchStrategy):
        @property
        def archive_file(self):
            return archive
    with spack_config.override("config:url_fetch_method", _fetch_method):

    fetcher = Archived_S3FS(url="s3://example/s3.tar.gz")
    with spack_stage.Stage(fetcher, path=str(tmp_path)):
        fetcher.fetch()
        class Archived_S3FS(spack_fs.S3FetchStrategy):
            @property
            def archive_file(self):
                return archive

        url = "s3:///{0}".format(archive)
        fetcher = Archived_S3FS(url=url)
        with spack_stage.Stage(fetcher, path=testpath):
            fetcher.fetch()
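Both versions of the downloaded test make fetch() a noop by overriding archive_file so the strategy believes the file is already on disk. A generic sketch of that idea with hypothetical names rather than Spack's classes:

import os
import tempfile


class CachedFetch:
    """Hypothetical fetcher that only downloads when no archive is present."""

    def __init__(self, url, archive_file=None):
        self.url = url
        self._archive_file = archive_file

    @property
    def archive_file(self):
        return self._archive_file

    def fetch(self):
        if self.archive_file and os.path.exists(self.archive_file):
            return self.archive_file  # already downloaded: nothing to do
        raise NotImplementedError("would download {0} here".format(self.url))


# usage: pre-seed the archive so fetch() is a noop
with tempfile.TemporaryDirectory() as tmp:
    archive = os.path.join(tmp, "s3.tar.gz")
    open(archive, "wb").close()
    assert CachedFetch("s3://example/s3.tar.gz", archive).fetch() == archive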
Some files were not shown because too many files have changed in this diff