Compare commits

prerelease ... invariant-

191 commits:

5d961e8ed2, fc105a1a26, 8a9e16dc3b, 0b7fc360fa, 79d79969bb, 422f829e4e, f54c101b44, 05acd29f38,
77e2187e13, 5c88e035f2, 94bd7b9afb, f181ac199a, a8da7993ad, b808338792, 112e47cc23, 901cea7a54,
c1b2ac549d, 4693b323ac, 1f2a68f2b6, 3fcc38ef04, 22d104d7a9, 8b1009a4a0, f54526957a, 175a4bf101,
aa81d59958, 6aafefd43d, ac82f344bd, 16fd77f9da, f82554a39b, 2aaf50b8f7, b0b9cf15f7, 8898e14e69,
63c72634ea, a7eacd77e3, 09b7ea0400, b31dd46ab8, ad7417dee9, c3de3b0b6f, 6da9bf226a, b3ee954e5b,
db090b0cad, 3a6c361a85, bb5bd030d4, b9c60f96ea, 6b16c64c0e, 3ea970746d, d8f2e080e6, ecb8a48376,
30176582e4, ac17e8bea4, c30c85a99c, 2ae8eb6686, b5cc5b701c, 8e7641e584, e692d401eb, 99319b1d91,
839ed9447c, 8e5a040985, 5ddbb1566d, eb17680d28, f4d81be9cf, ea5ffe35f5, 1e37a77e72, 29427d3e9e,
2a2d1989c1, c6e292f55f, bf5e6b4aaf, 9760089089, da7c5c551d, a575fa8529, 39a65d88f6, 06ff8c88ac,
a96b67ce3d, 67d494fa0b, e37e53cfe8, cf31d20d4c, b74db341c8, e88a3f6f85, 9bd7483e73, 04c76fab63,
ecbf9fcacf, 69fb594699, d28614151f, f1d6af6c94, 192821f361, 18790ca397, c22d77a38e, d82bdb3bf7,
a042bdfe0b, 60e3e645e8, 51785437bc, 2e8db0815d, 8a6428746f, 6b9c099af8, 30814fb4e0, 3194be2e92,
41be2f5899, 02af41ebb3, 9d33c89030, 51ab7bad3b, 0b094f2473, cd306d0bc6, fdb9cf2412, a546441d2e,
141cdb6810, f2ab74efe5, 38b838e405, c037188b59, 0835a3c5f2, 38a2f9c2f2, eecd4afe58, 83624551e0,
741652caa1, 8e914308f0, 3c220d0989, 8094fa1e2f, 5c67051980, c01fb9a6d2, bf12bb57e7, 406c73ae11,
3f50ccfcdd, 9883a2144d, 94815d2227, a15563f890, ac2ede8d2f, b256a7c50d, 21e10d6d98, ed39967848,
eda0c6888e, 66055f903c, a1c57d86c3, 9da8dcae97, c93f223a73, f1faf31735, 8957ef0df5, 347ec87fc5,
cd8c46e54e, 75b03bc12f, 58511a3352, 325873a4c7, 9156e4be04, 12d3abc736, 4208aa6291, 0bad754e23,
cde2620f41, a35aa038b0, 150416919e, 281c274e0b, 16e130ece1, 7586303fba, 6501880fbf, c76098038c,
124b616b27, 1148c8f195, c57452dd08, a7e57c9a14, 85d83f9c26, 39a081d7fd, 71b65bb424, 3dcbd118df,
5dacb774f6, cb3d6549c9, 559c2f1eb9, ed1dbea77b, 6ebafe4631, 7f0bb7147d, f41b38e93d, 5fd12b7bea,
fe746bdebb, 453af4b9f7, 29cf1559cc, a9b3e1670b, 4f9aa6004b, aa2c18e4df, 0ff3e86315, df208c1095,
853f70edc8, 50970f866e, 8821300985, adc8e1d996, 1e0aac6ac3, 99e2313d81, 22690a7576, 5325cfe865,
5333925dd7, 2db99e1ff6, 68aa712a3e, 2e71bc640c, 661f3621a7, f182032337, 066666b7b1
.github/workflows/build-containers.yml (vendored, 11 changes)

@@ -57,7 +57,13 @@ jobs:
       - name: Checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683

-      - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
+      - name: Determine latest release tag
+        id: latest
+        run: |
+          git fetch --quiet --tags
+          echo "tag=$(git tag --list --sort=-v:refname | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | head -n 1)" | tee -a $GITHUB_OUTPUT
+
+      - uses: docker/metadata-action@369eb591f429131d6889c46b94e711f089e6ca96
         id: docker_meta
         with:
           images: |
@@ -71,6 +77,7 @@ jobs:
             type=semver,pattern={{major}}
             type=ref,event=branch
             type=ref,event=pr
+            type=raw,value=latest,enable=${{ github.ref == format('refs/tags/{0}', steps.latest.outputs.tag) }}

       - name: Generate the Dockerfile
         env:
@@ -113,7 +120,7 @@ jobs:
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Build & Deploy ${{ matrix.dockerfile[0] }}
-        uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75
+        uses: docker/build-push-action@48aba3b46d1b1fec4febb7c5d0c644b249a11355
         with:
           context: dockerfiles/${{ matrix.dockerfile[0] }}
           platforms: ${{ matrix.dockerfile[1] }}
.github/workflows/coverage.yml (vendored, 3 changes)

@@ -29,6 +29,7 @@ jobs:
       - run: coverage xml

       - name: "Upload coverage report to CodeCov"
-        uses: codecov/codecov-action@5c47607acb93fed5485fdbf7232e8a31425f672a
+        uses: codecov/codecov-action@05f5a9cfad807516dbbef9929c4a42df3eb78766
         with:
           verbose: true
+          fail_ci_if_error: true
@@ -3,5 +3,5 @@ clingo==5.7.1
 flake8==7.1.1
 isort==5.13.2
 mypy==1.8.0
-types-six==1.16.21.20241105
+types-six==1.17.0.20241205
 vermin==1.6.0
@@ -70,7 +70,7 @@ Tutorial
 ----------------

 We maintain a
-[**hands-on tutorial**](https://spack.readthedocs.io/en/latest/tutorial.html).
+[**hands-on tutorial**](https://spack-tutorial.readthedocs.io/).
 It covers basic to advanced usage, packaging, developer features, and large HPC
 deployments. You can do all of the exercises on your own laptop using a
 Docker container.
@@ -55,3 +55,11 @@ concretizer:
   splice:
     explicit: []
     automatic: false
+  # Maximum time, in seconds, allowed for the 'solve' phase. If set to 0, there is no time limit.
+  timeout: 0
+  # If set to true, exceeding the timeout will always result in a concretization error. If false,
+  # the best (suboptimal) model computed before the timeout is used.
+  #
+  # Setting this to false yields unreproducible results, so we advise to use that value only
+  # for debugging purposes (e.g. check which constraints can help Spack concretize faster).
+  error_on_timeout: true
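A minimal user-side sketch of the two new knobs added above (the override file location is an assumption; any user config scope would work): capping the solve phase and accepting the best suboptimal model is the debugging setup the comments describe.

# hypothetical user override, e.g. in ~/.spack/concretizer.yaml
concretizer:
  timeout: 30              # give the solver at most 30 seconds
  error_on_timeout: false  # keep the best (suboptimal) model instead of erroring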
@@ -76,6 +76,8 @@ packages:
     buildable: false
   cray-mvapich2:
     buildable: false
+  egl:
+    buildable: false
   fujitsu-mpi:
     buildable: false
   hpcx-mpi:
@@ -1326,6 +1326,7 @@ Required:
 * Microsoft Visual Studio
 * Python
 * Git
+* 7z

 Optional:
 * Intel Fortran (needed for some packages)
@@ -1391,6 +1392,13 @@ as the project providing Git support on Windows. This is additionally the recommended
 for installing Git on Windows, a link to which can be found above. Spack requires the
 utilities vendored by this project.

+"""
+7zip
+"""
+
+A tool for extracting ``.xz`` files is required for extracting source tarballs. The latest 7zip
+can be located at https://sourceforge.net/projects/sevenzip/.
+
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Step 2: Install and setup Spack
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
@@ -6,7 +6,7 @@ python-levenshtein==0.26.1
|
||||
docutils==0.21.2
|
||||
pygments==2.18.0
|
||||
urllib3==2.2.3
|
||||
pytest==8.3.3
|
||||
pytest==8.3.4
|
||||
isort==5.13.2
|
||||
black==24.10.0
|
||||
flake8==7.1.1
|
||||
|
@@ -24,6 +24,7 @@
     Callable,
     Deque,
     Dict,
+    Generator,
     Iterable,
     List,
     Match,
@@ -2772,22 +2773,6 @@ def prefixes(path):
     return paths


-@system_path_filter
-def md5sum(file):
-    """Compute the MD5 sum of a file.
-
-    Args:
-        file (str): file to be checksummed
-
-    Returns:
-        MD5 sum of the file's content
-    """
-    md5 = hashlib.md5()
-    with open(file, "rb") as f:
-        md5.update(f.read())
-    return md5.digest()
-
-
 @system_path_filter
 def remove_directory_contents(dir):
     """Remove all contents of a directory."""
@@ -2838,6 +2823,25 @@ def temporary_dir(
     remove_directory_contents(tmp_dir)


+@contextmanager
+def edit_in_place_through_temporary_file(file_path: str) -> Generator[str, None, None]:
+    """Context manager for modifying ``file_path`` in place, preserving its inode and hardlinks,
+    for functions or external tools that do not support in-place editing. Notice that this function
+    is unsafe in that it works with paths instead of file descriptors, but this is by design,
+    since we assume the call site will create a new inode at the same path."""
+    tmp_fd, tmp_path = tempfile.mkstemp(
+        dir=os.path.dirname(file_path), prefix=f"{os.path.basename(file_path)}."
+    )
+    # windows cannot replace a file with open fds, so close since the call site needs to replace.
+    os.close(tmp_fd)
+    try:
+        shutil.copyfile(file_path, tmp_path, follow_symlinks=True)
+        yield tmp_path
+        shutil.copyfile(tmp_path, file_path, follow_symlinks=True)
+    finally:
+        os.unlink(tmp_path)
+
+
 def filesummary(path, print_bytes=16) -> Tuple[int, bytes]:
     """Create a small summary of the given file. Does not error
     when file does not exist.
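A minimal usage sketch for the new context manager (the tool call is a placeholder; the alias matches how binary_distribution.py imports the module later in this compare): the call site hands the temporary path to a tool that recreates the file, and the manager copies the result back over the original inode.

import llnl.util.filesystem as fsys

# Sketch: pipe a file through a tool that unlinks/recreates its input,
# while keeping the original inode and hardlinks of the file intact.
with fsys.edit_in_place_through_temporary_file("/path/to/binary") as tmp:
    rewrite_file(tmp)  # hypothetical external tool, e.g. codesign("-fs-", tmp)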
@@ -693,19 +693,19 @@ def invalid_sha256_digest(fetcher):
             return h, True
         return None, False

-    error_msg = "Package '{}' does not use sha256 checksum".format(pkg_name)
+    error_msg = f"Package '{pkg_name}' does not use sha256 checksum"
     details = []
     for v, args in pkg.versions.items():
         fetcher = spack.fetch_strategy.for_package_version(pkg, v)
         digest, is_bad = invalid_sha256_digest(fetcher)
         if is_bad:
-            details.append("{}@{} uses {}".format(pkg_name, v, digest))
+            details.append(f"{pkg_name}@{v} uses {digest}")

     for _, resources in pkg.resources.items():
         for resource in resources:
             digest, is_bad = invalid_sha256_digest(resource.fetcher)
             if is_bad:
-                details.append("Resource in '{}' uses {}".format(pkg_name, digest))
+                details.append(f"Resource in '{pkg_name}' uses {digest}")
     if details:
         errors.append(error_cls(error_msg, details))
|
@@ -40,7 +40,7 @@
|
||||
import spack.hash_types as ht
|
||||
import spack.hooks
|
||||
import spack.hooks.sbang
|
||||
import spack.mirror
|
||||
import spack.mirrors.mirror
|
||||
import spack.oci.image
|
||||
import spack.oci.oci
|
||||
import spack.oci.opener
|
||||
@@ -369,7 +369,7 @@ def update(self, with_cooldown=False):
|
||||
on disk under ``_index_cache_root``)."""
|
||||
self._init_local_index_cache()
|
||||
configured_mirror_urls = [
|
||||
m.fetch_url for m in spack.mirror.MirrorCollection(binary=True).values()
|
||||
m.fetch_url for m in spack.mirrors.mirror.MirrorCollection(binary=True).values()
|
||||
]
|
||||
items_to_remove = []
|
||||
spec_cache_clear_needed = False
|
||||
@@ -1176,7 +1176,7 @@ def _url_upload_tarball_and_specfile(
|
||||
|
||||
|
||||
class Uploader:
|
||||
def __init__(self, mirror: spack.mirror.Mirror, force: bool, update_index: bool):
|
||||
def __init__(self, mirror: spack.mirrors.mirror.Mirror, force: bool, update_index: bool):
|
||||
self.mirror = mirror
|
||||
self.force = force
|
||||
self.update_index = update_index
|
||||
@@ -1224,7 +1224,7 @@ def tag(self, tag: str, roots: List[spack.spec.Spec]):
|
||||
class OCIUploader(Uploader):
|
||||
def __init__(
|
||||
self,
|
||||
mirror: spack.mirror.Mirror,
|
||||
mirror: spack.mirrors.mirror.Mirror,
|
||||
force: bool,
|
||||
update_index: bool,
|
||||
base_image: Optional[str],
|
||||
@@ -1273,7 +1273,7 @@ def tag(self, tag: str, roots: List[spack.spec.Spec]):
|
||||
class URLUploader(Uploader):
|
||||
def __init__(
|
||||
self,
|
||||
mirror: spack.mirror.Mirror,
|
||||
mirror: spack.mirrors.mirror.Mirror,
|
||||
force: bool,
|
||||
update_index: bool,
|
||||
signing_key: Optional[str],
|
||||
@@ -1297,7 +1297,7 @@ def push(
|
||||
|
||||
|
||||
def make_uploader(
|
||||
mirror: spack.mirror.Mirror,
|
||||
mirror: spack.mirrors.mirror.Mirror,
|
||||
force: bool = False,
|
||||
update_index: bool = False,
|
||||
signing_key: Optional[str] = None,
|
||||
@@ -1953,9 +1953,9 @@ def download_tarball(spec, unsigned: Optional[bool] = False, mirrors_for_spec=No
|
||||
"signature_verified": "true-if-binary-pkg-was-already-verified"
|
||||
}
|
||||
"""
|
||||
configured_mirrors: Iterable[spack.mirror.Mirror] = spack.mirror.MirrorCollection(
|
||||
binary=True
|
||||
).values()
|
||||
configured_mirrors: Iterable[spack.mirrors.mirror.Mirror] = (
|
||||
spack.mirrors.mirror.MirrorCollection(binary=True).values()
|
||||
)
|
||||
if not configured_mirrors:
|
||||
tty.die("Please add a spack mirror to allow download of pre-compiled packages.")
|
||||
|
||||
@@ -1980,7 +1980,7 @@ def fetch_url_to_mirror(url):
|
||||
for mirror in configured_mirrors:
|
||||
if mirror.fetch_url == url:
|
||||
return mirror
|
||||
return spack.mirror.Mirror(url)
|
||||
return spack.mirrors.mirror.Mirror(url)
|
||||
|
||||
mirrors = [fetch_url_to_mirror(url) for url in mirror_urls]
|
||||
|
||||
@@ -2334,7 +2334,9 @@ def is_backup_file(file):
|
||||
if not codesign:
|
||||
return
|
||||
for binary in changed_files:
|
||||
codesign("-fs-", binary)
|
||||
# preserve the original inode by running codesign on a copy
|
||||
with fsys.edit_in_place_through_temporary_file(binary) as tmp_binary:
|
||||
codesign("-fs-", tmp_binary)
|
||||
|
||||
# If we are installing back to the same location
|
||||
# relocate the sbang location if the spack directory changed
|
||||
@@ -2648,7 +2650,7 @@ def try_direct_fetch(spec, mirrors=None):
|
||||
specfile_is_signed = False
|
||||
found_specs = []
|
||||
|
||||
binary_mirrors = spack.mirror.MirrorCollection(mirrors=mirrors, binary=True).values()
|
||||
binary_mirrors = spack.mirrors.mirror.MirrorCollection(mirrors=mirrors, binary=True).values()
|
||||
|
||||
for mirror in binary_mirrors:
|
||||
buildcache_fetch_url_json = url_util.join(
|
||||
@@ -2709,7 +2711,7 @@ def get_mirrors_for_spec(spec=None, mirrors_to_check=None, index_only=False):
|
||||
if spec is None:
|
||||
return []
|
||||
|
||||
if not spack.mirror.MirrorCollection(mirrors=mirrors_to_check, binary=True):
|
||||
if not spack.mirrors.mirror.MirrorCollection(mirrors=mirrors_to_check, binary=True):
|
||||
tty.debug("No Spack mirrors are currently configured")
|
||||
return {}
|
||||
|
||||
@@ -2748,7 +2750,7 @@ def clear_spec_cache():
|
||||
|
||||
def get_keys(install=False, trust=False, force=False, mirrors=None):
|
||||
"""Get pgp public keys available on mirror with suffix .pub"""
|
||||
mirror_collection = mirrors or spack.mirror.MirrorCollection(binary=True)
|
||||
mirror_collection = mirrors or spack.mirrors.mirror.MirrorCollection(binary=True)
|
||||
|
||||
if not mirror_collection:
|
||||
tty.die("Please add a spack mirror to allow " + "download of build caches.")
|
||||
@@ -2803,7 +2805,7 @@ def get_keys(install=False, trust=False, force=False, mirrors=None):
|
||||
|
||||
|
||||
def _url_push_keys(
|
||||
*mirrors: Union[spack.mirror.Mirror, str],
|
||||
*mirrors: Union[spack.mirrors.mirror.Mirror, str],
|
||||
keys: List[str],
|
||||
tmpdir: str,
|
||||
update_index: bool = False,
|
||||
@@ -2870,7 +2872,7 @@ def check_specs_against_mirrors(mirrors, specs, output_file=None):
|
||||
|
||||
"""
|
||||
rebuilds = {}
|
||||
for mirror in spack.mirror.MirrorCollection(mirrors, binary=True).values():
|
||||
for mirror in spack.mirrors.mirror.MirrorCollection(mirrors, binary=True).values():
|
||||
tty.debug("Checking for built specs at {0}".format(mirror.fetch_url))
|
||||
|
||||
rebuild_list = []
|
||||
@@ -2914,7 +2916,7 @@ def _download_buildcache_entry(mirror_root, descriptions):
|
||||
|
||||
|
||||
def download_buildcache_entry(file_descriptions, mirror_url=None):
|
||||
if not mirror_url and not spack.mirror.MirrorCollection(binary=True):
|
||||
if not mirror_url and not spack.mirrors.mirror.MirrorCollection(binary=True):
|
||||
tty.die(
|
||||
"Please provide or add a spack mirror to allow " + "download of buildcache entries."
|
||||
)
|
||||
@@ -2923,7 +2925,7 @@ def download_buildcache_entry(file_descriptions, mirror_url=None):
|
||||
mirror_root = os.path.join(mirror_url, BUILD_CACHE_RELATIVE_PATH)
|
||||
return _download_buildcache_entry(mirror_root, file_descriptions)
|
||||
|
||||
for mirror in spack.mirror.MirrorCollection(binary=True).values():
|
||||
for mirror in spack.mirrors.mirror.MirrorCollection(binary=True).values():
|
||||
mirror_root = os.path.join(mirror.fetch_url, BUILD_CACHE_RELATIVE_PATH)
|
||||
|
||||
if _download_buildcache_entry(mirror_root, file_descriptions):
|
||||
|
@@ -37,7 +37,7 @@
 import spack.binary_distribution
 import spack.config
 import spack.detection
-import spack.mirror
+import spack.mirrors.mirror
 import spack.platforms
 import spack.spec
 import spack.store
@@ -91,7 +91,7 @@ def __init__(self, conf: ConfigDictionary) -> None:
         self.metadata_dir = spack.util.path.canonicalize_path(conf["metadata"])

         # Promote (relative) paths to file urls
-        self.url = spack.mirror.Mirror(conf["info"]["url"]).fetch_url
+        self.url = spack.mirrors.mirror.Mirror(conf["info"]["url"]).fetch_url

     @property
     def mirror_scope(self) -> spack.config.InternalConfigScope:
@@ -882,6 +882,9 @@ def __init__(self, *roots: spack.spec.Spec, context: Context):
         elif context == Context.RUN:
             self.root_depflag = dt.RUN | dt.LINK

+    def accept(self, item):
+        return True
+
     def neighbors(self, item):
         spec = item.edge.spec
         if spec.dag_hash() in self.root_hashes:
@@ -919,19 +922,19 @@ def effective_deptypes(
     a flag specifying in what way they do so. The list is ordered topologically
     from root to leaf, meaning that environment modifications should be applied
     in reverse so that dependents override dependencies, not the other way around."""
-    visitor = traverse.TopoVisitor(
-        EnvironmentVisitor(*specs, context=context),
-        key=lambda x: x.dag_hash(),
+    topo_sorted_edges = traverse.traverse_topo_edges_generator(
+        traverse.with_artificial_edges(specs),
+        visitor=EnvironmentVisitor(*specs, context=context),
+        key=traverse.by_dag_hash,
+        root=True,
+        all_edges=True,
     )
-    traverse.traverse_depth_first_with_visitor(traverse.with_artificial_edges(specs), visitor)

     # Dictionary with "no mode" as default value, so it's easy to write modes[x] |= flag.
     use_modes = defaultdict(lambda: UseMode(0))
     nodes_with_type = []

-    for edge in visitor.edges:
+    for edge in topo_sorted_edges:
         parent, child, depflag = edge.parent, edge.spec, edge.depflag

         # Mark the starting point
@@ -1423,27 +1426,20 @@ def make_stack(tb, stack=None):
     # We found obj, the Package implementation we care about.
     # Point out the location in the install method where we failed.
     filename = inspect.getfile(frame.f_code)
-    lineno = frame.f_lineno
-    if os.path.basename(filename) == "package.py":
-        # subtract 1 because we inject a magic import at the top of package files.
-        # TODO: get rid of the magic import.
-        lineno -= 1
-
-    lines = ["{0}:{1:d}, in {2}:".format(filename, lineno, frame.f_code.co_name)]
+    lines = [f"{filename}:{frame.f_lineno}, in {frame.f_code.co_name}:"]

     # Build a message showing context in the install method.
     sourcelines, start = inspect.getsourcelines(frame)

     # Calculate lineno of the error relative to the start of the function.
-    fun_lineno = lineno - start
+    fun_lineno = frame.f_lineno - start
     start_ctx = max(0, fun_lineno - context)
     sourcelines = sourcelines[start_ctx : fun_lineno + context + 1]

     for i, line in enumerate(sourcelines):
         is_error = start_ctx + i == fun_lineno
-        mark = ">> " if is_error else "   "
         # Add start to get lineno relative to start of file, not function.
-        marked = "  {0}{1:-6d}{2}".format(mark, start + start_ctx + i, line.rstrip())
+        marked = f"  {'>> ' if is_error else '   '}{start + start_ctx + i:-6d}{line.rstrip()}"
         if is_error:
             marked = colorize("@R{%s}" % cescape(marked))
         lines.append(marked)
@@ -9,7 +9,7 @@
 import re
 import sys
 from itertools import chain
-from typing import Any, List, Optional, Set, Tuple
+from typing import Any, List, Optional, Tuple

 import llnl.util.filesystem as fs
 from llnl.util.lang import stable_partition
@@ -21,6 +21,7 @@
 import spack.phase_callbacks
 import spack.spec
 import spack.util.prefix
+from spack import traverse
 from spack.directives import build_system, conflicts, depends_on, variant
 from spack.multimethod import when
 from spack.util.environment import filter_system_paths
@@ -166,15 +167,18 @@ def _values(x):
 def get_cmake_prefix_path(pkg: spack.package_base.PackageBase) -> List[str]:
     """Obtain the CMAKE_PREFIX_PATH entries for a package, based on the cmake_prefix_path package
     attribute of direct build/test and transitive link dependencies."""
-    # Add direct build/test deps
-    selected: Set[str] = {s.dag_hash() for s in pkg.spec.dependencies(deptype=dt.BUILD | dt.TEST)}
-    # Add transitive link deps
-    selected.update(s.dag_hash() for s in pkg.spec.traverse(root=False, deptype=dt.LINK))
-    # Separate out externals so they do not shadow Spack prefixes
-    externals, spack_built = stable_partition(
-        (s for s in pkg.spec.traverse(root=False, order="topo") if s.dag_hash() in selected),
-        lambda x: x.external,
+    edges = traverse.traverse_topo_edges_generator(
+        traverse.with_artificial_edges([pkg.spec]),
+        visitor=traverse.MixedDepthVisitor(
+            direct=dt.BUILD | dt.TEST, transitive=dt.LINK, key=traverse.by_dag_hash
+        ),
+        key=traverse.by_dag_hash,
+        root=False,
+        all_edges=False,  # cover all nodes, not all edges
     )
+    ordered_specs = [edge.spec for edge in edges]
+    # Separate out externals so they do not shadow Spack prefixes
+    externals, spack_built = stable_partition((s for s in ordered_specs), lambda x: x.external)

     return filter_system_paths(
         path for spec in chain(spack_built, externals) for path in spec.package.cmake_prefix_paths
@@ -255,7 +255,7 @@ def libs(self):
         return find_libraries("*", root=self.component_prefix.lib, recursive=not self.v2_layout)


-class IntelOneApiLibraryPackageWithSdk(IntelOneApiPackage):
+class IntelOneApiLibraryPackageWithSdk(IntelOneApiLibraryPackage):
     """Base class for Intel oneAPI library packages with SDK components.

     Contains some convenient default implementations for libraries
@@ -37,7 +37,8 @@
 import spack.config as cfg
 import spack.error
 import spack.main
-import spack.mirror
+import spack.mirrors.mirror
+import spack.mirrors.utils
 import spack.paths
 import spack.repo
 import spack.spec
@@ -204,7 +205,7 @@ def _print_staging_summary(spec_labels, stages, rebuild_decisions):
     if not stages:
         return

-    mirrors = spack.mirror.MirrorCollection(binary=True)
+    mirrors = spack.mirrors.mirror.MirrorCollection(binary=True)
     tty.msg("Checked the following mirrors for binaries:")
     for m in mirrors.values():
         tty.msg(f"  {m.fetch_url}")
@@ -797,7 +798,7 @@ def ensure_expected_target_path(path):
             path = path.replace("\\", "/")
         return path

-    pipeline_mirrors = spack.mirror.MirrorCollection(binary=True)
+    pipeline_mirrors = spack.mirrors.mirror.MirrorCollection(binary=True)
     buildcache_destination = None
     if "buildcache-destination" not in pipeline_mirrors:
         raise SpackCIError("spack ci generate requires a mirror named 'buildcache-destination'")
@@ -1323,7 +1324,7 @@ def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: bool):
     """
     tty.debug(f"Pushing to build cache ({'signed' if sign_binaries else 'unsigned'})")
     signing_key = bindist.select_signing_key() if sign_binaries else None
-    mirror = spack.mirror.Mirror.from_url(mirror_url)
+    mirror = spack.mirrors.mirror.Mirror.from_url(mirror_url)
     try:
         with bindist.make_uploader(mirror, signing_key=signing_key) as uploader:
             uploader.push_or_raise([spec])
@@ -1343,7 +1344,7 @@ def remove_other_mirrors(mirrors_to_keep, scope=None):
             mirrors_to_remove.append(name)

     for mirror_name in mirrors_to_remove:
-        spack.mirror.remove(mirror_name, scope)
+        spack.mirrors.utils.remove(mirror_name, scope)


 def copy_files_to_artifacts(src, artifacts_dir):
@@ -4,6 +4,7 @@
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)

 import argparse
+import difflib
 import importlib
 import os
 import re
@@ -125,6 +126,8 @@ def get_module(cmd_name):
             tty.debug("Imported {0} from built-in commands".format(pname))
     except ImportError:
         module = spack.extensions.get_module(cmd_name)
+        if not module:
+            raise CommandNotFoundError(cmd_name)

     attr_setdefault(module, SETUP_PARSER, lambda *args: None)  # null-op
     attr_setdefault(module, DESCRIPTION, "")
@@ -691,3 +694,24 @@ def find_environment(args):
 def first_line(docstring):
     """Return the first line of the docstring."""
     return docstring.split("\n")[0]
+
+
+class CommandNotFoundError(spack.error.SpackError):
+    """Exception class thrown when a requested command is not recognized as
+    such.
+    """
+
+    def __init__(self, cmd_name):
+        msg = (
+            f"{cmd_name} is not a recognized Spack command or extension command; "
+            "check with `spack commands`."
+        )
+        long_msg = None
+
+        similar = difflib.get_close_matches(cmd_name, all_commands())
+
+        if 1 <= len(similar) <= 5:
+            long_msg = "\nDid you mean one of the following commands?\n  "
+            long_msg += "\n  ".join(similar)
+
+        super().__init__(msg, long_msg)
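The "did you mean" suggestions come straight from the standard library; a standalone sketch of the lookup (the command list here is illustrative, Spack itself passes all_commands()):

import difflib

commands = ["install", "uninstall", "info", "find", "mirror"]
# Returns up to n=3 candidates above the default 0.6 similarity cutoff,
# best match first.
print(difflib.get_close_matches("instal", commands))  # ['install', 'uninstall']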
@@ -16,7 +16,7 @@
 import spack.bootstrap.config
 import spack.bootstrap.core
 import spack.config
-import spack.mirror
+import spack.mirrors.utils
 import spack.spec
 import spack.stage
 import spack.util.path
@@ -400,7 +400,7 @@ def _mirror(args):
         llnl.util.tty.set_msg_enabled(False)
         spec = spack.spec.Spec(spec_str).concretized()
         for node in spec.traverse():
-            spack.mirror.create(mirror_dir, [node])
+            spack.mirrors.utils.create(mirror_dir, [node])
         llnl.util.tty.set_msg_enabled(True)

     if args.binary_packages:
@@ -21,7 +21,7 @@
 import spack.deptypes as dt
 import spack.environment as ev
 import spack.error
-import spack.mirror
+import spack.mirrors.mirror
 import spack.oci.oci
 import spack.spec
 import spack.stage
@@ -392,7 +392,7 @@ def push_fn(args):
         roots = spack.cmd.require_active_env(cmd_name="buildcache push").concrete_roots()

     mirror = args.mirror
-    assert isinstance(mirror, spack.mirror.Mirror)
+    assert isinstance(mirror, spack.mirrors.mirror.Mirror)

     push_url = mirror.push_url

@@ -750,7 +750,7 @@ def manifest_copy(manifest_file_list, dest_mirror=None):
             copy_buildcache_file(copy_file["src"], dest)


-def update_index(mirror: spack.mirror.Mirror, update_keys=False):
+def update_index(mirror: spack.mirrors.mirror.Mirror, update_keys=False):
     # Special case OCI images for now.
     try:
         image_ref = spack.oci.oci.image_from_mirror(mirror)
@@ -20,7 +20,7 @@
 import spack.config as cfg
 import spack.environment as ev
 import spack.hash_types as ht
-import spack.mirror
+import spack.mirrors.mirror
 import spack.util.gpg as gpg_util
 import spack.util.timer as timer
 import spack.util.url as url_util
@@ -240,7 +240,7 @@ def ci_reindex(args):
     ci_mirrors = yaml_root["mirrors"]
     mirror_urls = [url for url in ci_mirrors.values()]
     remote_mirror_url = mirror_urls[0]
-    mirror = spack.mirror.Mirror(remote_mirror_url)
+    mirror = spack.mirrors.mirror.Mirror(remote_mirror_url)

     buildcache.update_index(mirror, update_keys=True)

@@ -328,7 +328,7 @@ def ci_rebuild(args):

     full_rebuild = True if rebuild_everything and rebuild_everything.lower() == "true" else False

-    pipeline_mirrors = spack.mirror.MirrorCollection(binary=True)
+    pipeline_mirrors = spack.mirrors.mirror.MirrorCollection(binary=True)
     buildcache_destination = None
     if "buildcache-destination" not in pipeline_mirrors:
         tty.die("spack ci rebuild requires a mirror named 'buildcache-destination")
@@ -14,7 +14,8 @@
 import spack.config
 import spack.deptypes as dt
 import spack.environment as ev
-import spack.mirror
+import spack.mirrors.mirror
+import spack.mirrors.utils
 import spack.reporters
 import spack.spec
 import spack.store
@@ -689,31 +690,31 @@ def mirror_name_or_url(m):

     # If there's a \ or / in the name, it's interpreted as a path or url.
     if "/" in m or "\\" in m or m in (".", ".."):
-        return spack.mirror.Mirror(m)
+        return spack.mirrors.mirror.Mirror(m)

     # Otherwise, the named mirror is required to exist.
     try:
-        return spack.mirror.require_mirror_name(m)
+        return spack.mirrors.utils.require_mirror_name(m)
     except ValueError as e:
         raise argparse.ArgumentTypeError(f"{e}. Did you mean {os.path.join('.', m)}?") from e


 def mirror_url(url):
     try:
-        return spack.mirror.Mirror.from_url(url)
+        return spack.mirrors.mirror.Mirror.from_url(url)
     except ValueError as e:
         raise argparse.ArgumentTypeError(str(e)) from e


 def mirror_directory(path):
     try:
-        return spack.mirror.Mirror.from_local_path(path)
+        return spack.mirrors.mirror.Mirror.from_local_path(path)
     except ValueError as e:
         raise argparse.ArgumentTypeError(str(e)) from e


 def mirror_name(name):
     try:
-        return spack.mirror.require_mirror_name(name)
+        return spack.mirrors.utils.require_mirror_name(name)
     except ValueError as e:
         raise argparse.ArgumentTypeError(str(e)) from e
@@ -90,12 +90,16 @@ def compare_specs(a, b, to_string=False, color=None, ignore_packages=None):
     # specs and to descend into dependency hashes so we include all facts.
     a_facts = set(
         shift(func)
-        for func in setup.spec_clauses(a, body=True, expand_hashes=True, concrete_build_deps=True)
+        for func in setup.spec_clauses(
+            a, body=True, expand_hashes=True, concrete_build_deps=True, node=True
+        )
         if func.name == "attr"
     )
     b_facts = set(
         shift(func)
-        for func in setup.spec_clauses(b, body=True, expand_hashes=True, concrete_build_deps=True)
+        for func in setup.spec_clauses(
+            b, body=True, expand_hashes=True, concrete_build_deps=True, node=True
+        )
         if func.name == "attr"
     )
@@ -8,7 +8,7 @@
 import tempfile

 import spack.binary_distribution
-import spack.mirror
+import spack.mirrors.mirror
 import spack.paths
 import spack.stage
 import spack.util.gpg
@@ -217,11 +217,11 @@ def gpg_publish(args):
     mirror = None
     if args.directory:
         url = spack.util.url.path_to_file_url(args.directory)
-        mirror = spack.mirror.Mirror(url, url)
+        mirror = spack.mirrors.mirror.Mirror(url, url)
     elif args.mirror_name:
-        mirror = spack.mirror.MirrorCollection(binary=True).lookup(args.mirror_name)
+        mirror = spack.mirrors.mirror.MirrorCollection(binary=True).lookup(args.mirror_name)
     elif args.mirror_url:
-        mirror = spack.mirror.Mirror(args.mirror_url, args.mirror_url)
+        mirror = spack.mirrors.mirror.Mirror(args.mirror_url, args.mirror_url)

     with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
         spack.binary_distribution._url_push_keys(
@@ -14,7 +14,8 @@
 import spack.concretize
 import spack.config
 import spack.environment as ev
-import spack.mirror
+import spack.mirrors.mirror
+import spack.mirrors.utils
 import spack.repo
 import spack.spec
 import spack.util.web as web_util
@@ -365,15 +366,15 @@ def mirror_add(args):
             connection["autopush"] = args.autopush
         if args.signed is not None:
             connection["signed"] = args.signed
-        mirror = spack.mirror.Mirror(connection, name=args.name)
+        mirror = spack.mirrors.mirror.Mirror(connection, name=args.name)
     else:
-        mirror = spack.mirror.Mirror(args.url, name=args.name)
-    spack.mirror.add(mirror, args.scope)
+        mirror = spack.mirrors.mirror.Mirror(args.url, name=args.name)
+    spack.mirrors.utils.add(mirror, args.scope)


 def mirror_remove(args):
     """remove a mirror by name"""
-    spack.mirror.remove(args.name, args.scope)
+    spack.mirrors.utils.remove(args.name, args.scope)


 def _configure_mirror(args):
@@ -382,7 +383,7 @@ def _configure_mirror(args):
     if args.name not in mirrors:
         tty.die(f"No mirror found with name {args.name}.")

-    entry = spack.mirror.Mirror(mirrors[args.name], args.name)
+    entry = spack.mirrors.mirror.Mirror(mirrors[args.name], args.name)
     direction = "fetch" if args.fetch else "push" if args.push else None
     changes = {}
     if args.url:
@@ -449,7 +450,7 @@ def mirror_set_url(args):
 def mirror_list(args):
     """print out available mirrors to the console"""

-    mirrors = spack.mirror.MirrorCollection(scope=args.scope)
+    mirrors = spack.mirrors.mirror.MirrorCollection(scope=args.scope)
     if not mirrors:
         tty.msg("No mirrors configured.")
         return
@@ -489,9 +490,9 @@ def concrete_specs_from_user(args):

 def extend_with_additional_versions(specs, num_versions):
     if num_versions == "all":
-        mirror_specs = spack.mirror.get_all_versions(specs)
+        mirror_specs = spack.mirrors.utils.get_all_versions(specs)
     else:
-        mirror_specs = spack.mirror.get_matching_versions(specs, num_versions=num_versions)
+        mirror_specs = spack.mirrors.utils.get_matching_versions(specs, num_versions=num_versions)
     mirror_specs = [x.concretized() for x in mirror_specs]
     return mirror_specs

@@ -570,7 +571,7 @@ def concrete_specs_from_environment():

 def all_specs_with_all_versions():
     specs = [spack.spec.Spec(n) for n in spack.repo.all_package_names()]
-    mirror_specs = spack.mirror.get_all_versions(specs)
+    mirror_specs = spack.mirrors.utils.get_all_versions(specs)
     mirror_specs.sort(key=lambda s: (s.name, s.version))
     return mirror_specs

@@ -659,19 +660,21 @@ def _specs_and_action(args):


 def create_mirror_for_all_specs(mirror_specs, path, skip_unstable_versions):
-    mirror_cache, mirror_stats = spack.mirror.mirror_cache_and_stats(
+    mirror_cache, mirror_stats = spack.mirrors.utils.mirror_cache_and_stats(
         path, skip_unstable_versions=skip_unstable_versions
     )
     for candidate in mirror_specs:
         pkg_cls = spack.repo.PATH.get_pkg_class(candidate.name)
         pkg_obj = pkg_cls(spack.spec.Spec(candidate))
         mirror_stats.next_spec(pkg_obj.spec)
-        spack.mirror.create_mirror_from_package_object(pkg_obj, mirror_cache, mirror_stats)
+        spack.mirrors.utils.create_mirror_from_package_object(pkg_obj, mirror_cache, mirror_stats)
     process_mirror_stats(*mirror_stats.stats())


 def create_mirror_for_individual_specs(mirror_specs, path, skip_unstable_versions):
-    present, mirrored, error = spack.mirror.create(path, mirror_specs, skip_unstable_versions)
+    present, mirrored, error = spack.mirrors.utils.create(
+        path, mirror_specs, skip_unstable_versions
+    )
     tty.msg("Summary for mirror in {}".format(path))
     process_mirror_stats(present, mirrored, error)

@@ -681,7 +684,7 @@ def mirror_destroy(args):
     mirror_url = None

     if args.mirror_name:
-        result = spack.mirror.MirrorCollection().lookup(args.mirror_name)
+        result = spack.mirrors.mirror.MirrorCollection().lookup(args.mirror_name)
         mirror_url = result.push_url
     elif args.mirror_url:
         mirror_url = args.mirror_url
@@ -8,6 +8,7 @@
 import spack.cmd.common.arguments
 import spack.cmd.modules
 import spack.config
+import spack.modules
 import spack.modules.lmod
@@ -7,6 +7,7 @@
 import spack.cmd.common.arguments
 import spack.cmd.modules
 import spack.config
+import spack.modules
 import spack.modules.tcl
@@ -15,6 +15,7 @@
 from llnl.util.filesystem import working_dir

 import spack.paths
+import spack.repo
 import spack.util.git
 from spack.util.executable import Executable, which

@@ -38,7 +39,7 @@ def grouper(iterable, n, fillvalue=None):
 #: double-check the results of other tools (if, e.g., --fix was provided)
 #: The list maps an executable name to a method to ensure the tool is
 #: bootstrapped or present in the environment.
-tool_names = ["import-check", "isort", "black", "flake8", "mypy"]
+tool_names = ["import", "isort", "black", "flake8", "mypy"]

 #: warnings to ignore in mypy
 mypy_ignores = [
@@ -322,8 +323,6 @@ def process_files(file_list, is_args):
         rewrite_and_print_output(output, args, pat, replacement)


 packages_isort_args = (
-    "--rm",
-    "spack",
     "--rm",
     "spack.pkgkit",
     "--rm",
@@ -370,10 +369,19 @@ def run_black(black_cmd, file_list, args):

 def _module_part(root: str, expr: str):
     parts = expr.split(".")
+    # spack.pkg is for repositories, don't try to resolve it here.
+    if ".".join(parts[:2]) == spack.repo.ROOT_PYTHON_NAMESPACE:
+        return None
     while parts:
         f1 = os.path.join(root, "lib", "spack", *parts) + ".py"
         f2 = os.path.join(root, "lib", "spack", *parts, "__init__.py")
-        if os.path.exists(f1) or os.path.exists(f2):
+
+        if (
+            os.path.exists(f1)
+            # ensure case sensitive match
+            and f"{parts[-1]}.py" in os.listdir(os.path.dirname(f1))
+            or os.path.exists(f2)
+        ):
             return ".".join(parts)
         parts.pop()
     return None
@@ -389,7 +397,7 @@ def _run_import_check(
     out=sys.stdout,
 ):
     if sys.version_info < (3, 9):
-        print("import-check requires Python 3.9 or later")
+        print("import check requires Python 3.9 or later")
         return 0

     is_use = re.compile(r"(?<!from )(?<!import )(?:llnl|spack)\.[a-zA-Z0-9_\.]+")
@@ -431,10 +439,11 @@ def _run_import_check(
             module = _module_part(root, m.group(0))
             if not module or module in to_add:
                 continue
-            if f"import {module}" not in filtered_contents:
-                to_add.add(module)
-                exit_code = 1
-                print(f"{pretty_path}: missing import: {module}", file=out)
+            if re.search(rf"import {re.escape(module)}\b(?!\.)", contents):
+                continue
+            to_add.add(module)
+            exit_code = 1
+            print(f"{pretty_path}: missing import: {module} ({m.group(0)})", file=out)

         if not fix or not to_add and not to_remove:
             continue
@@ -465,7 +474,7 @@ def _run_import_check(
     return exit_code


-@tool("import-check", external=False)
+@tool("import", external=False)
 def run_import_check(import_check_cmd, file_list, args):
     exit_code = _run_import_check(
         file_list,
@@ -474,7 +483,7 @@ def run_import_check(import_check_cmd, file_list, args):
         root=args.root,
         working_dir=args.initial_working_dir,
     )
-    print_tool_result("import-check", exit_code)
+    print_tool_result("import", exit_code)
     return exit_code
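A standalone sketch of why the substring test above was replaced by a word-boundary regex (the file contents are illustrative):

import re

contents = "import spack.repo\n\nspack.repo.PATH.get_pkg_class(...)"

# Old check: plain substring match. "import spack" is found inside
# "import spack.repo", so a genuinely missing top-level import is not flagged.
print("import spack" in contents)  # True (false negative)

# New check: require a word boundary not followed by a dot, so only a real
# "import spack" statement counts.
print(bool(re.search(r"import spack\b(?!\.)", contents)))  # False (correctly flagged)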
@@ -124,8 +124,8 @@ def setup_custom_environment(self, pkg, env):
         # Edge cases for Intel's oneAPI compilers when using the legacy classic compilers:
         # Always pass flags to disable deprecation warnings, since these warnings can
         # confuse tools that parse the output of compiler commands (e.g. version checks).
-        if self.real_version >= Version("2021") and self.real_version <= Version("2023"):
+        if self.real_version >= Version("2021") and self.real_version < Version("2024"):
             env.append_flags("SPACK_ALWAYS_CFLAGS", "-diag-disable=10441")
             env.append_flags("SPACK_ALWAYS_CXXFLAGS", "-diag-disable=10441")
-        if self.real_version >= Version("2021") and self.real_version <= Version("2024"):
+        if self.real_version >= Version("2021") and self.real_version < Version("2025"):
             env.append_flags("SPACK_ALWAYS_FFLAGS", "-diag-disable=10448")

@@ -155,10 +155,10 @@ def setup_custom_environment(self, pkg, env):
         # icx+icpx+ifx or icx+icpx+ifort. But to be on the safe side (some users may
         # want to try to swap icpx against icpc, for example), and since the Intel LLVM
         # compilers accept these diag-disable flags, we apply them for all compilers.
-        if self.real_version >= Version("2021") and self.real_version <= Version("2023"):
+        if self.real_version >= Version("2021") and self.real_version < Version("2024"):
             env.append_flags("SPACK_ALWAYS_CFLAGS", "-diag-disable=10441")
             env.append_flags("SPACK_ALWAYS_CXXFLAGS", "-diag-disable=10441")
-        if self.real_version >= Version("2021") and self.real_version <= Version("2024"):
+        if self.real_version >= Version("2021") and self.real_version < Version("2025"):
             env.append_flags("SPACK_ALWAYS_FFLAGS", "-diag-disable=10448")

         # 2024 release bumped the libsycl version because of an ABI
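The loosened upper bounds matter because point releases extend the version prefix; a self-contained sketch of prefix-style ordering (a simplified model, not Spack's actual Version class):

# Under prefix-style ordering, a longer version with the same leading
# components sorts after its bare prefix, so 2023.2 > 2023.
def vkey(v: str):
    return tuple(int(p) for p in v.split("."))

assert vkey("2023.2") > vkey("2023")  # so `<= Version("2023")` missed 2023.x ...
assert vkey("2023.2") < vkey("2024")  # ... while `< Version("2024")` covers it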
@@ -160,6 +160,11 @@ def concretize_separately(
     # TODO: support parallel concretization on macOS and Windows
     num_procs = min(len(args), spack.config.determine_number_of_jobs(parallel=True))

+    msg = "Starting concretization"
+    if sys.platform not in ("darwin", "win32") and num_procs > 1:
+        msg += f" pool with {num_procs} processes"
+    tty.msg(msg)
+
     for j, (i, concrete, duration) in enumerate(
         spack.util.parallel.imap_unordered(
             _concretize_task, args, processes=num_procs, debug=tty.is_debug(), maxtaskperchild=1
@@ -3,7 +3,7 @@
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
 """Data structures that represent Spack's dependency relationships."""
-from typing import Dict, List
+from typing import Dict, List, Type

 import spack.deptypes as dt
 import spack.spec
@@ -38,7 +38,7 @@ class Dependency:

     def __init__(
         self,
-        pkg: "spack.package_base.PackageBase",
+        pkg: Type["spack.package_base.PackageBase"],
         spec: "spack.spec.Spec",
         depflag: dt.DepFlag = dt.DEFAULT,
     ):
@@ -21,6 +21,7 @@ class OpenMpi(Package):
 * ``conflicts``
 * ``depends_on``
 * ``extends``
+* ``license``
 * ``patch``
 * ``provides``
 * ``resource``
@@ -34,12 +35,12 @@ class OpenMpi(Package):
 import collections.abc
 import os.path
 import re
-from typing import Any, Callable, List, Optional, Tuple, Union
+from typing import Any, Callable, List, Optional, Tuple, Type, Union

 import llnl.util.lang
 import llnl.util.tty.color

 import spack.deptypes as dt
+import spack.fetch_strategy
 import spack.package_base
 import spack.patch
 import spack.spec
@@ -47,7 +48,6 @@ class OpenMpi(Package):
 import spack.variant
 from spack.dependency import Dependency
 from spack.directives_meta import DirectiveError, DirectiveMeta
-from spack.fetch_strategy import from_kwargs
 from spack.resource import Resource
 from spack.version import (
     GitVersion,
@@ -82,8 +82,8 @@ class OpenMpi(Package):
 SpecType = str
 DepType = Union[Tuple[str, ...], str]
 WhenType = Optional[Union[spack.spec.Spec, str, bool]]
-Patcher = Callable[[Union[spack.package_base.PackageBase, Dependency]], None]
-PatchesType = Optional[Union[Patcher, str, List[Union[Patcher, str]]]]
+Patcher = Callable[[Union[Type[spack.package_base.PackageBase], Dependency]], None]
+PatchesType = Union[Patcher, str, List[Union[Patcher, str]]]


 SUPPORTED_LANGUAGES = ("fortran", "cxx", "c")
@@ -219,7 +219,7 @@ def version(
     return lambda pkg: _execute_version(pkg, ver, **kwargs)


-def _execute_version(pkg, ver, **kwargs):
+def _execute_version(pkg: Type[spack.package_base.PackageBase], ver: Union[str, int], **kwargs):
     if (
         (any(s in kwargs for s in spack.util.crypto.hashes) or "checksum" in kwargs)
         and hasattr(pkg, "has_code")
@@ -250,12 +250,12 @@ def _execute_version(pkg, ver, **kwargs):


 def _depends_on(
-    pkg: spack.package_base.PackageBase,
+    pkg: Type[spack.package_base.PackageBase],
     spec: spack.spec.Spec,
     *,
     when: WhenType = None,
     type: DepType = dt.DEFAULT_TYPES,
-    patches: PatchesType = None,
+    patches: Optional[PatchesType] = None,
 ):
     when_spec = _make_when_spec(when)
     if not when_spec:
@@ -330,7 +330,7 @@ def conflicts(conflict_spec: SpecType, when: WhenType = None, msg: Optional[str] = None):
         msg (str): optional user defined message
     """

-    def _execute_conflicts(pkg: spack.package_base.PackageBase):
+    def _execute_conflicts(pkg: Type[spack.package_base.PackageBase]):
         # If when is not specified the conflict always holds
         when_spec = _make_when_spec(when)
         if not when_spec:
@@ -349,7 +349,7 @@ def depends_on(
     spec: SpecType,
     when: WhenType = None,
     type: DepType = dt.DEFAULT_TYPES,
-    patches: PatchesType = None,
+    patches: Optional[PatchesType] = None,
 ):
     """Creates a dict of deps with specs defining when they apply.

@@ -371,14 +371,16 @@ def depends_on(
         assert type == "build", "languages must be of 'build' type"
         return _language(lang_spec_str=spec, when=when)

-    def _execute_depends_on(pkg: spack.package_base.PackageBase):
+    def _execute_depends_on(pkg: Type[spack.package_base.PackageBase]):
         _depends_on(pkg, dep_spec, when=when, type=type, patches=patches)

     return _execute_depends_on


 @directive("disable_redistribute")
-def redistribute(source=None, binary=None, when: WhenType = None):
+def redistribute(
+    source: Optional[bool] = None, binary: Optional[bool] = None, when: WhenType = None
+):
     """Can be used inside a Package definition to declare that
     the package source and/or compiled binaries should not be
     redistributed.
@@ -393,7 +395,10 @@ def redistribute(source=None, binary=None, when: WhenType = None):


 def _execute_redistribute(
-    pkg: spack.package_base.PackageBase, source=None, binary=None, when: WhenType = None
+    pkg: Type[spack.package_base.PackageBase],
+    source: Optional[bool],
+    binary: Optional[bool],
+    when: WhenType,
 ):
     if source is None and binary is None:
         return
@@ -469,9 +474,7 @@ def provides(*specs: SpecType, when: WhenType = None):
         when: condition when this provides clause needs to be considered
     """

-    def _execute_provides(pkg: spack.package_base.PackageBase):
-        import spack.parser  # Avoid circular dependency
-
+    def _execute_provides(pkg: Type[spack.package_base.PackageBase]):
         when_spec = _make_when_spec(when)
         if not when_spec:
             return
@@ -517,7 +520,7 @@ def can_splice(
         variants will be skipped by '*'.
     """

-    def _execute_can_splice(pkg: spack.package_base.PackageBase):
+    def _execute_can_splice(pkg: Type[spack.package_base.PackageBase]):
         when_spec = _make_when_spec(when)
         if isinstance(match_variants, str) and match_variants != "*":
             raise ValueError(
@@ -558,10 +561,10 @@ def patch(
         compressed URL patches)
     """

-    def _execute_patch(pkg_or_dep: Union[spack.package_base.PackageBase, Dependency]):
-        pkg = pkg_or_dep
-        if isinstance(pkg, Dependency):
-            pkg = pkg.pkg
+    def _execute_patch(
+        pkg_or_dep: Union[Type[spack.package_base.PackageBase], Dependency]
+    ) -> None:
+        pkg = pkg_or_dep.pkg if isinstance(pkg_or_dep, Dependency) else pkg_or_dep

         if hasattr(pkg, "has_code") and not pkg.has_code:
             raise UnsupportedPackageDirective(
@@ -735,58 +738,55 @@ def _execute_variant(pkg):


 @directive("resources")
-def resource(**kwargs):
-    """Define an external resource to be fetched and staged when building the
-    package. Based on the keywords present in the dictionary the appropriate
-    FetchStrategy will be used for the resource. Resources are fetched and
-    staged in their own folder inside spack stage area, and then moved into
-    the stage area of the package that needs them.
-
-    List of recognized keywords:
-
-    * 'when' : (optional) represents the condition upon which the resource is
-      needed
-    * 'destination' : (optional) path where to move the resource. This path
-      must be relative to the main package stage area.
-    * 'placement' : (optional) gives the possibility to fine tune how the
-      resource is moved into the main package stage area.
-    """
+def resource(
+    *,
+    name: Optional[str] = None,
+    destination: str = "",
+    placement: Optional[str] = None,
+    when: WhenType = None,
+    # additional kwargs are as for `version()`
+    **kwargs,
+):
+    """Define an external resource to be fetched and staged when building the package.
+    Based on the keywords present in the dictionary the appropriate FetchStrategy will
+    be used for the resource. Resources are fetched and staged in their own folder
+    inside spack stage area, and then moved into the stage area of the package that
+    needs them.
+
+    Keyword Arguments:
+        name: name for the resource
+        when: condition defining when the resource is needed
+        destination: path, relative to the package stage area, to which resource should be moved
+        placement: optionally rename the expanded resource inside the destination directory
+    """

     def _execute_resource(pkg):
-        when = kwargs.get("when")
         when_spec = _make_when_spec(when)
         if not when_spec:
             return

-        destination = kwargs.get("destination", "")
-        placement = kwargs.get("placement", None)
-
         # Check if the path is relative
         if os.path.isabs(destination):
-            message = (
-                "The destination keyword of a resource directive " "can't be an absolute path.\n"
-            )
-            message += "\tdestination : '{dest}\n'".format(dest=destination)
-            raise RuntimeError(message)
+            msg = "The destination keyword of a resource directive can't be an absolute path.\n"
+            msg += f"\tdestination : '{destination}\n'"
+            raise RuntimeError(msg)

         # Check if the path falls within the main package stage area
         test_path = "stage_folder_root"
-        normalized_destination = os.path.normpath(
-            os.path.join(test_path, destination)
-        )  # Normalized absolute path
+
+        # Normalized absolute path
+        normalized_destination = os.path.normpath(os.path.join(test_path, destination))

         if test_path not in normalized_destination:
-            message = (
-                "The destination folder of a resource must fall "
-                "within the main package stage directory.\n"
-            )
-            message += "\tdestination : '{dest}'\n".format(dest=destination)
-            raise RuntimeError(message)
+            msg = "Destination of a resource must be within the package stage directory.\n"
+            msg += f"\tdestination : '{destination}'\n"
+            raise RuntimeError(msg)

         resources = pkg.resources.setdefault(when_spec, [])
-        name = kwargs.get("name")
-        fetcher = from_kwargs(**kwargs)
-        resources.append(Resource(name, fetcher, destination, placement))
+        resources.append(
+            Resource(name, spack.fetch_strategy.from_kwargs(**kwargs), destination, placement)
+        )

     return _execute_resource

@@ -818,7 +818,9 @@ def _execute_maintainer(pkg):
     return _execute_maintainer


-def _execute_license(pkg, license_identifier: str, when):
+def _execute_license(
+    pkg: Type[spack.package_base.PackageBase], license_identifier: str, when: WhenType
+):
     # If when is not specified the license always holds
     when_spec = _make_when_spec(when)
     if not when_spec:
@@ -882,7 +884,7 @@ def requires(*requirement_specs: str, policy="one_of", when=None, msg=None):
         msg: optional user defined message
     """

-    def _execute_requires(pkg: spack.package_base.PackageBase):
+    def _execute_requires(pkg: Type[spack.package_base.PackageBase]):
         if policy not in ("one_of", "any_of"):
             err_msg = (
                 f"the 'policy' argument of the 'requires' directive in {pkg.name} is set "
@@ -907,7 +909,7 @@ def _execute_requires(pkg: spack.package_base.PackageBase):
 def _language(lang_spec_str: str, *, when: Optional[Union[str, bool]] = None):
     """Temporary implementation of language virtuals, until compilers are proper dependencies."""

-    def _execute_languages(pkg: spack.package_base.PackageBase):
+    def _execute_languages(pkg: Type[spack.package_base.PackageBase]):
         when_spec = _make_when_spec(when)
         if not when_spec:
             return
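A usage sketch of the reworked resource() signature in a package recipe (the package name, URL, and checksum are hypothetical): staging controls are now explicit keywords, while the fetch arguments still pass through **kwargs as for version().

from spack.package import *  # standard recipe preamble


class MyPkg(Package):  # hypothetical package
    # Fetch an extra tarball and stage it inside this package's stage area,
    # renaming the expanded directory to "extras".
    resource(
        name="extras",
        url="https://example.com/extras-1.0.tar.gz",  # fetch kwargs, as for version()
        sha256="0000000000000000000000000000000000000000000000000000000000000000",
        destination="",      # relative to the package stage area
        placement="extras",  # rename the expanded resource
        when="+extras",
    )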
@@ -5,7 +5,7 @@

import collections.abc
import functools
from typing import List, Set
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Type, Union

import llnl.util.lang

@@ -25,11 +25,13 @@ class DirectiveMeta(type):

    # Set of all known directives
    _directive_dict_names: Set[str] = set()
    _directives_to_be_executed: List[str] = []
    _when_constraints_from_context: List[str] = []
    _directives_to_be_executed: List[Callable] = []
    _when_constraints_from_context: List[spack.spec.Spec] = []
    _default_args: List[dict] = []

    def __new__(cls, name, bases, attr_dict):
    def __new__(
        cls: Type["DirectiveMeta"], name: str, bases: tuple, attr_dict: dict
    ) -> "DirectiveMeta":
        # Initialize the attribute containing the list of directives
        # to be executed. Here we go reversed because we want to execute
        # commands:
@@ -60,7 +62,7 @@ def __new__(cls, name, bases, attr_dict):

        return super(DirectiveMeta, cls).__new__(cls, name, bases, attr_dict)

    def __init__(cls, name, bases, attr_dict):
    def __init__(cls: "DirectiveMeta", name: str, bases: tuple, attr_dict: dict):
        # The instance is being initialized: if it is a package we must ensure
        # that the directives are called to set it up.

@@ -81,27 +83,27 @@ def __init__(cls, name, bases, attr_dict):
        super(DirectiveMeta, cls).__init__(name, bases, attr_dict)

    @staticmethod
    def push_to_context(when_spec):
    def push_to_context(when_spec: spack.spec.Spec) -> None:
        """Add a spec to the context constraints."""
        DirectiveMeta._when_constraints_from_context.append(when_spec)

    @staticmethod
    def pop_from_context():
    def pop_from_context() -> spack.spec.Spec:
        """Pop the last constraint from the context"""
        return DirectiveMeta._when_constraints_from_context.pop()

    @staticmethod
    def push_default_args(default_args):
    def push_default_args(default_args: Dict[str, Any]) -> None:
        """Push default arguments"""
        DirectiveMeta._default_args.append(default_args)

    @staticmethod
    def pop_default_args():
    def pop_default_args() -> dict:
        """Pop default arguments"""
        return DirectiveMeta._default_args.pop()

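These push/pop helpers back the `when` context manager used in package recipes. A sketch of that usage (the package and dependency names are illustrative, not from this diff):

```python
from spack.package import *


class Example(Package):
    # Entering the block pushes the "+mpi" constraint via
    # DirectiveMeta.push_to_context(); each directive inside combines the
    # context constraint with its own `when`, and exiting the block calls
    # pop_from_context().
    with when("+mpi"):
        depends_on("mpi")
        depends_on("hwloc@2:")
```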
    @staticmethod
    def directive(dicts=None):
    def directive(dicts: Optional[Union[Sequence[str], str]] = None) -> Callable:
        """Decorator for Spack directives.

        Spack directives allow you to modify a package while it is being
@@ -156,7 +158,7 @@ class Foo(Package):
        DirectiveMeta._directive_dict_names |= set(dicts)

        # This decorator just returns the directive functions
        def _decorator(decorated_function):
        def _decorator(decorated_function: Callable) -> Callable:
            directive_names.append(decorated_function.__name__)

            @functools.wraps(decorated_function)

@@ -192,3 +192,10 @@ def __reduce__(self):

def _make_stop_phase(msg, long_msg):
    return StopPhase(msg, long_msg)


class MirrorError(SpackError):
    """Superclass of all mirror-creation related errors."""

    def __init__(self, msg, long_msg=None):
        super().__init__(msg, long_msg)

@@ -5,7 +5,6 @@
"""Service functions and classes to implement the hooks
for Spack's command extensions.
"""
import difflib
import glob
import importlib
import os
@@ -17,7 +16,6 @@

import llnl.util.lang

import spack.cmd
import spack.config
import spack.error
import spack.util.path
@@ -25,9 +23,6 @@
_extension_regexp = re.compile(r"spack-(\w[-\w]*)$")


# TODO: For consistency we should use spack.cmd.python_name(), but
# currently this would create a circular relationship between
# spack.cmd and spack.extensions.
def _python_name(cmd_name):
    return cmd_name.replace("-", "_")

@@ -211,8 +206,7 @@ def get_module(cmd_name):
        module = load_command_extension(cmd_name, folder)
        if module:
            return module
    else:
        raise CommandNotFoundError(cmd_name)
    return None


def get_template_dirs():
@@ -224,27 +218,6 @@ def get_template_dirs():
    return extensions


class CommandNotFoundError(spack.error.SpackError):
    """Exception class thrown when a requested command is not recognized as
    such.
    """

    def __init__(self, cmd_name):
        msg = (
            "{0} is not a recognized Spack command or extension command;"
            " check with `spack commands`.".format(cmd_name)
        )
        long_msg = None

        similar = difflib.get_close_matches(cmd_name, spack.cmd.all_commands())

        if 1 <= len(similar) <= 5:
            long_msg = "\nDid you mean one of the following commands?\n "
            long_msg += "\n ".join(similar)

        super().__init__(msg, long_msg)

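The "Did you mean" suggestions above come straight from the standard library; a standalone illustration of the same call:

```python
import difflib

# Close matches are returned best-first, ranked by SequenceMatcher ratio.
print(difflib.get_close_matches("instal", ["install", "uninstall", "info"]))
# ['install', 'uninstall']
```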

class ExtensionNamingError(spack.error.SpackError):
    """Exception class thrown when a configured extension does not follow
    the expected naming convention.

@@ -325,12 +325,7 @@ def write(self, spec, color=None, out=None):
        self._out = llnl.util.tty.color.ColorStream(out, color=color)

        # We'll traverse the spec in topological order as we graph it.
        nodes_in_topological_order = [
            edge.spec
            for edge in spack.traverse.traverse_edges_topo(
                [spec], direction="children", deptype=self.depflag
            )
        ]
        nodes_in_topological_order = list(spec.traverse(order="topo", deptype=self.depflag))
        nodes_in_topological_order.reverse()

        # Work on a copy to be nondestructive

@@ -6,7 +6,7 @@
import llnl.util.tty as tty

import spack.binary_distribution as bindist
import spack.mirror
import spack.mirrors.mirror


def post_install(spec, explicit):
@@ -22,7 +22,7 @@ def post_install(spec, explicit):
        return

    # Push the package to all autopush mirrors
    for mirror in spack.mirror.MirrorCollection(binary=True, autopush=True).values():
    for mirror in spack.mirrors.mirror.MirrorCollection(binary=True, autopush=True).values():
        signing_key = bindist.select_signing_key() if mirror.signed else None
        with bindist.make_uploader(mirror=mirror, force=True, signing_key=signing_key) as uploader:
            uploader.push_or_raise([spec])

@@ -375,23 +375,16 @@ def phase_tests(self, builder, phase_name: str, method_names: List[str]):

        for name in method_names:
            try:
                # Prefer the method in the package over the builder's.
                # We need this primarily to pick up arbitrarily named test
                # methods but also some build-time checks.
                fn = getattr(builder.pkg, name, getattr(builder, name))

                msg = f"RUN-TESTS: {phase_name}-time tests [{name}]"
                print_message(logger, msg, verbose)

                fn()

                fn = getattr(builder, name, None) or getattr(builder.pkg, name)
            except AttributeError as e:
                msg = f"RUN-TESTS: method not implemented [{name}]"
                print_message(logger, msg, verbose)

                self.add_failure(e, msg)
                print_message(logger, f"RUN-TESTS: method not implemented [{name}]", verbose)
                self.add_failure(e, f"RUN-TESTS: method not implemented [{name}]")
                if fail_fast:
                    break
                continue

            print_message(logger, f"RUN-TESTS: {phase_name}-time tests [{name}]", verbose)
            fn()

        if have_tests:
            print_message(logger, "Completed testing", verbose)

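The rewritten lookup uses `getattr` with a default to express "try the builder's method, fall back to the package's, and raise `AttributeError` only if neither exists". A standalone sketch of the idiom (the class and method names are invented):

```python
class Builder:
    pass


class Pkg:
    def check_install(self):
        return "package-level check"


builder, pkg = Builder(), Pkg()
# getattr(builder, ..., None) returns None instead of raising, so `or` falls
# through to the package; the second getattr has no default and raises
# AttributeError when the method is missing on both objects.
fn = getattr(builder, "check_install", None) or getattr(pkg, "check_install")
print(fn())  # package-level check
```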
@@ -56,7 +56,7 @@
import spack.deptypes as dt
import spack.error
import spack.hooks
import spack.mirror
import spack.mirrors.mirror
import spack.package_base
import spack.package_prefs as prefs
import spack.repo
@@ -491,7 +491,7 @@ def _try_install_from_binary_cache(
        timer: timer to keep track of binary install phases.
    """
    # Early exit if no binary mirrors are configured.
    if not spack.mirror.MirrorCollection(binary=True):
    if not spack.mirrors.mirror.MirrorCollection(binary=True):
        return False

    tty.debug(f"Searching for binary cache of {package_id(pkg.spec)}")

lib/spack/spack/mirrors/__init__.py (new file, 4 lines)
@@ -0,0 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
lib/spack/spack/mirrors/layout.py (new file, 146 lines)
@@ -0,0 +1,146 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import os.path
from typing import Optional

import llnl.url
import llnl.util.symlink
from llnl.util.filesystem import mkdirp

import spack.fetch_strategy
import spack.oci.image
import spack.repo
import spack.spec
from spack.error import MirrorError


class MirrorLayout:
    """A ``MirrorLayout`` object describes the relative path of a mirror entry."""

    def __init__(self, path: str) -> None:
        self.path = path

    def __iter__(self):
        """Yield all paths including aliases where the resource can be found."""
        yield self.path

    def make_alias(self, root: str) -> None:
        """Make the entry ``root / self.path`` available under a human readable alias"""
        pass


class DefaultLayout(MirrorLayout):
    def __init__(self, alias_path: str, digest_path: Optional[str] = None) -> None:
        # When we have a digest, it is used as the primary storage location. If not, then we use
        # the human-readable alias. In case of mirrors of a VCS checkout, we currently do not have
        # a digest, that's why an alias is required and a digest optional.
        super().__init__(path=digest_path or alias_path)
        self.alias = alias_path
        self.digest_path = digest_path

    def make_alias(self, root: str) -> None:
        """Symlink a human readable path in our mirror to the actual storage location."""
        # We already use the human-readable path as the main storage location.
        if not self.digest_path:
            return

        alias, digest = os.path.join(root, self.alias), os.path.join(root, self.digest_path)

        alias_dir = os.path.dirname(alias)
        relative_dst = os.path.relpath(digest, start=alias_dir)

        mkdirp(alias_dir)
        tmp = f"{alias}.tmp"
        llnl.util.symlink.symlink(relative_dst, tmp)

        try:
            os.rename(tmp, alias)
        except OSError:
            # Clean up the temporary if possible
            try:
                os.unlink(tmp)
            except OSError:
                pass
            raise

    def __iter__(self):
        if self.digest_path:
            yield self.digest_path
        yield self.alias


class OCILayout(MirrorLayout):
    """Follow the OCI Image Layout Specification to archive blobs where paths are of the form
    ``blobs/<algorithm>/<digest>``"""

    def __init__(self, digest: spack.oci.image.Digest) -> None:
        super().__init__(os.path.join("blobs", digest.algorithm, digest.digest))

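A small sketch of how the two layouts above resolve paths (the paths are invented; this just exercises the classes defined in this file):

```python
layout = DefaultLayout(
    alias_path="zlib/zlib-1.3.tar.gz",
    digest_path="archive/ab/abcd1234.tar.gz",
)
print(layout.path)   # 'archive/ab/abcd1234.tar.gz': the digest wins when present
print(list(layout))  # digest path first, then the human-readable alias

vcs_layout = DefaultLayout(alias_path="mypkg/mypkg-git.main")
print(vcs_layout.path)  # no digest, so the alias is the storage location
```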
def _determine_extension(fetcher):
    if isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy):
        if fetcher.expand_archive:
            # If we fetch with a URLFetchStrategy, use URL's archive type
            ext = llnl.url.determine_url_file_extension(fetcher.url)

            if ext:
                # Remove any leading dots
                ext = ext.lstrip(".")
            else:
                msg = """\
Unable to parse extension from {0}.

If this URL is for a tarball but does not include the file extension
in the name, you can explicitly declare it with the following syntax:

    version('1.2.3', 'hash', extension='tar.gz')

If this URL is for a download like a .jar or .whl that does not need
to be expanded, or an uncompressed installation script, you can tell
Spack not to expand it with the following syntax:

    version('1.2.3', 'hash', expand=False)
"""
                raise MirrorError(msg.format(fetcher.url))
        else:
            # If the archive shouldn't be expanded, don't check extension.
            ext = None
    else:
        # Otherwise we'll make a .tar.gz ourselves
        ext = "tar.gz"

    return ext


def default_mirror_layout(
    fetcher: "spack.fetch_strategy.FetchStrategy",
    per_package_ref: str,
    spec: Optional["spack.spec.Spec"] = None,
) -> MirrorLayout:
    """Returns a ``MirrorReference`` object which keeps track of the relative
    storage path of the resource associated with the specified ``fetcher``."""
    ext = None
    if spec:
        pkg_cls = spack.repo.PATH.get_pkg_class(spec.name)
        versions = pkg_cls.versions.get(spec.version, {})
        ext = versions.get("extension", None)
    # If the spec does not explicitly specify an extension (the default case),
    # then try to determine it automatically. An extension can only be
    # specified for the primary source of the package (e.g. the source code
    # identified in the 'version' declaration). Resources/patches don't have
    # an option to specify an extension, so it must be inferred for those.
    ext = ext or _determine_extension(fetcher)

    if ext:
        per_package_ref += ".%s" % ext

    global_ref = fetcher.mirror_id()
    if global_ref:
        global_ref = os.path.join("_source-cache", global_ref)
    if global_ref and ext:
        global_ref += ".%s" % ext

    return DefaultLayout(per_package_ref, global_ref)
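A hedged usage sketch for `default_mirror_layout()`. The URL is invented, and the exact `URLFetchStrategy` constructor signature may differ across Spack versions; without a checksum the fetcher has no mirror id, so only the per-package alias is produced:

```python
import spack.fetch_strategy

fetcher = spack.fetch_strategy.URLFetchStrategy(url="https://example.com/foo-1.0.tar.gz")
layout = default_mirror_layout(fetcher, per_package_ref="foo/foo-1.0")
print(layout.path)  # 'foo/foo-1.0.tar.gz': the extension is inferred from the URL
```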
@@ -2,42 +2,20 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This file contains code for creating spack mirror directories. A
mirror is an organized hierarchy containing specially named archive
files. This enables spack to know where to find files in a mirror if
the main server for a particular package is down. Or, if the computer
where spack is run is not connected to the internet, it allows spack
to download packages directly from a mirror (e.g., on an intranet).
"""
import collections
import collections.abc
import operator
import os
import os.path
import sys
import traceback
import urllib.parse
from typing import Any, Dict, Optional, Tuple, Union

import llnl.url
import llnl.util.symlink
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp

import spack.caches
import spack.config
import spack.error
import spack.fetch_strategy
import spack.mirror
import spack.oci.image
import spack.repo
import spack.spec
import spack.util.path
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
import spack.util.url as url_util
import spack.version
from spack.error import MirrorError

#: What schemes do we support
supported_url_schemes = ("file", "http", "https", "sftp", "ftp", "s3", "gs", "oci")
@@ -490,380 +468,3 @@ def __iter__(self):

    def __len__(self):
        return len(self._mirrors)

[The bulk of the removed lines (_determine_extension, MirrorLayout, DefaultLayout, OCILayout, default_mirror_layout, get_all_versions, get_matching_versions, create, mirror_cache_and_stats, add, remove, and MirrorStats) is line-for-line identical to the copies added in lib/spack/spack/mirrors/layout.py above and lib/spack/spack/mirrors/utils.py below.]

def create_mirror_from_package_object(pkg_obj, mirror_cache, mirror_stats):
    """Add a single package object to a mirror.

    The package object is only required to have an associated spec
    with a concrete version.

    Args:
        pkg_obj (spack.package_base.PackageBase): package object to be added.
        mirror_cache (spack.caches.MirrorCache): mirror where to add the spec.
        mirror_stats (spack.mirror.MirrorStats): statistics on the current mirror

    Return:
        True if the spec was added successfully, False otherwise
    """
    tty.msg("Adding package {} to mirror".format(pkg_obj.spec.format("{name}{@version}")))
    num_retries = 3
    while num_retries > 0:
        try:
            # Includes patches and resources
            with pkg_obj.stage as pkg_stage:
                pkg_stage.cache_mirror(mirror_cache, mirror_stats)
            exception = None
            break
        except Exception as e:
            exc_tuple = sys.exc_info()
            exception = e
            num_retries -= 1
    if exception:
        if spack.config.get("config:debug"):
            traceback.print_exception(file=sys.stderr, *exc_tuple)
        else:
            tty.warn(
                "Error while fetching %s" % pkg_obj.spec.cformat("{name}{@version}"),
                getattr(exception, "message", exception),
            )
        mirror_stats.error()
        return False
    return True

[The removed require_mirror_name function and MirrorError class match the copies in lib/spack/spack/mirrors/utils.py below and the MirrorError class added earlier in this diff.]
lib/spack/spack/mirrors/utils.py (new file, 258 lines)
@@ -0,0 +1,258 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import os.path
import traceback

import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp

import spack.caches
import spack.config
import spack.error
import spack.repo
import spack.spec
import spack.util.spack_yaml as syaml
import spack.version
from spack.error import MirrorError
from spack.mirrors.mirror import Mirror, MirrorCollection


def get_all_versions(specs):
    """Given a set of initial specs, return a new set of specs that includes
    each version of each package in the original set.

    Note that if any spec in the original set specifies properties other than
    version, this information will be omitted in the new set; for example, the
    new set of specs will not include variant settings.
    """
    version_specs = []
    for spec in specs:
        pkg_cls = spack.repo.PATH.get_pkg_class(spec.name)
        # Skip any package that has no known versions.
        if not pkg_cls.versions:
            tty.msg("No safe (checksummed) versions for package %s" % pkg_cls.name)
            continue

        for version in pkg_cls.versions:
            version_spec = spack.spec.Spec(pkg_cls.name)
            version_spec.versions = spack.version.VersionList([version])
            version_specs.append(version_spec)

    return version_specs


def get_matching_versions(specs, num_versions=1):
    """Get a spec for EACH known version matching any spec in the list.
    For concrete specs, this retrieves the concrete version and, if more
    than one version per spec is requested, retrieves the latest versions
    of the package.
    """
    matching = []
    for spec in specs:
        pkg = spec.package

        # Skip any package that has no known versions.
        if not pkg.versions:
            tty.msg("No safe (checksummed) versions for package %s" % pkg.name)
            continue

        pkg_versions = num_versions

        version_order = list(reversed(sorted(pkg.versions)))
        matching_spec = []
        if spec.concrete:
            matching_spec.append(spec)
            pkg_versions -= 1
            if spec.version in version_order:
                version_order.remove(spec.version)

        for v in version_order:
            # Generate no more than num_versions versions for each spec.
            if pkg_versions < 1:
                break

            # Generate only versions that satisfy the spec.
            if spec.concrete or v.intersects(spec.versions):
                s = spack.spec.Spec(pkg.name)
                s.versions = spack.version.VersionList([v])
                s.variants = spec.variants.copy()
                # This is needed to avoid hanging references during the
                # concretization phase
                s.variants.spec = s
                matching_spec.append(s)
                pkg_versions -= 1

        if not matching_spec:
            tty.warn("No known version matches spec: %s" % spec)
        matching.extend(matching_spec)

    return matching


def create(path, specs, skip_unstable_versions=False):
    """Create a directory to be used as a spack mirror, and fill it with
    package archives.

    Arguments:
        path: Path to create a mirror directory hierarchy in.
        specs: Any package versions matching these specs will be added \
            to the mirror.
        skip_unstable_versions: if true, this skips adding resources when
            they do not have a stable archive checksum (as determined by
            ``fetch_strategy.stable_target``)

    Return Value:
        Returns a tuple of lists: (present, mirrored, error)

        * present: Package specs that were already present.
        * mirrored: Package specs that were successfully mirrored.
        * error: Package specs that failed to mirror due to some error.
    """
    # automatically spec-ify anything in the specs array.
    specs = [s if isinstance(s, spack.spec.Spec) else spack.spec.Spec(s) for s in specs]

    mirror_cache, mirror_stats = mirror_cache_and_stats(path, skip_unstable_versions)
    for spec in specs:
        mirror_stats.next_spec(spec)
        create_mirror_from_package_object(spec.package, mirror_cache, mirror_stats)

    return mirror_stats.stats()


def mirror_cache_and_stats(path, skip_unstable_versions=False):
    """Return both a mirror cache and a mirror stats, starting from the path
    where a mirror ought to be created.

    Args:
        path (str): path to create a mirror directory hierarchy in.
        skip_unstable_versions: if true, this skips adding resources when
            they do not have a stable archive checksum (as determined by
            ``fetch_strategy.stable_target``)
    """
    # Get the absolute path of the root before we start jumping around.
    if not os.path.isdir(path):
        try:
            mkdirp(path)
        except OSError as e:
            raise MirrorError("Cannot create directory '%s':" % path, str(e))
    mirror_cache = spack.caches.MirrorCache(path, skip_unstable_versions=skip_unstable_versions)
    mirror_stats = MirrorStats()
    return mirror_cache, mirror_stats


def add(mirror: Mirror, scope=None):
    """Add a named mirror in the given scope"""
    mirrors = spack.config.get("mirrors", scope=scope)
    if not mirrors:
        mirrors = syaml.syaml_dict()

    if mirror.name in mirrors:
        tty.die("Mirror with name {} already exists.".format(mirror.name))

    items = [(n, u) for n, u in mirrors.items()]
    items.insert(0, (mirror.name, mirror.to_dict()))
    mirrors = syaml.syaml_dict(items)
    spack.config.set("mirrors", mirrors, scope=scope)


def remove(name, scope):
    """Remove the named mirror in the given scope"""
    mirrors = spack.config.get("mirrors", scope=scope)
    if not mirrors:
        mirrors = syaml.syaml_dict()

    if name not in mirrors:
        tty.die("No mirror with name %s" % name)

    mirrors.pop(name)
    spack.config.set("mirrors", mirrors, scope=scope)
    tty.msg("Removed mirror %s." % name)


class MirrorStats:
    def __init__(self):
        self.present = {}
        self.new = {}
        self.errors = set()

        self.current_spec = None
        self.added_resources = set()
        self.existing_resources = set()

    def next_spec(self, spec):
        self._tally_current_spec()
        self.current_spec = spec

    def _tally_current_spec(self):
        if self.current_spec:
            if self.added_resources:
                self.new[self.current_spec] = len(self.added_resources)
            if self.existing_resources:
                self.present[self.current_spec] = len(self.existing_resources)
            self.added_resources = set()
            self.existing_resources = set()
        self.current_spec = None

    def stats(self):
        self._tally_current_spec()
        return list(self.present), list(self.new), list(self.errors)

    def already_existed(self, resource):
        # If an error occurred after caching a subset of a spec's
        # resources, a secondary attempt may consider them already added
        if resource not in self.added_resources:
            self.existing_resources.add(resource)

    def added(self, resource):
        self.added_resources.add(resource)

    def error(self):
        self.errors.add(self.current_spec)

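How the bookkeeping above tallies per-spec results, in a standalone sketch (plain strings stand in for the `Spec` objects real callers pass):

```python
stats = MirrorStats()
stats.next_spec("zlib@1.3")
stats.added("zlib-1.3.tar.gz")               # counted as newly mirrored
stats.next_spec("bzip2@1.0.8")               # tallies zlib before switching specs
stats.already_existed("bzip2-1.0.8.tar.gz")  # counted as already present
present, mirrored, errors = stats.stats()
print(present, mirrored, errors)  # ['bzip2@1.0.8'] ['zlib@1.3'] []
```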
def create_mirror_from_package_object(
    pkg_obj, mirror_cache: "spack.caches.MirrorCache", mirror_stats: MirrorStats
) -> bool:
    """Add a single package object to a mirror.

    The package object is only required to have an associated spec
    with a concrete version.

    Args:
        pkg_obj (spack.package_base.PackageBase): package object to be added.
        mirror_cache: mirror where to add the spec.
        mirror_stats: statistics on the current mirror

    Return:
        True if the spec was added successfully, False otherwise
    """
    tty.msg("Adding package {} to mirror".format(pkg_obj.spec.format("{name}{@version}")))
    max_retries = 3
    for num_retries in range(max_retries):
        try:
            # Includes patches and resources
            with pkg_obj.stage as pkg_stage:
                pkg_stage.cache_mirror(mirror_cache, mirror_stats)
            break
        except Exception as e:
            if num_retries + 1 == max_retries:
                if spack.config.get("config:debug"):
                    traceback.print_exc()
                else:
                    tty.warn(
                        "Error while fetching %s" % pkg_obj.spec.format("{name}{@version}"), str(e)
                    )
                mirror_stats.error()
                return False
    return True


def require_mirror_name(mirror_name):
    """Find a mirror by name and raise if it does not exist"""
    mirror = MirrorCollection().get(mirror_name)
    if not mirror:
        raise ValueError(f'no mirror named "{mirror_name}"')
    return mirror
@@ -16,7 +16,8 @@
import llnl.util.tty as tty

import spack.fetch_strategy
import spack.mirror
import spack.mirrors.layout
import spack.mirrors.mirror
import spack.oci.opener
import spack.stage
import spack.util.url
@@ -213,7 +214,7 @@ def upload_manifest(
    return digest, size


def image_from_mirror(mirror: spack.mirror.Mirror) -> ImageReference:
def image_from_mirror(mirror: spack.mirrors.mirror.Mirror) -> ImageReference:
    """Given an OCI based mirror, extract the URL and image name from it"""
    url = mirror.push_url
    if not url.startswith("oci://"):
@@ -385,5 +386,8 @@ def make_stage(
    # is the `oci-layout` and `index.json` files, which are
    # required by the spec.
    return spack.stage.Stage(
        fetch_strategy, mirror_paths=spack.mirror.OCILayout(digest), name=digest.digest, keep=keep
        fetch_strategy,
        mirror_paths=spack.mirrors.layout.OCILayout(digest),
        name=digest.digest,
        keep=keep,
    )

@@ -20,7 +20,7 @@
import llnl.util.lang

import spack.config
import spack.mirror
import spack.mirrors.mirror
import spack.parser
import spack.util.web

@@ -367,11 +367,11 @@ def http_error_401(self, req: Request, fp, code, msg, headers):


def credentials_from_mirrors(
    domain: str, *, mirrors: Optional[Iterable[spack.mirror.Mirror]] = None
    domain: str, *, mirrors: Optional[Iterable[spack.mirrors.mirror.Mirror]] = None
) -> Optional[UsernamePassword]:
    """Filter out OCI registry credentials from a list of mirrors."""

    mirrors = mirrors or spack.mirror.MirrorCollection().values()
    mirrors = mirrors or spack.mirrors.mirror.MirrorCollection().values()

    for mirror in mirrors:
        # Prefer push credentials over fetch. Unlikely that those are different

@@ -40,7 +40,8 @@
import spack.error
import spack.fetch_strategy as fs
import spack.hooks
import spack.mirror
import spack.mirrors.layout
import spack.mirrors.mirror
import spack.multimethod
import spack.patch
import spack.phase_callbacks
@@ -54,6 +55,7 @@
import spack.variant
from spack.error import InstallError, NoURLError, PackageError
from spack.filesystem_view import YamlFilesystemView
from spack.resource import Resource
from spack.solver.version_order import concretization_version_order
from spack.stage import DevelopStage, ResourceStage, Stage, StageComposite, compute_stage_name
from spack.util.package_hash import package_hash
@@ -585,6 +587,7 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
    # Declare versions dictionary as placeholder for values.
    # This allows analysis tools to correctly interpret the class attributes.
    versions: dict
    resources: Dict[spack.spec.Spec, List[Resource]]
    dependencies: Dict[spack.spec.Spec, Dict[str, spack.dependency.Dependency]]
    conflicts: Dict[spack.spec.Spec, List[Tuple[spack.spec.Spec, Optional[str]]]]
    requirements: Dict[
@@ -595,6 +598,7 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
    patches: Dict[spack.spec.Spec, List[spack.patch.Patch]]
    variants: Dict[spack.spec.Spec, Dict[str, spack.variant.Variant]]
    languages: Dict[spack.spec.Spec, Set[str]]
    licenses: Dict[spack.spec.Spec, str]
    splice_specs: Dict[spack.spec.Spec, Tuple[spack.spec.Spec, Union[None, str, List[str]]]]

    #: Store whether a given Spec source/binary should not be redistributed.
@@ -1184,10 +1188,10 @@ def _make_resource_stage(self, root_stage, resource):
            root=root_stage,
            resource=resource,
            name=self._resource_stage(resource),
            mirror_paths=spack.mirror.default_mirror_layout(
            mirror_paths=spack.mirrors.layout.default_mirror_layout(
                resource.fetcher, os.path.join(self.name, pretty_resource_name)
            ),
            mirrors=spack.mirror.MirrorCollection(source=True).values(),
            mirrors=spack.mirrors.mirror.MirrorCollection(source=True).values(),
            path=self.path,
        )

@@ -1199,7 +1203,7 @@ def _make_root_stage(self, fetcher):
        # Construct a mirror path (TODO: get this out of package.py)
        format_string = "{name}-{version}"
        pretty_name = self.spec.format_path(format_string)
        mirror_paths = spack.mirror.default_mirror_layout(
        mirror_paths = spack.mirrors.layout.default_mirror_layout(
            fetcher, os.path.join(self.name, pretty_name), self.spec
        )
        # Construct a path where the stage should build.
@@ -1208,7 +1212,7 @@ def _make_root_stage(self, fetcher):
        stage = Stage(
            fetcher,
            mirror_paths=mirror_paths,
            mirrors=spack.mirror.MirrorCollection(source=True).values(),
            mirrors=spack.mirrors.mirror.MirrorCollection(source=True).values(),
            name=stage_name,
            path=self.path,
            search_fn=self._download_search,

@@ -16,7 +16,8 @@
import spack
import spack.error
import spack.fetch_strategy
import spack.mirror
import spack.mirrors.layout
import spack.mirrors.mirror
import spack.repo
import spack.stage
import spack.util.spack_json as sjson
@@ -329,12 +330,12 @@ def stage(self) -> "spack.stage.Stage":
        name = "{0}-{1}".format(os.path.basename(self.url), fetch_digest[:7])

        per_package_ref = os.path.join(self.owner.split(".")[-1], name)
        mirror_ref = spack.mirror.default_mirror_layout(fetcher, per_package_ref)
        mirror_ref = spack.mirrors.layout.default_mirror_layout(fetcher, per_package_ref)
        self._stage = spack.stage.Stage(
            fetcher,
            name=f"{spack.stage.stage_prefix}patch-{fetch_digest}",
            mirror_paths=mirror_ref,
            mirrors=spack.mirror.MirrorCollection(source=True).values(),
            mirrors=spack.mirrors.mirror.MirrorCollection(source=True).values(),
        )
        return self._stage

@@ -13,6 +13,7 @@
import macholib.mach_o
import macholib.MachO

import llnl.util.filesystem as fs
import llnl.util.lang
import llnl.util.tty as tty
from llnl.util.lang import memoized
@@ -275,10 +276,10 @@ def modify_macho_object(cur_path, rpaths, deps, idpath, paths_to_paths):

    # Deduplicate and flatten
    args = list(itertools.chain.from_iterable(llnl.util.lang.dedupe(args)))
    install_name_tool = executable.Executable("install_name_tool")
    if args:
        args.append(str(cur_path))
        install_name_tool = executable.Executable("install_name_tool")
        install_name_tool(*args)
        with fs.edit_in_place_through_temporary_file(cur_path) as temp_path:
            install_name_tool(*args, temp_path)


def macholib_get_paths(cur_path):
@@ -717,8 +718,8 @@ def fixup_macos_rpath(root, filename):
        # No fixes needed
        return False

    args.append(abspath)
    executable.Executable("install_name_tool")(*args)
    with fs.edit_in_place_through_temporary_file(abspath) as temp_path:
        executable.Executable("install_name_tool")(*args, temp_path)
    return True

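The helper used above lives in `llnl.util.filesystem`; the following is a rough standalone equivalent of the edit-through-a-temporary-file pattern, to show why the tool runs on a copy. The function name here is hypothetical, not Spack's actual implementation:

```python
import os
import shutil
import tempfile
from contextlib import contextmanager


@contextmanager
def edit_in_place(path):
    """Yield a temporary copy of `path`; on success, atomically replace the original."""
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(os.path.abspath(path)))
    os.close(fd)
    shutil.copy2(path, tmp)
    try:
        yield tmp               # the external tool edits the copy...
        os.replace(tmp, path)   # ...which then replaces the original in one step
    except BaseException:
        os.unlink(tmp)          # clean up the temporary on failure
        raise
```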
@@ -41,6 +41,7 @@
import spack.provider_index
import spack.spec
import spack.tag
import spack.tengine
import spack.util.file_cache
import spack.util.git
import spack.util.naming as nm
@@ -81,43 +82,6 @@ def namespace_from_fullname(fullname):
    return namespace


class _PrependFileLoader(importlib.machinery.SourceFileLoader):
    def __init__(self, fullname, path, prepend=None):
        super(_PrependFileLoader, self).__init__(fullname, path)
        self.prepend = prepend

    def path_stats(self, path):
        stats = super(_PrependFileLoader, self).path_stats(path)
        if self.prepend:
            stats["size"] += len(self.prepend) + 1
        return stats

    def get_data(self, path):
        data = super(_PrependFileLoader, self).get_data(path)
        if path != self.path or self.prepend is None:
            return data
        else:
            return self.prepend.encode() + b"\n" + data


class RepoLoader(_PrependFileLoader):
    """Loads a Python module associated with a package in a specific repository"""

    #: Code in ``_package_prepend`` is prepended to imported packages.
    #:
    #: Spack packages are expected to call `from spack.package import *`
    #: themselves, but we are allowing a deprecation period before breaking
    #: external repos that don't do this yet.
    _package_prepend = "from spack.package import *"

    def __init__(self, fullname, repo, package_name):
        self.repo = repo
        self.package_name = package_name
        self.package_py = repo.filename_for_package_name(package_name)
        self.fullname = fullname
        super().__init__(self.fullname, self.package_py, prepend=self._package_prepend)


class SpackNamespaceLoader:
    def create_module(self, spec):
        return SpackNamespace(spec.name)
@@ -187,7 +151,8 @@ def compute_loader(self, fullname):
            # With 2 nested conditionals we can call "repo.real_name" only once
            package_name = repo.real_name(module_name)
            if package_name:
                return RepoLoader(fullname, repo, package_name)
                module_path = repo.filename_for_package_name(package_name)
                return importlib.machinery.SourceFileLoader(fullname, module_path)

        # We are importing a full namespace like 'spack.pkg.builtin'
        if fullname == repo.full_namespace:
@@ -1521,8 +1486,6 @@ def add_package(self, name, dependencies=None):
        Both "dep_type" and "condition" can default to ``None`` in which case
        ``spack.dependency.default_deptype`` and ``spack.spec.Spec()`` are used.
        """
        import spack.tengine  # avoid circular import

        dependencies = dependencies or []
        context = {"cls_name": nm.mod_to_class(name), "dependencies": dependencies}
        template = spack.tengine.make_environment().get_template("mock-repository/package.pyt")

@@ -12,7 +12,10 @@


class Resource:
    """Represents an optional resource to be fetched by a package.
    """Represents any resource to be fetched by a package.

    This includes the main tarball or source archive, as well as extra archives defined
    by the resource() directive.

    Aggregates a name, a fetcher, a destination and a placement.
    """

@@ -88,6 +88,8 @@
            "strategy": {"type": "string", "enum": ["none", "minimal", "full"]}
        },
    },
    "timeout": {"type": "integer", "minimum": 0},
    "error_on_timeout": {"type": "boolean"},
    "os_compatible": {"type": "object", "additionalProperties": {"type": "array"}},
},
}

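Per the solver change later in this diff, the two new schema keys belong to the `concretizer` section and are read like this (a minimal sketch):

```python
import spack.config

time_limit = spack.config.CONFIG.get("concretizer:timeout", -1)
error_on_timeout = spack.config.CONFIG.get("concretizer:error_on_timeout", True)
```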
@@ -106,8 +106,8 @@
    {
        "names": ["install_missing_compilers"],
        "message": "The config:install_missing_compilers option has been deprecated in "
        "Spack v0.23, and is currently ignored. It will be removed from config in "
        "Spack v0.25.",
        "Spack v0.23, and is currently ignored. It will be removed from config after "
        "Spack v1.0.",
        "error": False,
    },
],

@@ -27,6 +27,7 @@

import spack
import spack.binary_distribution
import spack.compiler
import spack.compilers
import spack.concretize
import spack.config
@@ -47,8 +48,6 @@
import spack.version as vn
import spack.version.git_ref_lookup
from spack import traverse
from spack.config import get_mark_from_yaml_data
from spack.error import SpecSyntaxError

from .core import (
    AspFunction,
@@ -64,6 +63,7 @@
    parse_term,
)
from .counter import FullDuplicatesCounter, MinimalDuplicatesCounter, NoDuplicatesCounter
from .requirements import RequirementKind, RequirementParser, RequirementRule
from .version_order import concretization_version_order

GitOrStandardVersion = Union[spack.version.GitVersion, spack.version.StandardVersion]
@@ -143,17 +143,6 @@ def named_spec(
    spec.name = old_name


class RequirementKind(enum.Enum):
    """Purpose / provenance of a requirement"""

    #: Default requirement expressed under the 'all' attribute of packages.yaml
    DEFAULT = enum.auto()
    #: Requirement expressed on a virtual package
    VIRTUAL = enum.auto()
    #: Requirement expressed on a specific package
    PACKAGE = enum.auto()


class DeclaredVersion(NamedTuple):
    """Data class to contain information on declared versions used in the solve"""

@@ -277,11 +266,6 @@ def specify(spec):
    return spack.spec.Spec(spec)


def remove_node(spec: spack.spec.Spec, facts: List[AspFunction]) -> List[AspFunction]:
    """Transformation that removes all "node" and "virtual_node" from the input list of facts."""
    return list(filter(lambda x: x.args[0] not in ("node", "virtual_node"), facts))


def _create_counter(specs: List[spack.spec.Spec], tests: bool):
    strategy = spack.config.CONFIG.get("concretizer:duplicates:strategy", "none")
    if strategy == "full":
@@ -756,17 +740,6 @@ def on_model(model):
        raise UnsatisfiableSpecError(msg)


class RequirementRule(NamedTuple):
    """Data class to collect information on a requirement"""

    pkg_name: str
    policy: str
    requirements: List["spack.spec.Spec"]
    condition: "spack.spec.Spec"
    kind: RequirementKind
    message: Optional[str]


class KnownCompiler(NamedTuple):
    """Data class to collect information on compilers"""

@@ -885,7 +858,22 @@ def on_model(model):
            solve_kwargs["on_unsat"] = cores.append

        timer.start("solve")
        solve_result = self.control.solve(**solve_kwargs)
        time_limit = spack.config.CONFIG.get("concretizer:timeout", -1)
        error_on_timeout = spack.config.CONFIG.get("concretizer:error_on_timeout", True)
        # Spack uses 0 to set no time limit, clingo API uses -1
        if time_limit == 0:
            time_limit = -1
        with self.control.solve(**solve_kwargs, async_=True) as handle:
            finished = handle.wait(time_limit)
            if not finished:
                specs_str = ", ".join(llnl.util.lang.elide_list([str(s) for s in specs], 4))
                header = f"Spack is taking more than {time_limit} seconds to solve for {specs_str}"
                if error_on_timeout:
                    raise UnsatisfiableSpecError(f"{header}, stopping concretization")
                warnings.warn(f"{header}, using the best configuration found so far")
                handle.cancel()

            solve_result = handle.get()
        timer.stop("solve")

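The timeout handling above relies on clingo's asynchronous solve handle. A self-contained sketch of the same pattern on a trivial program (assumes the `clingo` Python package is installed):

```python
import clingo

ctl = clingo.Control()
ctl.add("base", [], "a. b :- a.")
ctl.ground([("base", [])])

with ctl.solve(async_=True) as handle:
    if not handle.wait(5.0):  # seconds; clingo treats a negative wait as "no limit"
        handle.cancel()       # stop the search, keep the best result found so far
    result = handle.get()

print(result.satisfiable)  # True
```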
        # once done, construct the solve result
@@ -1130,6 +1118,7 @@ class SpackSolverSetup:
|
||||
def __init__(self, tests: bool = False):
|
||||
# these are all initialized in setup()
|
||||
self.gen: "ProblemInstanceBuilder" = ProblemInstanceBuilder()
|
||||
self.requirement_parser = RequirementParser(spack.config.CONFIG)
|
||||
self.possible_virtuals: Set[str] = set()
|
||||
|
||||
self.assumptions: List[Tuple["clingo.Symbol", bool]] = [] # type: ignore[name-defined]
|
||||
@@ -1316,8 +1305,7 @@ def compiler_facts(self):
|
||||
self.gen.newline()
|
||||
|
||||
def package_requirement_rules(self, pkg):
|
||||
parser = RequirementParser(spack.config.CONFIG)
|
||||
self.emit_facts_from_requirement_rules(parser.rules(pkg))
|
||||
self.emit_facts_from_requirement_rules(self.requirement_parser.rules(pkg))
|
||||
|
||||
def pkg_rules(self, pkg, tests):
|
||||
pkg = self.pkg_class(pkg)
|
||||
@@ -1529,7 +1517,8 @@ def _get_condition_id(
        return result[0]

    cond_id = next(self._id_counter)
-   requirements = self.spec_clauses(named_cond, body=body, context=context)
+
+   requirements = self.spec_clauses(named_cond, body=body, context=context, node=True)
    if context.transform:
        requirements = context.transform(named_cond, requirements)
    pkg_cache[named_cond_key] = (cond_id, requirements)
@@ -1567,7 +1556,6 @@ def condition(

        if not context:
            context = ConditionContext()
-           context.transform_imposed = remove_node

        if imposed_spec:
            imposed_name = imposed_spec.name or imposed_name
@@ -1602,14 +1590,6 @@ def condition(

        return condition_id

-   def impose(self, condition_id, imposed_spec, node=True, body=False):
-       imposed_constraints = self.spec_clauses(imposed_spec, body=body)
-       for pred in imposed_constraints:
-           # imposed "node"-like conditions are no-ops
-           if not node and pred.args[0] in ("node", "virtual_node"):
-               continue
-           self.gen.fact(fn.imposed_constraint(condition_id, *pred.args))
-
    def package_provider_rules(self, pkg):
        for vpkg_name in pkg.provided_virtual_names():
            if vpkg_name not in self.possible_virtuals:
@@ -1667,7 +1647,7 @@ def track_dependencies(input_spec, requirements):
            return requirements + [fn.attr("track_dependencies", input_spec.name)]

        def dependency_holds(input_spec, requirements):
-           return remove_node(input_spec, requirements) + [
+           return requirements + [
                fn.attr(
                    "dependency_holds", pkg.name, input_spec.name, dt.flag_to_string(t)
                )
@@ -1726,12 +1706,10 @@ def package_splice_rules(self, pkg):
            when_spec_attrs = [
                fn.attr(c.args[0], splice_node, *(c.args[2:]))
                for c in self.spec_clauses(cond, body=True, required_from=None)
-               if c.args[0] != "node"
            ]
            splice_spec_hash_attrs = [
                fn.hash_attr(hash_var, *(c.args))
                for c in self.spec_clauses(spec_to_splice, body=True, required_from=None)
-               if c.args[0] != "node"
            ]
            if match_variants is None:
                variant_constraints = []
@@ -1795,9 +1773,8 @@ def provider_defaults(self):

    def provider_requirements(self):
        self.gen.h2("Requirements on virtual providers")
-       parser = RequirementParser(spack.config.CONFIG)
        for virtual_str in sorted(self.possible_virtuals):
-           rules = parser.rules_from_virtual(virtual_str)
+           rules = self.requirement_parser.rules_from_virtual(virtual_str)
            if rules:
                self.emit_facts_from_requirement_rules(rules)
                self.trigger_rules()
@@ -1854,10 +1831,6 @@ def emit_facts_from_requirement_rules(self, rules: List[RequirementRule]):
                context.source = ConstraintOrigin.append_type_suffix(
                    pkg_name, ConstraintOrigin.REQUIRE
                )
-               if not virtual:
-                   context.transform_imposed = remove_node
-               # else: for virtuals we want to emit "node" and
-               # "virtual_node" in imposed specs

                member_id = self.condition(
                    required_spec=when_spec,
@@ -2031,6 +2004,7 @@ def spec_clauses(
        self,
        spec: spack.spec.Spec,
        *,
+       node: bool = False,
        body: bool = False,
        transitive: bool = True,
        expand_hashes: bool = False,
@@ -2048,6 +2022,7 @@ def spec_clauses(
        try:
            clauses = self._spec_clauses(
                spec,
+               node=node,
                body=body,
                transitive=transitive,
                expand_hashes=expand_hashes,
@@ -2065,6 +2040,7 @@ def _spec_clauses(
        self,
        spec: spack.spec.Spec,
        *,
+       node: bool = False,
        body: bool = False,
        transitive: bool = True,
        expand_hashes: bool = False,
@@ -2075,6 +2051,7 @@ def _spec_clauses(

        Arguments:
            spec: the spec to analyze
+           node: if True, emit node(PackageName, ...) and virtual_node(PackageName, ...) facts
            body: if True, generate clauses to be used in rule bodies (final values) instead
                of rule heads (setters).
            transitive: if False, don't generate clauses from dependencies (default True)
@@ -2094,8 +2071,10 @@ def _spec_clauses(

        f: Union[Type[_Head], Type[_Body]] = _Body if body else _Head

-       if spec.name:
+       # only generate this if the caller asked for node facts -- not needed for most conditions
+       if node and spec.name:
            clauses.append(f.node(spec.name) if not spec.virtual else f.virtual_node(spec.name))

        if spec.namespace:
            clauses.append(f.namespace(spec.name, spec.namespace))
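The net effect of the new keyword is that `node(...)`/`virtual_node(...)` facts become opt-in instead of being emitted for every spec. A small self-contained sketch of that conditional-emission pattern, using a hypothetical `Fact` tuple rather than Spack's real clause classes:

from typing import List, NamedTuple, Tuple


class Fact(NamedTuple):
    name: str
    args: Tuple[str, ...]


def spec_clauses_sketch(name: str, version: str, *, node: bool = False) -> List[Fact]:
    """Hypothetical helper mirroring the node=... behavior above."""
    clauses: List[Fact] = []
    if node and name:
        # node facts are only needed for literals and condition triggers
        clauses.append(Fact("node", (name,)))
    clauses.append(Fact("version", (name, version)))
    return clauses


# Most condition clauses skip the node fact; literals request it explicitly:
assert spec_clauses_sketch("zlib", "1.3") == [Fact("version", ("zlib", "1.3"))]
assert spec_clauses_sketch("zlib", "1.3", node=True)[0] == Fact("node", ("zlib",))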
@@ -2253,6 +2232,7 @@ def _spec_clauses(
            clauses.extend(
                self._spec_clauses(
                    dep,
+                   node=node,
                    body=body,
                    expand_hashes=expand_hashes,
                    concrete_build_deps=concrete_build_deps,
@@ -2896,7 +2876,7 @@ def literal_specs(self, specs):
            effect_id = next(self._id_counter)
            context = SourceContext()
            context.source = "literal"
-           requirements = self.spec_clauses(spec, context=context)
+           requirements = self.spec_clauses(spec, context=context, node=True)
            root_name = spec.name
            for clause in requirements:
                clause_name = clause.args[0]
@@ -3072,202 +3052,6 @@ def value(self) -> str:
        return "".join(self.asp_problem)


-def parse_spec_from_yaml_string(string: str) -> "spack.spec.Spec":
-    """Parse a spec from YAML and add file/line info to errors, if it's available.
-
-    Parse a ``Spec`` from the supplied string, but also intercept any syntax errors and
-    add file/line information for debugging using file/line annotations from the string.
-
-    Arguments:
-        string: a string representing a ``Spec`` from config YAML.
-    """
-    try:
-        return spack.spec.Spec(string)
-    except SpecSyntaxError as e:
-        mark = get_mark_from_yaml_data(string)
-        if mark:
-            msg = f"{mark.name}:{mark.line + 1}: {str(e)}"
-            raise SpecSyntaxError(msg) from e
-        raise e
-
-
-class RequirementParser:
-    """Parses requirements from package.py files and configuration, and returns rules."""
-
-    def __init__(self, configuration):
-        self.config = configuration
-
-    def rules(self, pkg: "spack.package_base.PackageBase") -> List[RequirementRule]:
-        result = []
-        result.extend(self.rules_from_package_py(pkg))
-        result.extend(self.rules_from_require(pkg))
-        result.extend(self.rules_from_prefer(pkg))
-        result.extend(self.rules_from_conflict(pkg))
-        return result
-
-    def rules_from_package_py(self, pkg) -> List[RequirementRule]:
-        rules = []
-        for when_spec, requirement_list in pkg.requirements.items():
-            for requirements, policy, message in requirement_list:
-                rules.append(
-                    RequirementRule(
-                        pkg_name=pkg.name,
-                        policy=policy,
-                        requirements=requirements,
-                        kind=RequirementKind.PACKAGE,
-                        condition=when_spec,
-                        message=message,
-                    )
-                )
-        return rules
-
-    def rules_from_virtual(self, virtual_str: str) -> List[RequirementRule]:
-        requirements = self.config.get("packages", {}).get(virtual_str, {}).get("require", [])
-        return self._rules_from_requirements(
-            virtual_str, requirements, kind=RequirementKind.VIRTUAL
-        )
-
-    def rules_from_require(self, pkg: "spack.package_base.PackageBase") -> List[RequirementRule]:
-        kind, requirements = self._raw_yaml_data(pkg, section="require")
-        return self._rules_from_requirements(pkg.name, requirements, kind=kind)
-
-    def rules_from_prefer(self, pkg: "spack.package_base.PackageBase") -> List[RequirementRule]:
-        result = []
-        kind, preferences = self._raw_yaml_data(pkg, section="prefer")
-        for item in preferences:
-            spec, condition, message = self._parse_prefer_conflict_item(item)
-            result.append(
-                # A strong preference is defined as:
-                #
-                #   require:
-                #   - any_of: [spec_str, "@:"]
-                RequirementRule(
-                    pkg_name=pkg.name,
-                    policy="any_of",
-                    requirements=[spec, spack.spec.Spec("@:")],
-                    kind=kind,
-                    message=message,
-                    condition=condition,
-                )
-            )
-        return result
-
-    def rules_from_conflict(self, pkg: "spack.package_base.PackageBase") -> List[RequirementRule]:
-        result = []
-        kind, conflicts = self._raw_yaml_data(pkg, section="conflict")
-        for item in conflicts:
-            spec, condition, message = self._parse_prefer_conflict_item(item)
-            result.append(
-                # A conflict is defined as:
-                #
-                #   require:
-                #   - one_of: [spec_str, "@:"]
-                RequirementRule(
-                    pkg_name=pkg.name,
-                    policy="one_of",
-                    requirements=[spec, spack.spec.Spec("@:")],
-                    kind=kind,
-                    message=message,
-                    condition=condition,
-                )
-            )
-        return result
-
-    def _parse_prefer_conflict_item(self, item):
-        # The item is either a string or an object with at least a "spec" attribute
-        if isinstance(item, str):
-            spec = parse_spec_from_yaml_string(item)
-            condition = spack.spec.Spec()
-            message = None
-        else:
-            spec = parse_spec_from_yaml_string(item["spec"])
-            condition = spack.spec.Spec(item.get("when"))
-            message = item.get("message")
-        return spec, condition, message
-
-    def _raw_yaml_data(self, pkg: "spack.package_base.PackageBase", *, section: str):
-        config = self.config.get("packages")
-        data = config.get(pkg.name, {}).get(section, [])
-        kind = RequirementKind.PACKAGE
-        if not data:
-            data = config.get("all", {}).get(section, [])
-            kind = RequirementKind.DEFAULT
-        return kind, data
-
-    def _rules_from_requirements(
-        self, pkg_name: str, requirements, *, kind: RequirementKind
-    ) -> List[RequirementRule]:
-        """Manipulate requirements from packages.yaml, and return a list of tuples
-        with a uniform structure (name, policy, requirements).
-        """
-        if isinstance(requirements, str):
-            requirements = [requirements]
-
-        rules = []
-        for requirement in requirements:
-            # A string is equivalent to a one_of group with a single element
-            if isinstance(requirement, str):
-                requirement = {"one_of": [requirement]}
-
-            for policy in ("spec", "one_of", "any_of"):
-                if policy not in requirement:
-                    continue
-
-                constraints = requirement[policy]
-                # "spec" is for specifying a single spec
-                if policy == "spec":
-                    constraints = [constraints]
-                    policy = "one_of"
-
-                # validate specs from YAML first, and fail with line numbers if parsing fails.
-                constraints = [
-                    parse_spec_from_yaml_string(constraint) for constraint in constraints
-                ]
-                when_str = requirement.get("when")
-                when = parse_spec_from_yaml_string(when_str) if when_str else spack.spec.Spec()
-
-                constraints = [
-                    x
-                    for x in constraints
-                    if not self.reject_requirement_constraint(pkg_name, constraint=x, kind=kind)
-                ]
-                if not constraints:
-                    continue
-
-                rules.append(
-                    RequirementRule(
-                        pkg_name=pkg_name,
-                        policy=policy,
-                        requirements=constraints,
-                        kind=kind,
-                        message=requirement.get("message"),
-                        condition=when,
-                    )
-                )
-        return rules
-
-    def reject_requirement_constraint(
-        self, pkg_name: str, *, constraint: spack.spec.Spec, kind: RequirementKind
-    ) -> bool:
-        """Returns True if a requirement constraint should be rejected"""
-        if kind == RequirementKind.DEFAULT:
-            # Requirements under all: are applied only if they are satisfiable considering only
-            # package rules, so e.g. variants must exist etc. Otherwise, they are rejected.
-            try:
-                s = spack.spec.Spec(pkg_name)
-                s.constrain(constraint)
-                s.validate_or_raise()
-            except spack.error.SpackError as e:
-                tty.debug(
-                    f"[SETUP] Rejecting the default '{constraint}' requirement "
-                    f"on '{pkg_name}': {str(e)}",
-                    level=2,
-                )
-                return True
-        return False
-
-
class CompilerParser:
    """Parses configuration files, and builds a list of possible compilers for the solve."""
@@ -3454,7 +3238,7 @@ def depends_on(
        for language in languages:
            body_str += f'   attr("language", {node_variable}, "{language}")'

-       head_clauses = self._setup.spec_clauses(dependency_spec, body=False)
+       head_clauses = self._setup.spec_clauses(dependency_spec, body=False, node=True)

        runtime_pkg = dependency_spec.name
@@ -1003,6 +1003,8 @@ variant_default_not_used(node(ID, Package), Variant, Value)
    node_has_variant(node(ID, Package), Variant, _),
    not attr("variant_value", node(ID, Package), Variant, Value),
+   not propagate(node(ID, Package), variant_value(Variant, _, _)),
    % variants set explicitly don't count for this metric
+   not attr("variant_set", node(ID, Package), Variant, _),
    attr("node", node(ID, Package)).

% The variant is set in an external spec

lib/spack/spack/solver/requirements.py  (new file, 232 lines)
@@ -0,0 +1,232 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import enum
from typing import List, NamedTuple, Optional, Sequence

from llnl.util import tty

import spack.config
import spack.error
import spack.package_base
import spack.spec
from spack.config import get_mark_from_yaml_data


class RequirementKind(enum.Enum):
    """Purpose / provenance of a requirement"""

    #: Default requirement expressed under the 'all' attribute of packages.yaml
    DEFAULT = enum.auto()
    #: Requirement expressed on a virtual package
    VIRTUAL = enum.auto()
    #: Requirement expressed on a specific package
    PACKAGE = enum.auto()


class RequirementRule(NamedTuple):
    """Data class to collect information on a requirement"""

    pkg_name: str
    policy: str
    requirements: Sequence[spack.spec.Spec]
    condition: spack.spec.Spec
    kind: RequirementKind
    message: Optional[str]
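To make the fields concrete, here is a hypothetical rule as the parser might build it for a requirement on a virtual package; all values are invented for illustration and assume a working Spack checkout:

import spack.spec
from spack.solver.requirements import RequirementKind, RequirementRule

rule = RequirementRule(
    pkg_name="mpi",                           # the (virtual) package the rule constrains
    policy="one_of",                          # exactly one requirement must hold
    requirements=[spack.spec.Spec("mpich")],  # candidate constraint specs
    condition=spack.spec.Spec(),              # empty spec: the rule always applies
    kind=RequirementKind.VIRTUAL,
    message=None,                             # optional user-facing error message
)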
class RequirementParser:
    """Parses requirements from package.py files and configuration, and returns rules."""

    def __init__(self, configuration: spack.config.Configuration):
        self.config = configuration

    def rules(self, pkg: spack.package_base.PackageBase) -> List[RequirementRule]:
        result = []
        result.extend(self.rules_from_package_py(pkg))
        result.extend(self.rules_from_require(pkg))
        result.extend(self.rules_from_prefer(pkg))
        result.extend(self.rules_from_conflict(pkg))
        return result

    def rules_from_package_py(self, pkg: spack.package_base.PackageBase) -> List[RequirementRule]:
        rules = []
        for when_spec, requirement_list in pkg.requirements.items():
            for requirements, policy, message in requirement_list:
                rules.append(
                    RequirementRule(
                        pkg_name=pkg.name,
                        policy=policy,
                        requirements=requirements,
                        kind=RequirementKind.PACKAGE,
                        condition=when_spec,
                        message=message,
                    )
                )
        return rules

    def rules_from_virtual(self, virtual_str: str) -> List[RequirementRule]:
        requirements = self.config.get("packages", {}).get(virtual_str, {}).get("require", [])
        return self._rules_from_requirements(
            virtual_str, requirements, kind=RequirementKind.VIRTUAL
        )

    def rules_from_require(self, pkg: spack.package_base.PackageBase) -> List[RequirementRule]:
        kind, requirements = self._raw_yaml_data(pkg, section="require")
        return self._rules_from_requirements(pkg.name, requirements, kind=kind)

    def rules_from_prefer(self, pkg: spack.package_base.PackageBase) -> List[RequirementRule]:
        result = []
        kind, preferences = self._raw_yaml_data(pkg, section="prefer")
        for item in preferences:
            spec, condition, message = self._parse_prefer_conflict_item(item)
            result.append(
                # A strong preference is defined as:
                #
                #   require:
                #   - any_of: [spec_str, "@:"]
                RequirementRule(
                    pkg_name=pkg.name,
                    policy="any_of",
                    requirements=[spec, spack.spec.Spec("@:")],
                    kind=kind,
                    message=message,
                    condition=condition,
                )
            )
        return result

    def rules_from_conflict(self, pkg: spack.package_base.PackageBase) -> List[RequirementRule]:
        result = []
        kind, conflicts = self._raw_yaml_data(pkg, section="conflict")
        for item in conflicts:
            spec, condition, message = self._parse_prefer_conflict_item(item)
            result.append(
                # A conflict is defined as:
                #
                #   require:
                #   - one_of: [spec_str, "@:"]
                RequirementRule(
                    pkg_name=pkg.name,
                    policy="one_of",
                    requirements=[spec, spack.spec.Spec("@:")],
                    kind=kind,
                    message=message,
                    condition=condition,
                )
            )
        return result
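A note on the "@:" trick in the two methods above: Spec("@:") constrains the version to the full range, so every spec satisfies it. any_of needs at least one member to hold, so any_of: [spec, "@:"] can never fail and only steers the optimizer toward spec (a strong preference). one_of needs exactly one member to hold, and since "@:" always holds, spec must not (a conflict). A tiny sketch of the always-satisfied constraint, assuming a working Spack checkout:

import spack.spec

anything = spack.spec.Spec("@:")  # full version range: satisfied by every spec

assert spack.spec.Spec("zlib@1.3").satisfies(anything)
assert spack.spec.Spec("cmake@3.27").satisfies(anything)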
    def _parse_prefer_conflict_item(self, item):
        # The item is either a string or an object with at least a "spec" attribute
        if isinstance(item, str):
            spec = parse_spec_from_yaml_string(item)
            condition = spack.spec.Spec()
            message = None
        else:
            spec = parse_spec_from_yaml_string(item["spec"])
            condition = spack.spec.Spec(item.get("when"))
            message = item.get("message")
        return spec, condition, message

    def _raw_yaml_data(self, pkg: spack.package_base.PackageBase, *, section: str):
        config = self.config.get("packages")
        data = config.get(pkg.name, {}).get(section, [])
        kind = RequirementKind.PACKAGE
        if not data:
            data = config.get("all", {}).get(section, [])
            kind = RequirementKind.DEFAULT
        return kind, data

    def _rules_from_requirements(
        self, pkg_name: str, requirements, *, kind: RequirementKind
    ) -> List[RequirementRule]:
        """Manipulate requirements from packages.yaml, and return a list of tuples
        with a uniform structure (name, policy, requirements).
        """
        if isinstance(requirements, str):
            requirements = [requirements]

        rules = []
        for requirement in requirements:
            # A string is equivalent to a one_of group with a single element
            if isinstance(requirement, str):
                requirement = {"one_of": [requirement]}

            for policy in ("spec", "one_of", "any_of"):
                if policy not in requirement:
                    continue

                constraints = requirement[policy]
                # "spec" is for specifying a single spec
                if policy == "spec":
                    constraints = [constraints]
                    policy = "one_of"

                # validate specs from YAML first, and fail with line numbers if parsing fails.
                constraints = [
                    parse_spec_from_yaml_string(constraint) for constraint in constraints
                ]
                when_str = requirement.get("when")
                when = parse_spec_from_yaml_string(when_str) if when_str else spack.spec.Spec()

                constraints = [
                    x
                    for x in constraints
                    if not self.reject_requirement_constraint(pkg_name, constraint=x, kind=kind)
                ]
                if not constraints:
                    continue

                rules.append(
                    RequirementRule(
                        pkg_name=pkg_name,
                        policy=policy,
                        requirements=constraints,
                        kind=kind,
                        message=requirement.get("message"),
                        condition=when,
                    )
                )
        return rules
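_rules_from_requirements accepts three YAML spellings and collapses them into policy groups before building rules. A minimal runnable sketch of that normalization step (illustrative values only):

# "mpich"              -> {"one_of": ["mpich"]}   (bare string)
# {"spec": "mpich"}    -> one_of with one member  ("spec" is sugar for a single spec)
# {"any_of": [...]}    -> kept as-is
requirement = "mpich"
if isinstance(requirement, str):
    requirement = {"one_of": [requirement]}
assert requirement == {"one_of": ["mpich"]}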
    def reject_requirement_constraint(
        self, pkg_name: str, *, constraint: spack.spec.Spec, kind: RequirementKind
    ) -> bool:
        """Returns True if a requirement constraint should be rejected"""
        if kind == RequirementKind.DEFAULT:
            # Requirements under all: are applied only if they are satisfiable considering only
            # package rules, so e.g. variants must exist etc. Otherwise, they are rejected.
            try:
                s = spack.spec.Spec(pkg_name)
                s.constrain(constraint)
                s.validate_or_raise()
            except spack.error.SpackError as e:
                tty.debug(
                    f"[SETUP] Rejecting the default '{constraint}' requirement "
                    f"on '{pkg_name}': {str(e)}",
                    level=2,
                )
                return True
        return False


def parse_spec_from_yaml_string(string: str) -> spack.spec.Spec:
    """Parse a spec from YAML and add file/line info to errors, if it's available.

    Parse a ``Spec`` from the supplied string, but also intercept any syntax errors and
    add file/line information for debugging using file/line annotations from the string.

    Arguments:
        string: a string representing a ``Spec`` from config YAML.
    """
    try:
        return spack.spec.Spec(string)
    except spack.error.SpecSyntaxError as e:
        mark = get_mark_from_yaml_data(string)
        if mark:
            msg = f"{mark.name}:{mark.line + 1}: {str(e)}"
            raise spack.error.SpecSyntaxError(msg) from e
        raise e
@@ -34,7 +34,8 @@
import spack.caches
import spack.config
import spack.error
-import spack.mirror
+import spack.mirrors.layout
+import spack.mirrors.utils
import spack.resource
import spack.spec
import spack.util.crypto
@@ -353,8 +354,8 @@ def __init__(
        url_or_fetch_strategy,
        *,
        name=None,
-       mirror_paths: Optional["spack.mirror.MirrorLayout"] = None,
-       mirrors: Optional[Iterable["spack.mirror.Mirror"]] = None,
+       mirror_paths: Optional["spack.mirrors.layout.MirrorLayout"] = None,
+       mirrors: Optional[Iterable["spack.mirrors.mirror.Mirror"]] = None,
        keep=False,
        path=None,
        lock=True,
@@ -488,7 +489,7 @@ def _generate_fetchers(self, mirror_only=False) -> Generator["fs.FetchStrategy",
        # Insert fetchers in the order that the URLs are provided.
        fetchers[:0] = (
            fs.from_url_scheme(
-               url_util.join(mirror.fetch_url, self.mirror_layout.path),
+               url_util.join(mirror.fetch_url, *self.mirror_layout.path.split(os.sep)),
                checksum=digest,
                expand=expand,
                extension=extension,
@@ -601,7 +602,7 @@ def cache_local(self):
        spack.caches.FETCH_CACHE.store(self.fetcher, self.mirror_layout.path)

    def cache_mirror(
-       self, mirror: "spack.caches.MirrorCache", stats: "spack.mirror.MirrorStats"
+       self, mirror: "spack.caches.MirrorCache", stats: "spack.mirrors.utils.MirrorStats"
    ) -> None:
        """Perform a fetch if the resource is not already cached
@@ -32,7 +32,7 @@
import spack.fetch_strategy
import spack.hooks.sbang as sbang
import spack.main
-import spack.mirror
+import spack.mirrors.mirror
import spack.paths
import spack.spec
import spack.stage
@@ -324,8 +324,8 @@ def test_push_and_fetch_keys(mock_gnupghome, tmp_path):

    mirror = os.path.join(testpath, "mirror")
    mirrors = {"test-mirror": url_util.path_to_file_url(mirror)}
-   mirrors = spack.mirror.MirrorCollection(mirrors)
-   mirror = spack.mirror.Mirror(url_util.path_to_file_url(mirror))
+   mirrors = spack.mirrors.mirror.MirrorCollection(mirrors)
+   mirror = spack.mirrors.mirror.Mirror(url_util.path_to_file_url(mirror))

    gpg_dir1 = os.path.join(testpath, "gpg1")
    gpg_dir2 = os.path.join(testpath, "gpg2")
@@ -9,7 +9,7 @@
import pytest

import spack.binary_distribution as bd
-import spack.mirror
+import spack.mirrors.mirror
import spack.spec
from spack.installer import PackageInstaller

@@ -23,7 +23,7 @@ def test_build_tarball_overwrite(install_mockery, mock_fetch, monkeypatch, tmp_p
    specs = [spec]

    # populate cache, everything is new
-   mirror = spack.mirror.Mirror.from_local_path(str(tmp_path))
+   mirror = spack.mirrors.mirror.Mirror.from_local_path(str(tmp_path))
    with bd.make_uploader(mirror) as uploader:
        skipped = uploader.push_or_raise(specs)
        assert not skipped
@@ -14,7 +14,7 @@
import spack.config
import spack.environment as ev
import spack.main
-import spack.mirror
+import spack.mirrors.utils
import spack.spec

_bootstrap = spack.main.SpackCommand("bootstrap")
@@ -182,8 +182,8 @@ def test_bootstrap_mirror_metadata(mutable_config, linux_os, monkeypatch, tmpdir
    `spack bootstrap add`. Here we don't download data, since that would be an
    expensive operation for a unit test.
    """
-   old_create = spack.mirror.create
-   monkeypatch.setattr(spack.mirror, "create", lambda p, s: old_create(p, []))
+   old_create = spack.mirrors.utils.create
+   monkeypatch.setattr(spack.mirrors.utils, "create", lambda p, s: old_create(p, []))
    monkeypatch.setattr(spack.spec.Spec, "concretized", lambda p: p)

    # Create the mirror in a temporary folder
@@ -16,7 +16,7 @@
import spack.environment as ev
import spack.error
import spack.main
-import spack.mirror
+import spack.mirrors.mirror
import spack.spec
import spack.util.url
from spack.installer import PackageInstaller
@@ -385,7 +385,9 @@ def test_correct_specs_are_pushed(

    class DontUpload(spack.binary_distribution.Uploader):
        def __init__(self):
-           super().__init__(spack.mirror.Mirror.from_local_path(str(tmpdir)), False, False)
+           super().__init__(
+               spack.mirrors.mirror.Mirror.from_local_path(str(tmpdir)), False, False
+           )
            self.pushed = []

        def push(self, specs: List[spack.spec.Spec]):
@@ -17,6 +17,7 @@
import spack
import spack.binary_distribution
import spack.ci as ci
import spack.cmd
import spack.cmd.ci
import spack.environment as ev
import spack.hash_types as ht
@@ -20,6 +20,8 @@
_p1 = (
    "p1",
    """\
+from spack.package import *
+
class P1(Package):
    version("1.0")

@@ -35,6 +37,8 @@ class P1(Package):
_p2 = (
    "p2",
    """\
+from spack.package import *
+
class P2(Package):
    version("1.0")

@@ -48,6 +52,8 @@ class P2(Package):
_p3 = (
    "p3",
    """\
+from spack.package import *
+
class P3(Package):
    version("1.0")

@@ -58,6 +64,8 @@ class P3(Package):
_i1 = (
    "i1",
    """\
+from spack.package import *
+
class I1(Package):
    version("1.0")

@@ -73,6 +81,8 @@ class I1(Package):
_i2 = (
    "i2",
    """\
+from spack.package import *
+
class I2(Package):
    version("1.0")

@@ -89,6 +99,8 @@ class I2(Package):
_p4 = (
    "p4",
    """\
+from spack.package import *
+
class P4(Package):
    version("1.0")
@@ -462,6 +462,8 @@ def test_environment_with_version_range_in_compiler_doesnt_fail(tmp_path):
_pkga = (
    "a0",
    """\
+from spack.package import *
+
class A0(Package):
    version("1.2")
    version("1.1")

@@ -475,6 +477,8 @@ class A0(Package):
_pkgb = (
    "b0",
    """\
+from spack.package import *
+
class B0(Package):
    version("1.2")
    version("1.1")

@@ -485,6 +489,8 @@ class B0(Package):
_pkgc = (
    "c0",
    """\
+from spack.package import *
+
class C0(Package):
    version("1.2")
    version("1.1")

@@ -497,6 +503,8 @@ class C0(Package):
_pkgd = (
    "d0",
    """\
+from spack.package import *
+
class D0(Package):
    version("1.2")
    version("1.1")

@@ -510,6 +518,8 @@ class D0(Package):
_pkge = (
    "e0",
    """\
+from spack.package import *
+
class E0(Package):
    tags = ["tag1", "tag2"]
@@ -11,7 +11,7 @@
import spack.config
import spack.environment as ev
import spack.error
-import spack.mirror
+import spack.mirrors.utils
import spack.spec
import spack.util.url as url_util
import spack.version
@@ -74,7 +74,7 @@ def test_mirror_skip_unstable(tmpdir_factory, mock_packages, config, source_for_
    mirror_dir = str(tmpdir_factory.mktemp("mirror-dir"))

    specs = [spack.spec.Spec(x).concretized() for x in ["git-test", "trivial-pkg-with-valid-hash"]]
-   spack.mirror.create(mirror_dir, specs, skip_unstable_versions=True)
+   spack.mirrors.utils.create(mirror_dir, specs, skip_unstable_versions=True)

    assert set(os.listdir(mirror_dir)) - set(["_source-cache"]) == set(
        ["trivial-pkg-with-valid-hash"]
    )
@@ -10,6 +10,7 @@

from llnl.util.filesystem import mkdirp, working_dir

import spack.cmd
import spack.cmd.pkg
import spack.main
import spack.paths
@@ -295,7 +295,7 @@ def test_style_with_black(flake8_package_with_errors):


def test_skip_tools():
-   output = style("--skip", "import-check,isort,mypy,black,flake8")
+   output = style("--skip", "import,isort,mypy,black,flake8")
    assert "Nothing to run" in output


@@ -314,6 +314,7 @@ class Example(spack.build_systems.autotools.AutotoolsPackage):
def foo(config: "spack.error.SpackError"):
    # the type hint is quoted, so it should not be removed
    spack.util.executable.Executable("example")
+   print(spack.__version__)
'''
    file.write_text(contents)
    root = str(tmp_path)
@@ -330,6 +331,7 @@ def foo(config: "spack.error.SpackError"):

    assert "issues.py: redundant import: spack.cmd" in output
    assert "issues.py: redundant import: spack.config" not in output  # comment prevents removal
+   assert "issues.py: missing import: spack" in output  # used by spack.__version__
    assert "issues.py: missing import: spack.build_systems.autotools" in output
    assert "issues.py: missing import: spack.util.executable" in output
    assert "issues.py: missing import: spack.error" not in output  # not directly used
@@ -349,6 +351,7 @@ def foo(config: "spack.error.SpackError"):
    output = output_buf.getvalue()
    assert exit_code == 1
    assert "issues.py: redundant import: spack.cmd" in output
+   assert "issues.py: missing import: spack" in output
    assert "issues.py: missing import: spack.build_systems.autotools" in output
    assert "issues.py: missing import: spack.util.executable" in output

@@ -369,8 +372,9 @@ def foo(config: "spack.error.SpackError"):
    # check that the file was fixed
    new_contents = file.read_text()
    assert "import spack.cmd" not in new_contents
-   assert "import spack.build_systems.autotools" in new_contents
-   assert "import spack.util.executable" in new_contents
+   assert "import spack\n" in new_contents
+   assert "import spack.build_systems.autotools\n" in new_contents
+   assert "import spack.util.executable\n" in new_contents


@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires Python 3.9+")
@@ -389,3 +393,16 @@ def test_run_import_check_syntax_error_and_missing(tmp_path: pathlib.Path):
    assert "syntax-error.py: could not parse" in output
    assert "missing.py: could not parse" in output
    assert exit_code == 1
+
+
+def test_case_sensitive_imports(tmp_path: pathlib.Path):
+    # example.Example is a name, while example.example is a module.
+    (tmp_path / "lib" / "spack" / "example").mkdir(parents=True)
+    (tmp_path / "lib" / "spack" / "example" / "__init__.py").write_text("class Example:\n    pass")
+    (tmp_path / "lib" / "spack" / "example" / "example.py").write_text("foo = 1")
+    assert spack.cmd.style._module_part(str(tmp_path), "example.Example") == "example"
+
+
+def test_pkg_imports():
+    assert spack.cmd.style._module_part(spack.paths.prefix, "spack.pkg.builtin.boost") is None
+    assert spack.cmd.style._module_part(spack.paths.prefix, "spack.pkg") is None
@@ -210,7 +210,7 @@ def test_missing_command():
    """Ensure that we raise the expected exception if the desired command is
    not present.
    """
-   with pytest.raises(spack.extensions.CommandNotFoundError):
+   with pytest.raises(spack.cmd.CommandNotFoundError):
        spack.cmd.get_module("no-such-command")


@@ -220,9 +220,9 @@ def test_missing_command():
        ("/my/bad/extension", spack.extensions.ExtensionNamingError),
        ("", spack.extensions.ExtensionNamingError),
        ("/my/bad/spack--extra-hyphen", spack.extensions.ExtensionNamingError),
-       ("/my/good/spack-extension", spack.extensions.CommandNotFoundError),
-       ("/my/still/good/spack-extension/", spack.extensions.CommandNotFoundError),
-       ("/my/spack-hyphenated-extension", spack.extensions.CommandNotFoundError),
+       ("/my/good/spack-extension", spack.cmd.CommandNotFoundError),
+       ("/my/still/good/spack-extension/", spack.cmd.CommandNotFoundError),
+       ("/my/spack-hyphenated-extension", spack.cmd.CommandNotFoundError),
    ],
    ids=["no_stem", "vacuous", "leading_hyphen", "basic_good", "trailing_slash", "hyphenated"],
)
@@ -188,6 +188,8 @@ def repo_with_changing_recipe(tmp_path_factory, mutable_mock_repo):

    packages_dir = repo_dir / "packages"
    root_pkg_str = """
+from spack.package import *
+
class Root(Package):
    homepage = "http://www.example.com"
    url = "http://www.example.com/root-1.0.tar.gz"
@@ -202,6 +204,8 @@ class Root(Package):
    package_py.write_text(root_pkg_str)

    changing_template = """
+from spack.package import *
+
class Changing(Package):
    homepage = "http://www.example.com"
    url = "http://www.example.com/changing-1.0.tar.gz"
@@ -15,6 +15,7 @@
import llnl.util.tty as tty
from llnl.util.filesystem import join_path, touch, touchp

import spack
import spack.config
import spack.directory_layout
import spack.environment as ev
@@ -148,6 +148,8 @@ def test_version_type_validation():
_pkgx = (
    "x",
    """\
+from spack.package import *
+
class X(Package):
    version("1.3")
    version("1.2")
@@ -166,6 +168,8 @@ class X(Package):
_pkgy = (
    "y",
    """\
+from spack.package import *
+
class Y(Package):
    version("2.1")
    version("2.0")
@@ -219,10 +223,10 @@ class MockPackage:
        disable_redistribute = {}

    cls = MockPackage
-   spack.directives._execute_redistribute(cls, source=False, when="@1.0")
+   spack.directives._execute_redistribute(cls, source=False, binary=None, when="@1.0")
    spec_key = spack.directives._make_when_spec("@1.0")
    assert not cls.disable_redistribute[spec_key].binary
    assert cls.disable_redistribute[spec_key].source

-   spack.directives._execute_redistribute(cls, binary=False, when="@1.0")
+   spack.directives._execute_redistribute(cls, source=None, binary=False, when="@1.0")
    assert cls.disable_redistribute[spec_key].binary
    assert cls.disable_redistribute[spec_key].source
@@ -73,5 +73,18 @@ def test_ascii_graph_mpileaks(config, mock_packages, monkeypatch):
o | libdwarf
|/
o libelf
"""
        or graph_str
        == r"""o mpileaks
|\
| o callpath
|/|
| o dyninst
| |\
o | | mpich
 / /
| o libdwarf
|/
o libelf
"""
    )
@@ -16,7 +16,8 @@
import spack.database
import spack.error
import spack.installer
-import spack.mirror
+import spack.mirrors.mirror
+import spack.mirrors.utils
import spack.package_base
import spack.patch
import spack.repo
@@ -615,7 +616,7 @@ def test_install_from_binary_with_missing_patch_succeeds(
    temporary_store.db.add(s, explicit=True)

    # Push it to a binary cache
-   mirror = spack.mirror.Mirror.from_local_path(str(tmp_path / "my_build_cache"))
+   mirror = spack.mirrors.mirror.Mirror.from_local_path(str(tmp_path / "my_build_cache"))
    with binary_distribution.make_uploader(mirror=mirror) as uploader:
        uploader.push_or_raise([s])

@@ -628,7 +629,7 @@ def test_install_from_binary_with_missing_patch_succeeds(
    PackageInstaller([s.package], explicit=True).install()

    # Binary install: succeeds, we don't need the patch.
-   spack.mirror.add(mirror)
+   spack.mirrors.utils.add(mirror)
    PackageInstaller(
        [s.package],
        explicit=True,
@@ -1249,3 +1249,14 @@ def test_find_input_types(tmp_path: pathlib.Path):

    with pytest.raises(TypeError):
        fs.find(1, "file.txt")  # type: ignore
+
+
+def test_edit_in_place_through_temporary_file(tmp_path):
+    (tmp_path / "example.txt").write_text("Hello")
+    current_ino = os.stat(tmp_path / "example.txt").st_ino
+    with fs.edit_in_place_through_temporary_file(tmp_path / "example.txt") as temporary:
+        os.unlink(temporary)
+        with open(temporary, "w") as f:
+            f.write("World")
+    assert (tmp_path / "example.txt").read_text() == "World"
+    assert os.stat(tmp_path / "example.txt").st_ino == current_ino
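The property this new test pins down is that the edited file keeps its inode, so hard links, ownership, and permissions survive the edit. A minimal sketch of such a context manager, with names of my own choosing (not Spack's implementation in llnl.util.filesystem):

import contextlib
import os
import shutil
import tempfile


@contextlib.contextmanager
def edit_in_place_sketch(path):
    """Yield a temporary copy of ``path``; on exit, copy the edited content
    back into the original file object instead of renaming over it."""
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(os.path.abspath(path)))
    os.close(fd)
    try:
        shutil.copyfile(path, tmp)
        yield tmp
        # copyfile truncates and rewrites the destination in place, so the
        # inode of ``path`` is preserved (os.replace would swap it out).
        shutil.copyfile(tmp, path)
    finally:
        if os.path.exists(tmp):
            os.unlink(tmp)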
@@ -14,7 +14,9 @@
import spack.caches
import spack.config
import spack.fetch_strategy
-import spack.mirror
+import spack.mirrors.layout
+import spack.mirrors.mirror
+import spack.mirrors.utils
import spack.patch
import spack.stage
import spack.util.executable
@@ -60,7 +62,7 @@ def check_mirror():
    with spack.config.override("mirrors", mirrors):
        with spack.config.override("config:checksum", False):
            specs = [Spec(x).concretized() for x in repos]
-           spack.mirror.create(mirror_root, specs)
+           spack.mirrors.utils.create(mirror_root, specs)

            # Stage directory exists
            assert os.path.isdir(mirror_root)
@@ -68,7 +70,9 @@ def check_mirror():
            for spec in specs:
                fetcher = spec.package.fetcher
                per_package_ref = os.path.join(spec.name, "-".join([spec.name, str(spec.version)]))
-               mirror_layout = spack.mirror.default_mirror_layout(fetcher, per_package_ref)
+               mirror_layout = spack.mirrors.layout.default_mirror_layout(
+                   fetcher, per_package_ref
+               )
                expected_path = os.path.join(mirror_root, mirror_layout.path)
                assert os.path.exists(expected_path)

@@ -135,16 +139,16 @@ def test_all_mirror(mock_git_repository, mock_svn_repository, mock_hg_repository
@pytest.mark.parametrize(
    "mirror",
    [
-       spack.mirror.Mirror(
+       spack.mirrors.mirror.Mirror(
            {"fetch": "https://example.com/fetch", "push": "https://example.com/push"}
        )
    ],
)
-def test_roundtrip_mirror(mirror: spack.mirror.Mirror):
+def test_roundtrip_mirror(mirror: spack.mirrors.mirror.Mirror):
    mirror_yaml = mirror.to_yaml()
-   assert spack.mirror.Mirror.from_yaml(mirror_yaml) == mirror
+   assert spack.mirrors.mirror.Mirror.from_yaml(mirror_yaml) == mirror
    mirror_json = mirror.to_json()
-   assert spack.mirror.Mirror.from_json(mirror_json) == mirror
+   assert spack.mirrors.mirror.Mirror.from_json(mirror_json) == mirror


@pytest.mark.parametrize(
@@ -152,14 +156,14 @@ def test_roundtrip_mirror(mirror: spack.mirror.Mirror):
)
def test_invalid_yaml_mirror(invalid_yaml):
    with pytest.raises(SpackYAMLError, match="error parsing YAML") as e:
-       spack.mirror.Mirror.from_yaml(invalid_yaml)
+       spack.mirrors.mirror.Mirror.from_yaml(invalid_yaml)
    assert invalid_yaml in str(e.value)


@pytest.mark.parametrize("invalid_json, error_message", [("{13:", "Expecting property name")])
def test_invalid_json_mirror(invalid_json, error_message):
    with pytest.raises(sjson.SpackJSONError) as e:
-       spack.mirror.Mirror.from_json(invalid_json)
+       spack.mirrors.mirror.Mirror.from_json(invalid_json)
    exc_msg = str(e.value)
    assert exc_msg.startswith("error parsing JSON mirror:")
    assert error_message in exc_msg
@@ -168,9 +172,9 @@ def test_invalid_json_mirror(invalid_json, error_message):
@pytest.mark.parametrize(
    "mirror_collection",
    [
-       spack.mirror.MirrorCollection(
+       spack.mirrors.mirror.MirrorCollection(
            mirrors={
-               "example-mirror": spack.mirror.Mirror(
+               "example-mirror": spack.mirrors.mirror.Mirror(
                    "https://example.com/fetch", "https://example.com/push"
                ).to_dict()
            }
@@ -179,9 +183,15 @@ def test_invalid_json_mirror(invalid_json, error_message):
)
def test_roundtrip_mirror_collection(mirror_collection):
    mirror_collection_yaml = mirror_collection.to_yaml()
-   assert spack.mirror.MirrorCollection.from_yaml(mirror_collection_yaml) == mirror_collection
+   assert (
+       spack.mirrors.mirror.MirrorCollection.from_yaml(mirror_collection_yaml)
+       == mirror_collection
+   )
    mirror_collection_json = mirror_collection.to_json()
-   assert spack.mirror.MirrorCollection.from_json(mirror_collection_json) == mirror_collection
+   assert (
+       spack.mirrors.mirror.MirrorCollection.from_json(mirror_collection_json)
+       == mirror_collection
+   )


@pytest.mark.parametrize(
@@ -189,14 +199,14 @@ def test_roundtrip_mirror_collection(mirror_collection):
)
def test_invalid_yaml_mirror_collection(invalid_yaml):
    with pytest.raises(SpackYAMLError, match="error parsing YAML") as e:
-       spack.mirror.MirrorCollection.from_yaml(invalid_yaml)
+       spack.mirrors.mirror.MirrorCollection.from_yaml(invalid_yaml)
    assert invalid_yaml in str(e.value)


@pytest.mark.parametrize("invalid_json, error_message", [("{13:", "Expecting property name")])
def test_invalid_json_mirror_collection(invalid_json, error_message):
    with pytest.raises(sjson.SpackJSONError) as e:
-       spack.mirror.MirrorCollection.from_json(invalid_json)
+       spack.mirrors.mirror.MirrorCollection.from_json(invalid_json)
    exc_msg = str(e.value)
    assert exc_msg.startswith("error parsing JSON mirror collection:")
    assert error_message in exc_msg
@@ -205,7 +215,7 @@ def test_invalid_json_mirror_collection(invalid_json, error_message):
def test_mirror_archive_paths_no_version(mock_packages, mock_archive):
    spec = Spec("trivial-install-test-package@=nonexistingversion").concretized()
    fetcher = spack.fetch_strategy.URLFetchStrategy(url=mock_archive.url)
-   spack.mirror.default_mirror_layout(fetcher, "per-package-ref", spec)
+   spack.mirrors.layout.default_mirror_layout(fetcher, "per-package-ref", spec)


def test_mirror_with_url_patches(mock_packages, monkeypatch):
@@ -238,10 +248,12 @@ def successful_make_alias(*args, **kwargs):
    monkeypatch.setattr(spack.fetch_strategy.URLFetchStrategy, "expand", successful_expand)
    monkeypatch.setattr(spack.patch, "apply_patch", successful_apply)
    monkeypatch.setattr(spack.caches.MirrorCache, "store", record_store)
-   monkeypatch.setattr(spack.mirror.DefaultLayout, "make_alias", successful_make_alias)
+   monkeypatch.setattr(
+       spack.mirrors.layout.DefaultLayout, "make_alias", successful_make_alias
+   )

    with spack.config.override("config:checksum", False):
-       spack.mirror.create(mirror_root, list(spec.traverse()))
+       spack.mirrors.utils.create(mirror_root, list(spec.traverse()))

    assert {
        "abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234",
@@ -268,7 +280,7 @@ def test_mirror_layout_make_alias(tmpdir):
    alias = os.path.join("zlib", "zlib-1.2.11.tar.gz")
    path = os.path.join("_source-cache", "archive", "c3", "c3e5.tar.gz")
    cache = spack.caches.MirrorCache(root=str(tmpdir), skip_unstable_versions=False)
-   layout = spack.mirror.DefaultLayout(alias, path)
+   layout = spack.mirrors.layout.DefaultLayout(alias, path)

    cache.store(MockFetcher(), layout.path)
    layout.make_alias(cache.root)
@@ -288,7 +300,7 @@ def test_mirror_layout_make_alias(tmpdir):
)
def test_get_all_versions(specs, expected_specs):
    specs = [Spec(s) for s in specs]
-   output_list = spack.mirror.get_all_versions(specs)
+   output_list = spack.mirrors.utils.get_all_versions(specs)
    output_list = [str(x) for x in output_list]
    # Compare sets since order is not important
    assert set(output_list) == set(expected_specs)
@@ -296,14 +308,14 @@ def test_get_all_versions(specs, expected_specs):

def test_update_1():
    # No change
-   m = spack.mirror.Mirror("https://example.com")
+   m = spack.mirrors.mirror.Mirror("https://example.com")
    assert not m.update({"url": "https://example.com"})
    assert m.to_dict() == "https://example.com"


def test_update_2():
    # Change URL, shouldn't expand to {"url": ...} dict.
-   m = spack.mirror.Mirror("https://example.com")
+   m = spack.mirrors.mirror.Mirror("https://example.com")
    assert m.update({"url": "https://example.org"})
    assert m.to_dict() == "https://example.org"
    assert m.fetch_url == "https://example.org"
@@ -312,7 +324,7 @@ def test_update_2():

def test_update_3():
    # Change fetch url, ensure minimal config
-   m = spack.mirror.Mirror("https://example.com")
+   m = spack.mirrors.mirror.Mirror("https://example.com")
    assert m.update({"url": "https://example.org"}, "fetch")
    assert m.to_dict() == {"url": "https://example.com", "fetch": "https://example.org"}
    assert m.fetch_url == "https://example.org"
@@ -321,7 +333,7 @@ def test_update_3():

def test_update_4():
    # Change push url, ensure minimal config
-   m = spack.mirror.Mirror("https://example.com")
+   m = spack.mirrors.mirror.Mirror("https://example.com")
    assert m.update({"url": "https://example.org"}, "push")
    assert m.to_dict() == {"url": "https://example.com", "push": "https://example.org"}
    assert m.push_url == "https://example.org"
@@ -331,7 +343,7 @@ def test_update_4():
@pytest.mark.parametrize("direction", ["fetch", "push"])
def test_update_connection_params(direction, tmpdir, monkeypatch):
    """Test whether new connection params expand the mirror config to a dict."""
-   m = spack.mirror.Mirror("https://example.com", "example")
+   m = spack.mirrors.mirror.Mirror("https://example.com", "example")

    assert m.update(
        {
@@ -12,6 +12,7 @@
import spack.cmd.modules
import spack.config
import spack.error
import spack.modules
import spack.modules.common
import spack.modules.tcl
import spack.package_base
@@ -14,7 +14,7 @@

import pytest

-import spack.mirror
+import spack.mirrors.mirror
from spack.oci.image import Digest, ImageReference, default_config, default_manifest
from spack.oci.oci import (
    copy_missing_layers,
@@ -474,7 +474,7 @@ def test_copy_missing_layers(tmpdir, config):


def test_image_from_mirror():
-   mirror = spack.mirror.Mirror("oci://example.com/image")
+   mirror = spack.mirrors.mirror.Mirror("oci://example.com/image")
    assert image_from_mirror(mirror) == ImageReference.from_string("example.com/image")


@@ -511,25 +511,25 @@ def test_default_credentials_provider():

    mirrors = [
        # OCI mirror with push credentials
-       spack.mirror.Mirror(
+       spack.mirrors.mirror.Mirror(
            {"url": "oci://a.example.com/image", "push": {"access_pair": ["user.a", "pass.a"]}}
        ),
        # Not an OCI mirror
-       spack.mirror.Mirror(
+       spack.mirrors.mirror.Mirror(
            {"url": "https://b.example.com/image", "access_pair": ["user.b", "pass.b"]}
        ),
        # No credentials
-       spack.mirror.Mirror("oci://c.example.com/image"),
+       spack.mirrors.mirror.Mirror("oci://c.example.com/image"),
        # Top-level credentials
-       spack.mirror.Mirror(
+       spack.mirrors.mirror.Mirror(
            {"url": "oci://d.example.com/image", "access_pair": ["user.d", "pass.d"]}
        ),
        # Dockerhub short reference
-       spack.mirror.Mirror(
+       spack.mirrors.mirror.Mirror(
            {"url": "oci://user/image", "access_pair": ["dockerhub_user", "dockerhub_pass"]}
        ),
        # Localhost (not a dockerhub short reference)
-       spack.mirror.Mirror(
+       spack.mirrors.mirror.Mirror(
            {"url": "oci://localhost/image", "access_pair": ["user.localhost", "pass.localhost"]}
        ),
    ]
@@ -21,6 +21,7 @@
import spack.deptypes as dt
import spack.error
import spack.install_test
import spack.package
import spack.package_base
import spack.repo
import spack.spec

@@ -24,7 +24,7 @@
import spack.config
import spack.error
import spack.fetch_strategy
-import spack.mirror
+import spack.mirrors.utils
import spack.package_base
import spack.stage
import spack.util.gpg
@@ -64,7 +64,7 @@ def test_buildcache(mock_archive, tmp_path, monkeypatch, mutable_config):

    # Create the build cache and put it directly into the mirror
    mirror_path = str(tmp_path / "test-mirror")
-   spack.mirror.create(mirror_path, specs=[])
+   spack.mirrors.utils.create(mirror_path, specs=[])

    # register mirror with spack config
    mirrors = {"spack-mirror-test": url_util.path_to_file_url(mirror_path)}
@@ -20,9 +20,8 @@ def create_dag(nodes, edges):
    """
    specs = {name: Spec(name) for name in nodes}
    for parent, child, deptypes in edges:
-       specs[parent].add_dependency_edge(
-           specs[child], depflag=dt.canonicalize(deptypes), virtuals=()
-       )
+       depflag = deptypes if isinstance(deptypes, dt.DepFlag) else dt.canonicalize(deptypes)
+       specs[parent].add_dependency_edge(specs[child], depflag=depflag, virtuals=())
    return specs


@@ -431,3 +430,84 @@ def test_traverse_nodes_no_deps(abstract_specs_dtuse):
    ]
    outputs = [x for x in traverse.traverse_nodes(inputs, deptype=dt.NONE)]
    assert outputs == [abstract_specs_dtuse["dtuse"], abstract_specs_dtuse["dtlink5"]]
+
+
+@pytest.mark.parametrize("cover", ["nodes", "edges"])
+def test_topo_is_bfs_for_trees(cover):
+    """For trees, both DFS and BFS produce a topological order, but BFS is the most sensible for
+    our applications, where we typically want to avoid that transitive dependencies shadow direct
+    dependencies in global search paths, etc. This test ensures that for trees, the default topo
+    order coincides with BFS."""
+    binary_tree = create_dag(
+        nodes=["A", "B", "C", "D", "E", "F", "G"],
+        edges=(
+            ("A", "B", "all"),
+            ("A", "C", "all"),
+            ("B", "D", "all"),
+            ("B", "E", "all"),
+            ("C", "F", "all"),
+            ("C", "G", "all"),
+        ),
+    )
+
+    assert list(traverse.traverse_nodes([binary_tree["A"]], order="topo", cover=cover)) == list(
+        traverse.traverse_nodes([binary_tree["A"]], order="breadth", cover=cover)
+    )
+
+
+@pytest.mark.parametrize("roots", [["A"], ["A", "B"], ["B", "A"], ["A", "B", "A"]])
+@pytest.mark.parametrize("order", ["breadth", "post", "pre"])
+@pytest.mark.parametrize("include_root", [True, False])
+def test_mixed_depth_visitor(roots, order, include_root):
+    """Test that the MixedDepthVisitor lists unique edges that are reachable either directly from
+    roots through build type edges, or transitively through link type edges. The test ensures that
+    unique edges are listed exactly once."""
+    my_graph = create_dag(
+        nodes=["A", "B", "C", "D", "E", "F", "G", "H", "I"],
+        edges=(
+            ("A", "B", dt.LINK | dt.RUN),
+            ("A", "C", dt.BUILD),
+            ("A", "D", dt.BUILD | dt.RUN),
+            ("A", "H", dt.LINK),
+            ("A", "I", dt.RUN),
+            ("B", "D", dt.BUILD | dt.LINK),
+            ("C", "E", dt.BUILD | dt.LINK | dt.RUN),
+            ("D", "F", dt.LINK),
+            ("D", "G", dt.BUILD | dt.RUN),
+            ("H", "B", dt.LINK),
+        ),
+    )
+    starting_points = traverse.with_artificial_edges([my_graph[root] for root in roots])
+    visitor = traverse.MixedDepthVisitor(direct=dt.BUILD, transitive=dt.LINK)
+
+    if order == "pre":
+        edges = traverse.traverse_depth_first_edges_generator(
+            starting_points, visitor, post_order=False, root=include_root
+        )
+    elif order == "post":
+        edges = traverse.traverse_depth_first_edges_generator(
+            starting_points, visitor, post_order=True, root=include_root
+        )
+    elif order == "breadth":
+        edges = traverse.traverse_breadth_first_edges_generator(
+            starting_points, visitor, root=include_root
+        )
+
+    artificial_edges = [(None, root) for root in roots] if include_root else []
+    simple_edges = [
+        (None if edge.parent is None else edge.parent.name, edge.spec.name) for edge in edges
+    ]
+
+    # make sure that every edge is listed exactly once and that the right edges are listed;
+    # e.g. ("C", "E") is absent because A -> C is a build-only (direct) edge, so C's own
+    # dependencies are not followed, and ("A", "I") is absent because run-only edges are
+    # neither direct (build) nor transitive (link) here.
+    assert len(simple_edges) == len(set(simple_edges))
+    assert set(simple_edges) == {
+        # the roots
+        *artificial_edges,
+        ("A", "B"),
+        ("A", "C"),
+        ("A", "D"),
+        ("A", "H"),
+        ("B", "D"),
+        ("D", "F"),
+        ("H", "B"),
+    }
@@ -607,6 +607,9 @@ def test_stringify_version(version_str):
    v.string = None
    assert str(v) == version_str

+   v.string = None
+   assert v.string == version_str
+

def test_len():
    a = Version("1.2.3.4")
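The added assertion exercises lazy regeneration: assigning None to `string` invalidates the cached representation, and the next read rebuilds it. A generic sketch of that cached-property pattern (names are mine, not Spack's Version internals):

class LazyString:
    def __init__(self, parts):
        self.parts = parts
        self._string = None

    @property
    def string(self):
        if self._string is None:  # regenerate the cached form on demand
            self._string = ".".join(str(p) for p in self.parts)
        return self._string

    @string.setter
    def string(self, value):
        self._string = value


v = LazyString((1, 2, 3))
assert v.string == "1.2.3"
v.string = None             # invalidate the cache...
assert v.string == "1.2.3"  # ...and it is rebuilt on access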
@@ -14,7 +14,7 @@
import llnl.util.tty as tty

import spack.config
-import spack.mirror
+import spack.mirrors.mirror
import spack.paths
import spack.url
import spack.util.s3
@@ -276,7 +276,7 @@ def head_object(self, Bucket=None, Key=None):


def test_gather_s3_information(monkeypatch, capfd):
-   mirror = spack.mirror.Mirror(
+   mirror = spack.mirrors.mirror.Mirror(
        {
            "fetch": {
                "access_token": "AAAAAAA",
@@ -4,7 +4,7 @@
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
from collections import defaultdict
|
||||
from typing import NamedTuple, Union
|
||||
from typing import Any, Callable, List, NamedTuple, Set, Union
|
||||
|
||||
import spack.deptypes as dt
|
||||
import spack.spec
|
||||
@@ -115,68 +115,62 @@ def neighbors(self, item):
return self.visitor.neighbors(item)


class TopoVisitor:
"""Visitor that can be used in :py:func:`depth-first traversal
<spack.traverse.traverse_depth_first_with_visitor>` to generate
a topologically ordered list of specs.
class MixedDepthVisitor:
"""Visits all unique edges of the sub-DAG induced by direct dependencies of type ``direct``
and transitive dependencies of type ``transitive``. An example use for this is traversing build
type dependencies non-recursively, and link dependencies recursively."""

Algorithm based on "Section 22.4: Topological sort", Introduction to Algorithms
(2001, 2nd edition) by Cormen, Thomas H.; Leiserson, Charles E.; Rivest, Ronald L.;
Stein, Clifford.

Summary of the algorithm: prepend each vertex to a list in depth-first post-order,
not following edges to nodes already seen. This ensures all descendants occur after
their parent, yielding a topological order.

Note: in this particular implementation we collect the *edges* through which the
vertices are discovered, meaning that a topological order of *vertices* is obtained
by taking the specs pointed to: ``map(lambda edge: edge.spec, visitor.edges)``.
Lastly, ``all_edges=True`` can be used to retrieve a list of all reachable
edges, with the property that for each vertex all in-edges precede all out-edges.
"""

def __init__(self, visitor, key=id, root=True, all_edges=False):
"""
Arguments:
visitor: visitor that implements accept(), pre(), post() and neighbors()
key: uniqueness key for nodes
root (bool): Whether to include the root node.
all_edges (bool): when ``False`` (default): Each node is reached once,
and ``map(lambda edge: edge.spec, visitor.edges)`` is topologically
ordered. When ``True``, every edge is listed, ordered such that for
each node all in-edges precede all out-edges.
"""
self.visited = set()
self.visitor = visitor
def __init__(
self,
*,
direct: dt.DepFlag,
transitive: dt.DepFlag,
key: Callable[["spack.spec.Spec"], Any] = id,
) -> None:
self.direct_type = direct
self.transitive_type = transitive
self.key = key
self.root = root
self.reverse_order = []
self.all_edges = all_edges
self.seen: Set[Any] = set()
self.seen_roots: Set[Any] = set()

def accept(self, item):
if self.key(item.edge.spec) not in self.visited:
return True
if self.all_edges and (self.root or item.depth > 0):
self.reverse_order.append(item.edge)
return False
def accept(self, item: EdgeAndDepth) -> bool:
# Do not accept duplicate root nodes. This only happens if the user starts iterating from
# multiple roots and lists one of the roots multiple times.
if item.edge.parent is None:
node_id = self.key(item.edge.spec)
if node_id in self.seen_roots:
return False
self.seen_roots.add(node_id)
return True

def pre(self, item):
# You could add a temporary marker for cycle detection
# that's cleared in `post`, but we assume no cycles.
pass
def neighbors(self, item: EdgeAndDepth) -> List[EdgeAndDepth]:
# If we're here through an artificial source node, it's a root, and we return all
# direct_type and transitive_type edges. If we're here through a transitive_type edge, we
# return all transitive_type edges. To avoid returning the same edge twice:
# 1. If we had already encountered the current node through a transitive_type edge, we
# don't need to return transitive_type edges again.
# 2. If we encounter the current node through a direct_type edge, and we had already seen
# it through a transitive_type edge, only return the non-transitive_type, direct_type
# edges.
node_id = self.key(item.edge.spec)
seen = node_id in self.seen
is_root = item.edge.parent is None
follow_transitive = is_root or bool(item.edge.depflag & self.transitive_type)
follow = self.direct_type if is_root else dt.NONE

def post(self, item):
self.visited.add(self.key(item.edge.spec))
if self.root or item.depth > 0:
self.reverse_order.append(item.edge)
if follow_transitive and not seen:
follow |= self.transitive_type
self.seen.add(node_id)
elif follow == dt.NONE:
return []

def neighbors(self, item):
return self.visitor.neighbors(item)
edges = item.edge.spec.edges_to_dependencies(depflag=follow)

@property
def edges(self):
"""Return edges in topological order (in-edges precede out-edges)."""
return list(reversed(self.reverse_order))
# filter direct_type edges already followed before because they were also transitive_type.
if seen:
edges = [edge for edge in edges if not edge.depflag & self.transitive_type]

return sort_edges(edges)
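The DFS scheme the removed TopoVisitor docstring describes fits in a few lines. A minimal standalone sketch, assuming a toy adjacency dict; the name topo_order and the data shape are illustrative, not part of this diff:

def topo_order(graph, roots):
    seen, order = set(), []

    def visit(vertex):
        if vertex in seen:
            return
        seen.add(vertex)
        for child in graph.get(vertex, ()):
            visit(child)
        order.insert(0, vertex)  # prepend in depth-first post-order

    for root in roots:
        visit(root)
    return order

# every vertex precedes its descendants:
assert topo_order({"A": ["B", "C"], "B": ["C"]}, ["A"]) == ["A", "B", "C"]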
def get_visitor_from_args(
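To make the mixed-depth idea concrete: direct (e.g. build) edges are followed only from the roots, while transitive (e.g. link) edges are followed from every node reached. A hedged toy sketch on plain dicts — the names and the edge encoding are assumptions, not Spack's API:

BUILD, LINK = "build", "link"

def mixed_depth_nodes(edges, roots):
    """edges: {node: [(dep_type, child), ...]}"""
    seen = set(roots)
    frontier = list(roots)
    order = list(roots)
    at_root = True
    while frontier:
        nxt = []
        for node in frontier:
            for dep_type, child in edges.get(node, ()):
                if dep_type == BUILD and not at_root:
                    continue  # build-type edges are only followed from the roots
                if child not in seen:
                    seen.add(child)
                    order.append(child)
                    nxt.append(child)
        frontier = nxt
        at_root = False
    return order

# "cmake" (build dep of the root) is visited, but pkg-b's build dep is not:
g = {"root": [(BUILD, "cmake"), (LINK, "pkg-a")],
     "pkg-a": [(LINK, "pkg-b")],
     "pkg-b": [(BUILD, "autoconf")]}
assert mixed_depth_nodes(g, ["root"]) == ["root", "cmake", "pkg-a", "pkg-b"]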
@@ -381,39 +375,52 @@ def traverse_breadth_first_tree_nodes(parent_id, edges, key=id, depth=0):
yield item


# Topologic order
def traverse_edges_topo(
specs,
direction="children",
deptype: Union[dt.DepFlag, dt.DepTypes] = "all",
key=id,
root=True,
all_edges=False,
):
def traverse_topo_edges_generator(edges, visitor, key=id, root=True, all_edges=False):
"""
Returns a list of edges in topological order, in the sense that all in-edges of a
vertex appear before all out-edges. By default with direction=children edges are
directed from dependent to dependency. With directions=parents, the edges are
directed from dependency to dependent.
Returns a list of edges in topological order, in the sense that all in-edges of a vertex appear
before all out-edges.

Arguments:
specs (list): List of root specs (considered to be depth 0)
direction (str): ``children`` (edges are directed from dependent to dependency)
or ``parents`` (edges are flipped / directed from dependency to dependent)
deptype: allowed dependency types
edges (list): List of EdgeAndDepth instances
visitor: visitor instance that defines the sub-DAG to traverse
key: function that takes a spec and outputs a key for uniqueness test.
root (bool): Yield the root nodes themselves
all_edges (bool): When ``False`` only one in-edge per node is returned, when
``True`` all reachable edges are returned.
"""
if not isinstance(deptype, dt.DepFlag):
deptype = dt.canonicalize(deptype)
visitor: Union[BaseVisitor, ReverseVisitor, TopoVisitor] = BaseVisitor(deptype)
if direction == "parents":
visitor = ReverseVisitor(visitor, deptype)
visitor = TopoVisitor(visitor, key=key, root=root, all_edges=all_edges)
traverse_depth_first_with_visitor(with_artificial_edges(specs), visitor)
return visitor.edges
# Topo order used to be implemented using a DFS visitor, which was relatively efficient in that
# it would visit nodes only once, and it was composable. In practice however it would yield a
# DFS order on DAGs that are trees, which is undesirable in many cases. For example, a list of
# search paths for trees is better in BFS order, so that direct dependencies are listed first.
# That way a transitive dependency cannot shadow a direct one. So, here we collect the sub-DAG
# of interest and then compute a topological order that is the most breadth-first possible.

# maps node identifier to the number of remaining in-edges
in_edge_count = defaultdict(int)
# maps parent identifier to a list of edges, where None is a special identifier
# for the artificial root/source.
node_to_edges = defaultdict(list)
for edge in traverse_breadth_first_edges_generator(edges, visitor, root=True, depth=False):
in_edge_count[key(edge.spec)] += 1
parent_id = key(edge.parent) if edge.parent is not None else None
node_to_edges[parent_id].append(edge)

queue = [None]

while queue:
for edge in node_to_edges[queue.pop(0)]:
child_id = key(edge.spec)
in_edge_count[child_id] -= 1

should_yield = root or edge.parent is not None

if all_edges and should_yield:
yield edge

if in_edge_count[child_id] == 0:
if not all_edges and should_yield:
yield edge
queue.append(key(edge.spec))
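The in-edge counting above is essentially Kahn's algorithm driven by a FIFO queue, which is what makes the resulting order "as breadth-first as possible". A minimal sketch on a toy adjacency dict (names and data are illustrative):

from collections import defaultdict

def bfs_topo_order(graph, roots):
    in_edges = defaultdict(int)
    for children in graph.values():
        for child in children:
            in_edges[child] += 1
    order, queue = [], list(roots)
    while queue:
        node = queue.pop(0)
        order.append(node)
        for child in graph.get(node, ()):
            in_edges[child] -= 1
            if in_edges[child] == 0:  # all in-edges seen: safe to emit
                queue.append(child)
    return order

# direct dependencies come out before transitive ones:
assert bfs_topo_order({"A": ["B", "C"], "B": ["D"], "C": ["D"]}, ["A"]) == ["A", "B", "C", "D"]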
# High-level API: traverse_edges, traverse_nodes, traverse_tree.
@@ -462,20 +469,20 @@ def traverse_edges(
A generator that yields ``DependencySpec`` if depth is ``False``
or a tuple of ``(depth, DependencySpec)`` if depth is ``True``.
"""

# validate input
if order == "topo":
if cover == "paths":
raise ValueError("cover=paths not supported for order=topo")
# TODO: There is no known need for topological ordering of traversals (edge or node)
# with an initialized "visited" set. Revisit if needed.
if visited is not None:
raise ValueError("visited set not implemented for order=topo")
return traverse_edges_topo(
specs, direction, deptype, key, root, all_edges=cover == "edges"
)
elif order not in ("post", "pre", "breadth"):
raise ValueError(f"Unknown order {order}")

# In topo traversal we need to construct a sub-DAG including all unique edges even if we are
# yielding a subset of them, hence "edges".
_cover = "edges" if order == "topo" else cover
visitor = get_visitor_from_args(_cover, direction, deptype, key, visited)
root_edges = with_artificial_edges(specs)
visitor = get_visitor_from_args(cover, direction, deptype, key, visited)

# Depth-first
if order in ("pre", "post"):
@@ -484,8 +491,10 @@ def traverse_edges(
)
elif order == "breadth":
return traverse_breadth_first_edges_generator(root_edges, visitor, root, depth)

raise ValueError("Unknown order {}".format(order))
elif order == "topo":
return traverse_topo_edges_generator(
root_edges, visitor, key, root, all_edges=cover == "edges"
)


def traverse_nodes(
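With this change a topological traversal goes through the same high-level entry point as the other orders. A hedged usage sketch — it assumes `specs` is a list of concrete Spec objects, and the output shape follows the docstring above:

import spack.traverse as traverse

for edge in traverse.traverse_edges(specs, order="topo", cover="edges", deptype="link"):
    print(edge.parent, "->", edge.spec)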
@@ -7,7 +7,7 @@
import re
import struct
from struct import calcsize, unpack, unpack_from
from typing import BinaryIO, Dict, List, NamedTuple, Optional, Pattern, Tuple
from typing import BinaryIO, Callable, Dict, List, NamedTuple, Optional, Pattern, Tuple


class ElfHeader(NamedTuple):
@@ -476,6 +476,31 @@ def get_interpreter(path: str) -> Optional[str]:
return None


def _delete_dynamic_array_entry(
f: BinaryIO, elf: ElfFile, should_delete: Callable[[int, int], bool]
) -> None:
f.seek(elf.pt_dynamic_p_offset)
dynamic_array_fmt = elf.byte_order + ("qQ" if elf.is_64_bit else "lL")
dynamic_array_size = calcsize(dynamic_array_fmt)
new_offset = elf.pt_dynamic_p_offset  # points to the new dynamic array
old_offset = elf.pt_dynamic_p_offset  # points to the current dynamic array
for _ in range(elf.pt_dynamic_p_filesz // dynamic_array_size):
data = read_exactly(f, dynamic_array_size, "Malformed dynamic array entry")
tag, val = unpack(dynamic_array_fmt, data)

if tag == ELF_CONSTANTS.DT_NULL or not should_delete(tag, val):
if new_offset != old_offset:
f.seek(new_offset)
f.write(data)
f.seek(old_offset + dynamic_array_size)
new_offset += dynamic_array_size

if tag == ELF_CONSTANTS.DT_NULL:
break

old_offset += dynamic_array_size


def delete_rpath(path: str) -> None:
"""Modifies a binary to remove the rpath. It zeros out the rpath string and also drops the
DT_R(UN)PATH entry from the dynamic section, so it doesn't show up in 'readelf -d file', nor
@@ -492,29 +517,22 @@ def delete_rpath(path: str) -> None:
f.seek(rpath_offset)
f.write(new_rpath_string)

# Next update the dynamic array
f.seek(elf.pt_dynamic_p_offset)
dynamic_array_fmt = elf.byte_order + ("qQ" if elf.is_64_bit else "lL")
dynamic_array_size = calcsize(dynamic_array_fmt)
new_offset = elf.pt_dynamic_p_offset  # points to the new dynamic array
old_offset = elf.pt_dynamic_p_offset  # points to the current dynamic array
for _ in range(elf.pt_dynamic_p_filesz // dynamic_array_size):
data = read_exactly(f, dynamic_array_size, "Malformed dynamic array entry")
tag, _ = unpack(dynamic_array_fmt, data)
# Delete DT_RPATH / DT_RUNPATH entries from the dynamic section
_delete_dynamic_array_entry(
f, elf, lambda tag, _: tag == ELF_CONSTANTS.DT_RPATH or tag == ELF_CONSTANTS.DT_RUNPATH
)

# Overwrite any entry that is not DT_RPATH or DT_RUNPATH, including DT_NULL
if tag != ELF_CONSTANTS.DT_RPATH and tag != ELF_CONSTANTS.DT_RUNPATH:
if new_offset != old_offset:
f.seek(new_offset)
f.write(data)
f.seek(old_offset + dynamic_array_size)
new_offset += dynamic_array_size

# End of the dynamic array
if tag == ELF_CONSTANTS.DT_NULL:
break
def delete_needed_from_elf(f: BinaryIO, elf: ElfFile, needed: bytes) -> None:
"""Delete a needed library from the dynamic section of an ELF file"""
if not elf.has_needed or needed not in elf.dt_needed_strs:
return

old_offset += dynamic_array_size
offset = elf.dt_needed_strtab_offsets[elf.dt_needed_strs.index(needed)]

_delete_dynamic_array_entry(
f, elf, lambda tag, val: tag == ELF_CONSTANTS.DT_NEEDED and val == offset
)


class CStringType:
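The new _delete_dynamic_array_entry helper compacts the dynamic array in place: surviving entries are copied toward the front, and the scan stops at the DT_NULL terminator. The same two-cursor pattern on a plain list of (tag, val) pairs, as a sketch (tag values and names here are illustrative):

DT_NULL = 0

def compact_dynamic_array(entries, should_delete):
    write = 0  # next slot for a surviving entry (new_offset in the real code)
    for tag, val in entries:  # the loop index plays the role of old_offset
        if tag == DT_NULL or not should_delete(tag, val):
            entries[write] = (tag, val)
            write += 1
        if tag == DT_NULL:
            break
    return entries[:write]

# dropping tag 15 (DT_RPATH in ELF) keeps the rest, terminator included:
assert compact_dynamic_array([(1, 10), (15, 20), (1, 30), (0, 0)],
                             lambda tag, _: tag == 15) == [(1, 10), (1, 30), (0, 0)]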
@@ -13,7 +13,7 @@
import sys

from llnl.util import tty
from llnl.util.filesystem import join_path
from llnl.util.filesystem import edit_in_place_through_temporary_file
from llnl.util.lang import memoized

from spack.util.executable import Executable, which
@@ -81,12 +81,11 @@ def fix_darwin_install_name(path):
Parameters:
path (str): directory in which .dylib files are located
"""
libs = glob.glob(join_path(path, "*.dylib"))
libs = glob.glob(os.path.join(path, "*.dylib"))
install_name_tool = Executable("install_name_tool")
otool = Executable("otool")
for lib in libs:
# fix install name first:
install_name_tool = Executable("install_name_tool")
install_name_tool("-id", lib, lib)
otool = Executable("otool")
args = ["-id", lib]
long_deps = otool("-L", lib, output=str).split("\n")
deps = [dep.partition(" ")[0][1::] for dep in long_deps[2:-1]]
# fix all dependencies:
@@ -98,5 +97,8 @@ def fix_darwin_install_name(path):
# but we don't know builddir (nor how symbolic links look
# in builddir). We thus only compare the basenames.
if os.path.basename(dep) == os.path.basename(loc):
install_name_tool("-change", dep, loc, lib)
args.extend(("-change", dep, loc))
break

with edit_in_place_through_temporary_file(lib) as tmp:
install_name_tool(*args, tmp)
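The rewritten loop batches all edits for one library — a single -id plus any number of -change pairs — into one install_name_tool invocation against a temporary copy. A sketch of the argument list it builds up (the paths are assumed values):

args = ["-id", "/path/to/libfoo.dylib"]
for dep, loc in [("libbar.dylib", "/opt/deps/libbar.dylib")]:
    args.extend(("-change", dep, loc))
# effectively: install_name_tool -id ... -change libbar.dylib /opt/deps/libbar.dylib <tmp>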
@@ -55,6 +55,7 @@ def get_user():
# Substitutions to perform
def replacements():
# break circular imports
import spack
import spack.environment as ev
import spack.paths

@@ -25,7 +25,7 @@ def get_s3_session(url, method="fetch"):
from botocore.exceptions import ClientError

# Circular dependency
from spack.mirror import MirrorCollection
from spack.mirrors.mirror import MirrorCollection

global s3_client_cache

@@ -87,7 +87,7 @@ def _parse_s3_endpoint_url(endpoint_url):
def get_mirror_s3_connection_info(mirror, method):
"""Create s3 config for session/client from a Mirror instance (or just set defaults
when no mirror is given.)"""
from spack.mirror import Mirror
from spack.mirrors.mirror import Mirror

s3_connection = {}
s3_client_args = {"use_ssl": spack.config.get("config:verify_ssl")}

@@ -26,6 +26,7 @@
from llnl.util import lang, tty
from llnl.util.filesystem import mkdirp, rename, working_dir

import spack
import spack.config
import spack.error
import spack.util.executable

@@ -25,11 +25,13 @@
)
from .version_types import (
ClosedOpenRange,
ConcreteVersion,
GitVersion,
StandardVersion,
Version,
VersionList,
VersionRange,
VersionType,
_next_version,
_prev_version,
from_string,
@@ -40,21 +42,23 @@
any_version: VersionList = VersionList([":"])

__all__ = [
"Version",
"VersionRange",
"ver",
"from_string",
"is_git_version",
"infinity_versions",
"_prev_version",
"_next_version",
"VersionList",
"ClosedOpenRange",
"StandardVersion",
"GitVersion",
"VersionError",
"VersionChecksumError",
"VersionLookupError",
"ConcreteVersion",
"EmptyRangeError",
"GitVersion",
"StandardVersion",
"Version",
"VersionChecksumError",
"VersionError",
"VersionList",
"VersionLookupError",
"VersionRange",
"VersionType",
"_next_version",
"_prev_version",
"any_version",
"from_string",
"infinity_versions",
"is_git_version",
"ver",
]

@@ -3,10 +3,9 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import numbers
import re
from bisect import bisect_left
from typing import List, Optional, Tuple, Union
from typing import Dict, Iterable, Iterator, List, Optional, Tuple, Union

from spack.util.spack_yaml import syaml_dict
@@ -32,26 +31,44 @@


class VersionStrComponent:
"""Internal representation of the string (non-integer) components of Spack versions.

Versions comprise string and integer components (see ``SEGMENT_REGEX`` above).

This represents a string component, which is either some component consisting only
of alphabetical characters, *or* a special "infinity version" like ``main``,
``develop``, ``master``, etc.

For speed, Spack versions are designed to map to Python tuples, so that we can use
Python's fast lexicographic tuple comparison on them. ``VersionStrComponent`` is
designed to work as a component in these version tuples, and as such must compare
directly with ``int`` or other ``VersionStrComponent`` objects.

"""

__slots__ = ["data"]

def __init__(self, data):
data: Union[int, str]

def __init__(self, data: Union[int, str]):
# int for infinity index, str for literal.
self.data: Union[int, str] = data
self.data = data

@staticmethod
def from_string(string):
def from_string(string: str) -> "VersionStrComponent":
value: Union[int, str] = string
if len(string) >= iv_min_len:
try:
string = infinity_versions.index(string)
value = infinity_versions.index(string)
except ValueError:
pass

return VersionStrComponent(string)
return VersionStrComponent(value)

def __hash__(self):
def __hash__(self) -> int:
return hash(self.data)

def __str__(self):
def __str__(self) -> str:
return (
("infinity" if self.data >= len(infinity_versions) else infinity_versions[self.data])
if isinstance(self.data, int)
@@ -61,38 +78,61 @@ def __str__(self):
def __repr__(self) -> str:
return f'VersionStrComponent("{self}")'

def __eq__(self, other):
def __eq__(self, other: object) -> bool:
return isinstance(other, VersionStrComponent) and self.data == other.data

def __lt__(self, other):
lhs_inf = isinstance(self.data, int)
# ignore typing for certain parts of these methods b/c a) they are performance-critical, and
# b) mypy isn't smart enough to figure out that if l_inf and r_inf are the same, comparing
# self.data and other.data is type safe.
def __lt__(self, other: object) -> bool:
l_inf = isinstance(self.data, int)
if isinstance(other, int):
return not lhs_inf
rhs_inf = isinstance(other.data, int)
return (not lhs_inf and rhs_inf) if lhs_inf ^ rhs_inf else self.data < other.data
return not l_inf
r_inf = isinstance(other.data, int)  # type: ignore
return (not l_inf and r_inf) if l_inf ^ r_inf else self.data < other.data  # type: ignore

def __le__(self, other):
def __gt__(self, other: object) -> bool:
l_inf = isinstance(self.data, int)
if isinstance(other, int):
return l_inf
r_inf = isinstance(other.data, int)  # type: ignore
return (l_inf and not r_inf) if l_inf ^ r_inf else self.data > other.data  # type: ignore

def __le__(self, other: object) -> bool:
return self < other or self == other

def __gt__(self, other):
lhs_inf = isinstance(self.data, int)
if isinstance(other, int):
return lhs_inf
rhs_inf = isinstance(other.data, int)
return (lhs_inf and not rhs_inf) if lhs_inf ^ rhs_inf else self.data > other.data

def __ge__(self, other):
def __ge__(self, other: object) -> bool:
return self > other or self == other


def parse_string_components(string: str) -> Tuple[tuple, tuple]:
# Tuple types that make up the internal representation of StandardVersion.
# We use Tuples so that Python can quickly compare versions.

#: Version components are integers for numeric parts, VersionStrComponents for string parts.
VersionComponentTuple = Tuple[Union[int, VersionStrComponent], ...]

#: A Prerelease identifier is a constant for alpha/beta/rc/final and one optional number.
#: Most versions will have this set to ``(FINAL,)``. Prereleases will have some other
#: initial constant followed by a number, e.g. ``(RC, 1)``.
PrereleaseTuple = Tuple[int, ...]

#: Actual version tuple, including the split version number itself and the prerelease,
#: all represented as tuples.
VersionTuple = Tuple[VersionComponentTuple, PrereleaseTuple]

#: Separators from a parsed version.
SeparatorTuple = Tuple[str, ...]


def parse_string_components(string: str) -> Tuple[VersionTuple, SeparatorTuple]:
"""Parse a string into a ``VersionTuple`` and ``SeparatorTuple``."""
string = string.strip()

if string and not VALID_VERSION.match(string):
raise ValueError("Bad characters in version string: %s" % string)

segments = SEGMENT_REGEX.findall(string)
separators = tuple(m[2] for m in segments)
separators: Tuple[str] = tuple(m[2] for m in segments)
prerelease: Tuple[int, ...]

# <version>(alpha|beta|rc)<number>
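The tuple mapping the docstring mentions is what keeps comparisons fast: Python compares tuples lexicographically in C. A rough illustration (the actual parsing lives in parse_string_components and VersionStrComponent):

assert (1, 2, 10) > (1, 2, 9)   # numeric components compare as ints
assert ("a",) < ("b",)          # string components compare as strings
# VersionStrComponent exists so mixed int/str components can still be ordered.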
@@ -109,63 +149,150 @@ def parse_string_components(string: str) -> Tuple[tuple, tuple]:
else:
prerelease = (FINAL,)

release = tuple(int(m[0]) if m[0] else VersionStrComponent.from_string(m[1]) for m in segments)
release: VersionComponentTuple = tuple(
int(m[0]) if m[0] else VersionStrComponent.from_string(m[1]) for m in segments
)

return (release, prerelease), separators


class ConcreteVersion:
pass
class VersionType:
"""Base type for all versions in Spack (ranges, lists, regular versions, and git versions).

Versions in Spack behave like sets, and support some basic set operations. There are
four subclasses of ``VersionType``:

* ``StandardVersion``: a single, concrete version, e.g. 3.4.5 or 5.4b0.
* ``GitVersion``: subclass of ``StandardVersion`` for handling git repositories.
* ``ClosedOpenRange``: an inclusive version range, closed or open, e.g. ``3.0:5.0``,
``3.0:``, or ``:5.0``
* ``VersionList``: An ordered list of any of the above types.

Notably, when Spack parses a version, it's always a range *unless* specified with
``@=`` to make it concrete.

"""

def intersection(self, other: "VersionType") -> "VersionType":
"""Any versions contained in both self and other, or empty VersionList if no overlap."""
raise NotImplementedError

def intersects(self, other: "VersionType") -> bool:
"""Whether self and other overlap."""
raise NotImplementedError

def overlaps(self, other: "VersionType") -> bool:
"""Whether self and other overlap (same as ``intersects()``)."""
return self.intersects(other)

def satisfies(self, other: "VersionType") -> bool:
"""Whether self is entirely contained in other."""
raise NotImplementedError

def union(self, other: "VersionType") -> "VersionType":
"""Return a VersionType containing self and other."""
raise NotImplementedError

# We can use SupportsRichComparisonT in Python 3.8 or later, but alas in 3.6 we need
# to write all the operators out
def __eq__(self, other: object) -> bool:
raise NotImplementedError

def __lt__(self, other: object) -> bool:
raise NotImplementedError

def __gt__(self, other: object) -> bool:
raise NotImplementedError

def __ge__(self, other: object) -> bool:
raise NotImplementedError

def __le__(self, other: object) -> bool:
raise NotImplementedError

def __hash__(self) -> int:
raise NotImplementedError
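A hedged sketch of the set-like behavior this base class codifies, using the public ver() shown later in this diff (the exact results are illustrative, not taken from the diff):

from spack.version import ver

assert ver("3.0:5.0").intersects(ver("4.2"))     # ranges overlap
assert ver("=4.2").satisfies(ver("3.0:5.0"))     # @= makes a concrete version
assert not ver("6.0").satisfies(ver("3.0:5.0"))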
def _stringify_version(versions: Tuple[tuple, tuple], separators: tuple) -> str:
class ConcreteVersion(VersionType):
"""Base type for versions that represents a single (non-range or list) version."""


def _stringify_version(versions: VersionTuple, separators: Tuple[str, ...]) -> str:
"""Create a string representation from version components."""
release, prerelease = versions
string = ""
for i in range(len(release)):
string += f"{release[i]}{separators[i]}"

components = [f"{rel}{sep}" for rel, sep in zip(release, separators)]
if prerelease[0] != FINAL:
string += f"{PRERELEASE_TO_STRING[prerelease[0]]}{separators[len(release)]}"
if len(prerelease) > 1:
string += str(prerelease[1])
return string
components.append(PRERELEASE_TO_STRING[prerelease[0]])
if len(prerelease) > 1:
components.append(separators[len(release)])
components.append(str(prerelease[1]))

return "".join(components)
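The new implementation pairs each release component with its separator and joins once at the end instead of concatenating in a loop. A toy illustration with assumed values:

release, separators = (1, 2, 3), (".", ".", "")
assert "".join(f"{rel}{sep}" for rel, sep in zip(release, separators)) == "1.2.3"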
class StandardVersion(ConcreteVersion):
"""Class to represent versions"""

__slots__ = ["version", "string", "separators"]
__slots__ = ["version", "_string", "separators"]

def __init__(self, string: Optional[str], version: Tuple[tuple, tuple], separators: tuple):
self.string = string
_string: str
version: VersionTuple
separators: Tuple[str, ...]

def __init__(self, string: str, version: VersionTuple, separators: Tuple[str, ...]):
"""Create a StandardVersion from a string and parsed version components.

Arguments:
string: The original version string, or ``""`` if it is not available.
version: A tuple as returned by ``parse_string_components()``. Contains two tuples:
one with alpha or numeric components and another with prerelease components.
separators: separators parsed from the original version string.

If constructed with ``string=""``, the string will be lazily constructed from components
when ``str()`` is called.
"""
self._string = string
self.version = version
self.separators = separators

@staticmethod
def from_string(string: str):
def from_string(string: str) -> "StandardVersion":
return StandardVersion(string, *parse_string_components(string))

@staticmethod
def typemin():
def typemin() -> "StandardVersion":
return _STANDARD_VERSION_TYPEMIN

@staticmethod
def typemax():
def typemax() -> "StandardVersion":
return _STANDARD_VERSION_TYPEMAX

def __bool__(self):
@property
def string(self) -> str:
if not self._string:
self._string = _stringify_version(self.version, self.separators)
return self._string

@string.setter
def string(self, string) -> None:
self._string = string

def __bool__(self) -> bool:
return True

def __eq__(self, other):
def __eq__(self, other: object) -> bool:
if isinstance(other, StandardVersion):
return self.version == other.version
return False

def __ne__(self, other):
def __ne__(self, other: object) -> bool:
if isinstance(other, StandardVersion):
return self.version != other.version
return True

def __lt__(self, other):
def __lt__(self, other: object) -> bool:
if isinstance(other, StandardVersion):
return self.version < other.version
if isinstance(other, ClosedOpenRange):
@@ -173,7 +300,7 @@ def __lt__(self, other):
return self <= other.lo
return NotImplemented

def __le__(self, other):
def __le__(self, other: object) -> bool:
if isinstance(other, StandardVersion):
return self.version <= other.version
if isinstance(other, ClosedOpenRange):
@@ -181,7 +308,7 @@ def __le__(self, other):
return self <= other.lo
return NotImplemented

def __ge__(self, other):
def __ge__(self, other: object) -> bool:
if isinstance(other, StandardVersion):
return self.version >= other.version
if isinstance(other, ClosedOpenRange):
@@ -189,25 +316,25 @@ def __ge__(self, other):
return self > other.lo
return NotImplemented

def __gt__(self, other):
def __gt__(self, other: object) -> bool:
if isinstance(other, StandardVersion):
return self.version > other.version
if isinstance(other, ClosedOpenRange):
return self > other.lo
return NotImplemented

def __iter__(self):
def __iter__(self) -> Iterator:
return iter(self.version[0])

def __len__(self):
def __len__(self) -> int:
return len(self.version[0])

def __getitem__(self, idx):
def __getitem__(self, idx: Union[int, slice]):
cls = type(self)

release = self.version[0]

if isinstance(idx, numbers.Integral):
if isinstance(idx, int):
return release[idx]

elif isinstance(idx, slice):
@@ -220,45 +347,38 @@ def __getitem__(self, idx):

if string_arg:
string_arg.pop()  # We don't need the last separator
string_arg = "".join(string_arg)
return cls.from_string(string_arg)
return cls.from_string("".join(string_arg))
else:
return StandardVersion.from_string("")

message = "{cls.__name__} indices must be integers"
raise TypeError(message.format(cls=cls))
raise TypeError(f"{cls.__name__} indices must be integers or slices")

def __str__(self):
return self.string or _stringify_version(self.version, self.separators)
def __str__(self) -> str:
return self.string

def __repr__(self) -> str:
# Print indirect repr through Version(...)
return f'Version("{str(self)}")'

def __hash__(self):
def __hash__(self) -> int:
# If this is a final release, do not hash the prerelease part for backward compat.
return hash(self.version if self.is_prerelease() else self.version[0])

def __contains__(rhs, lhs):
def __contains__(rhs, lhs) -> bool:
# We should probably get rid of `x in y` for versions, since
# versions still have a dual interpretation as singleton sets
# or elements. x in y should be: is the lhs-element in the
# rhs-set. Instead this function also does subset checks.
if isinstance(lhs, (StandardVersion, ClosedOpenRange, VersionList)):
if isinstance(lhs, VersionType):
return lhs.satisfies(rhs)
raise ValueError(lhs)
raise TypeError(f"'in' not supported for instances of {type(lhs)}")

def intersects(self, other: Union["StandardVersion", "GitVersion", "ClosedOpenRange"]) -> bool:
def intersects(self, other: VersionType) -> bool:
if isinstance(other, StandardVersion):
return self == other
return other.intersects(self)

def overlaps(self, other) -> bool:
return self.intersects(other)

def satisfies(
self, other: Union["ClosedOpenRange", "StandardVersion", "GitVersion", "VersionList"]
) -> bool:
def satisfies(self, other: VersionType) -> bool:
if isinstance(other, GitVersion):
return False

@@ -271,19 +391,19 @@ def satisfies(
if isinstance(other, VersionList):
return other.intersects(self)

return NotImplemented
raise NotImplementedError

def union(self, other: Union["ClosedOpenRange", "StandardVersion"]):
def union(self, other: VersionType) -> VersionType:
if isinstance(other, StandardVersion):
return self if self == other else VersionList([self, other])
return other.union(self)

def intersection(self, other: Union["ClosedOpenRange", "StandardVersion"]):
def intersection(self, other: VersionType) -> VersionType:
if isinstance(other, StandardVersion):
return self if self == other else VersionList()
return other.intersection(self)

def isdevelop(self):
def isdevelop(self) -> bool:
"""Triggers on the special case of the `@develop-like` version."""
return any(
isinstance(p, VersionStrComponent) and isinstance(p.data, int) for p in self.version[0]
@@ -304,7 +424,7 @@ def dotted_numeric_string(self) -> str:
return ".".join(str(v) for v in numeric)

@property
def dotted(self):
def dotted(self) -> "StandardVersion":
"""The dotted representation of the version.

Example:
@@ -318,7 +438,7 @@ def dotted(self):
return type(self).from_string(self.string.replace("-", ".").replace("_", "."))

@property
def underscored(self):
def underscored(self) -> "StandardVersion":
"""The underscored representation of the version.

Example:
@@ -333,7 +453,7 @@ def underscored(self):
return type(self).from_string(self.string.replace(".", "_").replace("-", "_"))

@property
def dashed(self):
def dashed(self) -> "StandardVersion":
"""The dashed representation of the version.

Example:
@@ -347,7 +467,7 @@ def dashed(self):
return type(self).from_string(self.string.replace(".", "-").replace("_", "-"))

@property
def joined(self):
def joined(self) -> "StandardVersion":
"""The joined representation of the version.

Example:
@@ -362,7 +482,7 @@ def joined(self):
self.string.replace(".", "").replace("-", "").replace("_", "")
)

def up_to(self, index):
def up_to(self, index: int) -> "StandardVersion":
"""The version up to the specified component.

Examples:
@@ -482,7 +602,7 @@ def ref_version(self) -> StandardVersion:
)
return self._ref_version

def intersects(self, other):
def intersects(self, other: VersionType) -> bool:
# For concrete things intersects = satisfies = equality
if isinstance(other, GitVersion):
return self == other
@@ -492,19 +612,14 @@ def intersects(self, other):
return self.ref_version.intersects(other)
if isinstance(other, VersionList):
return any(self.intersects(rhs) for rhs in other)
raise ValueError(f"Unexpected type {type(other)}")
raise TypeError(f"'intersects()' not supported for instances of {type(other)}")

def intersection(self, other):
def intersection(self, other: VersionType) -> VersionType:
if isinstance(other, ConcreteVersion):
return self if self == other else VersionList()
return other.intersection(self)

def overlaps(self, other) -> bool:
return self.intersects(other)

def satisfies(
self, other: Union["GitVersion", StandardVersion, "ClosedOpenRange", "VersionList"]
):
def satisfies(self, other: VersionType) -> bool:
# Concrete versions mean we have to do an equality check
if isinstance(other, GitVersion):
return self == other
@@ -514,9 +629,9 @@ def satisfies(
return self.ref_version.satisfies(other)
if isinstance(other, VersionList):
return any(self.satisfies(rhs) for rhs in other)
raise ValueError(f"Unexpected type {type(other)}")
raise TypeError(f"'satisfies()' not supported for instances of {type(other)}")

def __str__(self):
def __str__(self) -> str:
s = f"git.{self.ref}" if self.has_git_prefix else self.ref
# Note: the solver actually depends on str(...) to produce the effective version.
# So when a lookup is attached, we require the resolved version to be printed.
@@ -534,7 +649,7 @@ def __repr__(self):
def __bool__(self):
return True

def __eq__(self, other):
def __eq__(self, other: object) -> bool:
# GitVersion cannot be equal to StandardVersion, otherwise == is not transitive
return (
isinstance(other, GitVersion)
@@ -542,10 +657,10 @@ def __eq__(self, other):
and self.ref_version == other.ref_version
)

def __ne__(self, other):
def __ne__(self, other: object) -> bool:
return not self == other

def __lt__(self, other):
def __lt__(self, other: object) -> bool:
if isinstance(other, GitVersion):
return (self.ref_version, self.ref) < (other.ref_version, other.ref)
if isinstance(other, StandardVersion):
@@ -553,9 +668,9 @@ def __lt__(self, other):
return self.ref_version < other
if isinstance(other, ClosedOpenRange):
return self.ref_version < other
raise ValueError(f"Unexpected type {type(other)}")
raise TypeError(f"'<' not supported between instances of {type(self)} and {type(other)}")

def __le__(self, other):
def __le__(self, other: object) -> bool:
if isinstance(other, GitVersion):
return (self.ref_version, self.ref) <= (other.ref_version, other.ref)
if isinstance(other, StandardVersion):
@@ -564,9 +679,9 @@ def __le__(self, other):
if isinstance(other, ClosedOpenRange):
# Equality is not a thing
return self.ref_version < other
raise ValueError(f"Unexpected type {type(other)}")
raise TypeError(f"'<=' not supported between instances of {type(self)} and {type(other)}")

def __ge__(self, other):
def __ge__(self, other: object) -> bool:
if isinstance(other, GitVersion):
return (self.ref_version, self.ref) >= (other.ref_version, other.ref)
if isinstance(other, StandardVersion):
@@ -574,9 +689,9 @@ def __ge__(self, other):
return self.ref_version >= other
if isinstance(other, ClosedOpenRange):
return self.ref_version > other
raise ValueError(f"Unexpected type {type(other)}")
raise TypeError(f"'>=' not supported between instances of {type(self)} and {type(other)}")

def __gt__(self, other):
def __gt__(self, other: object) -> bool:
if isinstance(other, GitVersion):
return (self.ref_version, self.ref) > (other.ref_version, other.ref)
if isinstance(other, StandardVersion):
@@ -584,14 +699,14 @@ def __gt__(self, other):
return self.ref_version >= other
if isinstance(other, ClosedOpenRange):
return self.ref_version > other
raise ValueError(f"Unexpected type {type(other)}")
raise TypeError(f"'>' not supported between instances of {type(self)} and {type(other)}")

def __hash__(self):
# hashing should not cause version lookup
return hash(self.ref)

def __contains__(self, other):
raise Exception("Not implemented yet")
def __contains__(self, other: object) -> bool:
raise NotImplementedError

@property
def ref_lookup(self):
@@ -649,7 +764,7 @@ def up_to(self, index) -> StandardVersion:
return self.ref_version.up_to(index)


class ClosedOpenRange:
class ClosedOpenRange(VersionType):
def __init__(self, lo: StandardVersion, hi: StandardVersion):
if hi < lo:
raise EmptyRangeError(f"{lo}..{hi} is an empty range")
@@ -657,14 +772,14 @@ def __init__(self, lo: StandardVersion, hi: StandardVersion):
self.hi: StandardVersion = hi

@classmethod
def from_version_range(cls, lo: StandardVersion, hi: StandardVersion):
def from_version_range(cls, lo: StandardVersion, hi: StandardVersion) -> "ClosedOpenRange":
"""Construct ClosedOpenRange from lo:hi range."""
try:
return ClosedOpenRange(lo, _next_version(hi))
except EmptyRangeError as e:
raise EmptyRangeError(f"{lo}:{hi} is an empty range") from e
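The closed-open representation makes membership a pair of comparisons: an inclusive range lo:hi is stored as [lo, next(hi)), so `lo <= v < hi` decides containment. A hedged example of the semantics (illustrative, not taken from this diff):

from spack.version import ver

r = ver("3.1:3.2")                   # inclusive range, stored closed-open internally
assert ver("=3.2.5").satisfies(r)    # 3.1 <= 3.2.5 < next(3.2)
assert not ver("=3.3").satisfies(r)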
def __str__(self):
def __str__(self) -> str:
# This simplifies 3.1:<3.2 to 3.1:3.1 to 3.1
# 3:3 -> 3
hi_prev = _prev_version(self.hi)
@@ -726,9 +841,9 @@ def __gt__(self, other):
def __contains__(rhs, lhs):
if isinstance(lhs, (ConcreteVersion, ClosedOpenRange, VersionList)):
return lhs.satisfies(rhs)
raise ValueError(f"Unexpected type {type(lhs)}")
raise TypeError(f"'in' not supported between instances of {type(rhs)} and {type(lhs)}")

def intersects(self, other: Union[ConcreteVersion, "ClosedOpenRange", "VersionList"]):
def intersects(self, other: VersionType) -> bool:
if isinstance(other, StandardVersion):
return self.lo <= other < self.hi
if isinstance(other, GitVersion):
@@ -737,23 +852,18 @@ def intersects(self, other: Union[ConcreteVersion, "ClosedOpenRange", "VersionLi
return (self.lo < other.hi) and (other.lo < self.hi)
if isinstance(other, VersionList):
return any(self.intersects(rhs) for rhs in other)
raise ValueError(f"Unexpected type {type(other)}")
raise TypeError(f"'intersects' not supported for instances of {type(other)}")

def satisfies(self, other: Union["ClosedOpenRange", ConcreteVersion, "VersionList"]):
def satisfies(self, other: VersionType) -> bool:
if isinstance(other, ConcreteVersion):
return False
if isinstance(other, ClosedOpenRange):
return not (self.lo < other.lo or other.hi < self.hi)
if isinstance(other, VersionList):
return any(self.satisfies(rhs) for rhs in other)
raise ValueError(other)
raise TypeError(f"'satisfies()' not supported for instances of {type(other)}")

def overlaps(self, other: Union["ClosedOpenRange", ConcreteVersion, "VersionList"]) -> bool:
return self.intersects(other)

def _union_if_not_disjoint(
self, other: Union["ClosedOpenRange", ConcreteVersion]
) -> Optional["ClosedOpenRange"]:
def _union_if_not_disjoint(self, other: VersionType) -> Optional["ClosedOpenRange"]:
"""Same as union, but returns None when the union is not connected. This function is not
implemented for version lists as right-hand side, as that makes little sense."""
if isinstance(other, StandardVersion):
@@ -770,9 +880,9 @@ def _union_if_not_disjoint(
else None
)

raise TypeError(f"Unexpected type {type(other)}")
raise TypeError(f"'union()' not supported for instances of {type(other)}")

def union(self, other: Union["ClosedOpenRange", ConcreteVersion, "VersionList"]):
def union(self, other: VersionType) -> VersionType:
if isinstance(other, VersionList):
v = other.copy()
v.add(self)
@@ -781,35 +891,51 @@ def union(self, other: Union["ClosedOpenRange", ConcreteVersion, "VersionList"])
result = self._union_if_not_disjoint(other)
return result if result is not None else VersionList([self, other])

def intersection(self, other: Union["ClosedOpenRange", ConcreteVersion]):
def intersection(self, other: VersionType) -> VersionType:
# range - version -> singleton or nothing.
if isinstance(other, ClosedOpenRange):
# range - range -> range or nothing.
max_lo = max(self.lo, other.lo)
min_hi = min(self.hi, other.hi)
return ClosedOpenRange(max_lo, min_hi) if max_lo < min_hi else VersionList()

if isinstance(other, ConcreteVersion):
return other if self.intersects(other) else VersionList()

# range - range -> range or nothing.
max_lo = max(self.lo, other.lo)
min_hi = min(self.hi, other.hi)
return ClosedOpenRange(max_lo, min_hi) if max_lo < min_hi else VersionList()
raise TypeError(f"'intersection()' not supported for instances of {type(other)}")


class VersionList:
class VersionList(VersionType):
"""Sorted, non-redundant list of Version and ClosedOpenRange elements."""

def __init__(self, vlist=None):
self.versions: List[Union[StandardVersion, GitVersion, ClosedOpenRange]] = []
versions: List[VersionType]

def __init__(self, vlist: Optional[Union[str, VersionType, Iterable]] = None):
if vlist is None:
pass
self.versions = []

elif isinstance(vlist, str):
vlist = from_string(vlist)
if isinstance(vlist, VersionList):
self.versions = vlist.versions
else:
self.versions = [vlist]
else:

elif isinstance(vlist, (ConcreteVersion, ClosedOpenRange)):
self.versions = [vlist]

elif isinstance(vlist, VersionList):
self.versions = vlist[:]

elif isinstance(vlist, Iterable):
self.versions = []
for v in vlist:
self.add(ver(v))

def add(self, item: Union[StandardVersion, GitVersion, ClosedOpenRange, "VersionList"]):
else:
raise TypeError(f"Cannot construct VersionList from {type(vlist)}")
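A hedged example of the broadened constructor above — strings, single versions or ranges, other VersionLists, and iterables are all accepted (results illustrative):

from spack.version import VersionList, ver

assert str(VersionList("1.2:1.4")) == str(ver("1.2:1.4"))
vl = VersionList(["1.0", "2.0:3.0"])   # iterable of parseable items
assert ver("=2.5").satisfies(vl)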
def add(self, item: VersionType) -> None:
|
||||
if isinstance(item, (StandardVersion, GitVersion)):
|
||||
i = bisect_left(self, item)
|
||||
# Only insert when prev and next are not intersected.
|
||||
@@ -865,7 +991,7 @@ def concrete_range_as_version(self) -> Optional[ConcreteVersion]:
|
||||
return v.lo
|
||||
return None
|
||||
|
||||
def copy(self):
|
||||
def copy(self) -> "VersionList":
|
||||
return VersionList(self)
|
||||
|
||||
def lowest(self) -> Optional[StandardVersion]:
|
||||
@@ -889,7 +1015,7 @@ def preferred(self) -> Optional[StandardVersion]:
|
||||
"""Get the preferred (latest) version in the list."""
|
||||
return self.highest_numeric() or self.highest()
|
||||
|
||||
def satisfies(self, other) -> bool:
|
||||
def satisfies(self, other: VersionType) -> bool:
|
||||
# This exploits the fact that version lists are "reduced" and normalized, so we can
|
||||
# never have a list like [1:3, 2:4] since that would be normalized to [1:4]
|
||||
if isinstance(other, VersionList):
|
||||
@@ -898,9 +1024,9 @@ def satisfies(self, other) -> bool:
|
||||
if isinstance(other, (ConcreteVersion, ClosedOpenRange)):
|
||||
return all(lhs.satisfies(other) for lhs in self)
|
||||
|
||||
raise ValueError(f"Unsupported type {type(other)}")
|
||||
raise TypeError(f"'satisfies()' not supported for instances of {type(other)}")
|
||||
|
||||
def intersects(self, other):
|
||||
def intersects(self, other: VersionType) -> bool:
|
||||
if isinstance(other, VersionList):
|
||||
s = o = 0
|
||||
while s < len(self) and o < len(other):
|
||||
@@ -915,19 +1041,16 @@ def intersects(self, other):
|
||||
if isinstance(other, (ClosedOpenRange, StandardVersion)):
|
||||
return any(v.intersects(other) for v in self)
|
||||
|
||||
raise ValueError(f"Unsupported type {type(other)}")
|
||||
raise TypeError(f"'intersects()' not supported for instances of {type(other)}")
|
||||
|
||||
def overlaps(self, other) -> bool:
|
||||
return self.intersects(other)
|
||||
|
||||
def to_dict(self):
|
||||
def to_dict(self) -> Dict:
|
||||
"""Generate human-readable dict for YAML."""
|
||||
if self.concrete:
|
||||
return syaml_dict([("version", str(self[0]))])
|
||||
return syaml_dict([("versions", [str(v) for v in self])])
|
||||
|
||||
@staticmethod
|
||||
def from_dict(dictionary):
|
||||
def from_dict(dictionary) -> "VersionList":
|
||||
"""Parse dict from to_dict."""
|
||||
if "versions" in dictionary:
|
||||
return VersionList(dictionary["versions"])
|
||||
@@ -935,27 +1058,29 @@ def from_dict(dictionary):
|
||||
return VersionList([Version(dictionary["version"])])
|
||||
raise ValueError("Dict must have 'version' or 'versions' in it.")
|
||||
|
||||
def update(self, other: "VersionList"):
|
||||
for v in other.versions:
|
||||
self.add(v)
|
||||
def update(self, other: "VersionList") -> None:
|
||||
self.add(other)
|
||||
|
||||
def union(self, other: "VersionList"):
|
||||
def union(self, other: VersionType) -> VersionType:
|
||||
result = self.copy()
|
||||
result.update(other)
|
||||
result.add(other)
|
||||
return result
|
||||
|
||||
def intersection(self, other: "VersionList") -> "VersionList":
|
||||
def intersection(self, other: VersionType) -> "VersionList":
|
||||
result = VersionList()
|
||||
for lhs, rhs in ((self, other), (other, self)):
|
||||
for x in lhs:
|
||||
i = bisect_left(rhs.versions, x)
|
||||
if i > 0:
|
||||
result.add(rhs[i - 1].intersection(x))
|
||||
if i < len(rhs):
|
||||
result.add(rhs[i].intersection(x))
|
||||
return result
|
||||
if isinstance(other, VersionList):
|
||||
for lhs, rhs in ((self, other), (other, self)):
|
||||
for x in lhs:
|
||||
i = bisect_left(rhs.versions, x)
|
||||
if i > 0:
|
||||
result.add(rhs[i - 1].intersection(x))
|
||||
if i < len(rhs):
|
||||
result.add(rhs[i].intersection(x))
|
||||
return result
|
||||
else:
|
||||
return self.intersection(VersionList(other))
|
||||
|
||||
def intersect(self, other) -> bool:
|
||||
def intersect(self, other: VersionType) -> bool:
|
||||
"""Intersect this spec's list with other.
|
||||
|
||||
Return True if the spec changed as a result; False otherwise
|
||||
@@ -965,6 +1090,7 @@ def intersect(self, other) -> bool:
|
||||
self.versions = isection.versions
|
||||
return changed
|
||||
|
||||
# typing this and getitem are a pain in Python 3.6
|
||||
def __contains__(self, other):
|
||||
if isinstance(other, (ClosedOpenRange, StandardVersion)):
|
||||
i = bisect_left(self, other)
|
||||
@@ -978,52 +1104,52 @@ def __contains__(self, other):
|
||||
def __getitem__(self, index):
|
||||
return self.versions[index]
|
||||
|
||||
def __iter__(self):
|
||||
def __iter__(self) -> Iterator:
|
||||
return iter(self.versions)
|
||||
|
||||
def __reversed__(self):
|
||||
def __reversed__(self) -> Iterator:
|
||||
return reversed(self.versions)
|
||||
|
||||
def __len__(self):
|
||||
def __len__(self) -> int:
|
||||
return len(self.versions)
|
||||
|
||||
def __bool__(self):
|
||||
def __bool__(self) -> bool:
|
||||
return bool(self.versions)
|
||||
|
||||
def __eq__(self, other):
|
||||
def __eq__(self, other) -> bool:
|
||||
if isinstance(other, VersionList):
|
||||
return self.versions == other.versions
|
||||
return False
|
||||
|
||||
def __ne__(self, other):
|
||||
def __ne__(self, other) -> bool:
|
||||
if isinstance(other, VersionList):
|
||||
return self.versions != other.versions
|
||||
return False
|
||||
|
||||
def __lt__(self, other):
|
||||
def __lt__(self, other) -> bool:
|
||||
if isinstance(other, VersionList):
|
||||
return self.versions < other.versions
|
||||
return NotImplemented
|
||||
|
||||
def __le__(self, other):
|
||||
def __le__(self, other) -> bool:
|
||||
if isinstance(other, VersionList):
|
||||
return self.versions <= other.versions
|
||||
return NotImplemented
|
||||
|
||||
def __ge__(self, other):
|
||||
def __ge__(self, other) -> bool:
|
||||
if isinstance(other, VersionList):
|
||||
return self.versions >= other.versions
|
||||
return NotImplemented
|
||||
|
||||
def __gt__(self, other):
|
||||
def __gt__(self, other) -> bool:
|
||||
if isinstance(other, VersionList):
|
||||
return self.versions > other.versions
|
||||
return NotImplemented
|
||||
|
||||
def __hash__(self):
|
||||
def __hash__(self) -> int:
|
||||
return hash(tuple(self.versions))
|
||||
|
||||
def __str__(self):
|
||||
def __str__(self) -> str:
|
||||
if not self.versions:
|
||||
return ""
|
||||
|
||||
@@ -1031,7 +1157,7 @@ def __str__(self):
|
||||
f"={v}" if isinstance(v, StandardVersion) else str(v) for v in self.versions
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
def __repr__(self) -> str:
|
||||
return str(self.versions)
|
||||
|
||||
|
||||
@@ -1106,12 +1232,10 @@ def _next_version(v: StandardVersion) -> StandardVersion:
        release = release[:-1] + (_next_version_str_component(release[-1]),)
    else:
        release = release[:-1] + (release[-1] + 1,)
    components = [""] * (2 * len(release))
    components[::2] = release
    components[1::2] = separators[: len(release)]
    if prerelease_type != FINAL:
        components.extend((PRERELEASE_TO_STRING[prerelease_type], prerelease[1]))
    return StandardVersion("".join(str(c) for c in components), (release, prerelease), separators)
    # Avoid constructing a string here for performance. Instead, pass "" to
    # StandardVersion to lazily stringify.
    return StandardVersion("", (release, prerelease), separators)


def _prev_version(v: StandardVersion) -> StandardVersion:
@@ -1130,19 +1254,15 @@ def _prev_version(v: StandardVersion) -> StandardVersion:
        release = release[:-1] + (_prev_version_str_component(release[-1]),)
    else:
        release = release[:-1] + (release[-1] - 1,)
    components = [""] * (2 * len(release))
    components[::2] = release
    components[1::2] = separators[: len(release)]
    if prerelease_type != FINAL:
        components.extend((PRERELEASE_TO_STRING[prerelease_type], *prerelease[1:]))

    # this is only used for comparison functions, so don't bother making a string
    return StandardVersion(None, (release, prerelease), separators)
    # Avoid constructing a string here for performance. Instead, pass "" to
    # StandardVersion to lazily stringify.
    return StandardVersion("", (release, prerelease), separators)


def Version(string: Union[str, int]) -> Union[GitVersion, StandardVersion]:
def Version(string: Union[str, int]) -> ConcreteVersion:
    if not isinstance(string, (str, int)):
        raise ValueError(f"Cannot construct a version from {type(string)}")
        raise TypeError(f"Cannot construct a version from {type(string)}")
    string = str(string)
    if is_git_version(string):
        return GitVersion(string)
@@ -1155,7 +1275,7 @@ def VersionRange(lo: Union[str, StandardVersion], hi: Union[str, StandardVersion
    return ClosedOpenRange.from_version_range(lo, hi)


def from_string(string) -> Union[VersionList, ClosedOpenRange, StandardVersion, GitVersion]:
def from_string(string: str) -> VersionType:
    """Converts a string to a version object. This is private. Client code should use ver()."""
    string = string.replace(" ", "")

@@ -1184,17 +1304,17 @@ def from_string(string) -> Union[VersionList, ClosedOpenRange, StandardVersion,
    return VersionRange(v, v)


def ver(obj) -> Union[VersionList, ClosedOpenRange, StandardVersion, GitVersion]:
def ver(obj: Union[VersionType, str, list, tuple, int, float]) -> VersionType:
    """Parses a Version, VersionRange, or VersionList from a string
    or list of strings.
    """
    if isinstance(obj, (list, tuple)):
        return VersionList(obj)
    if isinstance(obj, VersionType):
        return obj
    elif isinstance(obj, str):
        return from_string(obj)
    elif isinstance(obj, (list, tuple)):
        return VersionList(obj)
    elif isinstance(obj, (int, float)):
        return from_string(str(obj))
    elif isinstance(obj, (StandardVersion, GitVersion, ClosedOpenRange, VersionList)):
        return obj
    else:
        raise TypeError("ver() can't convert %s to version!" % type(obj))
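The reworked ver() above checks for VersionType first, then strings, lists/tuples, and numbers. A minimal sketch of the resulting behavior, assuming this branch's spack.version module is importable; illustrative only, not part of the diff:

    from spack.version import ver

    v = ver("1.2.3")             # string -> StandardVersion
    r = ver("1.2:1.4")           # range syntax -> ClosedOpenRange
    vs = ver(["1.2.3", "2.0"])   # list/tuple -> VersionList
    assert ver(v) is v           # VersionType inputs are returned unchanged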
@@ -810,14 +810,6 @@ ml-darwin-aarch64-mps-build:

.aws-pcluster-generate:
  image: { "name": "ghcr.io/spack/pcluster-amazonlinux-2:v2024-10-07", "entrypoint": [""] }
  before_script:
    # Use gcc from pre-installed spack store
    - - . "./share/spack/setup-env.sh"
      - . "/etc/profile.d/modules.sh"
      - diff -q "/bootstrap/cloud_pipelines-config.yaml" "share/spack/gitlab/cloud_pipelines/configs/config.yaml" || echo "WARNING Install tree might have changed. You need to rebuild the pcluster-amazonlinux-2 container in spack/gitlab-runners."
      - cp "share/spack/gitlab/cloud_pipelines/configs/config.yaml" "etc/spack/"
      - /bin/bash "${SPACK_ROOT}/share/spack/gitlab/cloud_pipelines/scripts/pcluster/setup-pcluster.sh"
      - rm "etc/spack/config.yaml"

# X86_64_V4 (one pipeline per target)
.aws-pcluster-x86_64_v4:
@@ -826,6 +818,10 @@ ml-darwin-aarch64-mps-build:

aws-pcluster-generate-x86_64_v4:
  extends: [ ".linux_x86_64_v4", ".aws-pcluster-x86_64_v4", ".generate-base", ".tags-x86_64_v4", ".aws-pcluster-generate"]
  before_script:
    - - . "./share/spack/setup-env.sh"
      # TODO: Move this to the container next time it is rebuilt
      - export PATH=/home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-x86_64_v3/gcc-7.3.1/binutils-2.37-qvccg7zpskturysmr4bzbsfrx34kvazo/bin:$PATH

aws-pcluster-build-x86_64_v4:
  extends: [ ".linux_x86_64_v4", ".aws-pcluster-x86_64_v4", ".build" ]
@@ -846,6 +842,10 @@ aws-pcluster-build-x86_64_v4:
aws-pcluster-generate-neoverse_v1:
  # TODO: Use updated runner tags: https://github.com/spack/spack-infrastructure/pull/694/files
  extends: [ ".linux_neoverse_v1", ".aws-pcluster-neoverse_v1", ".generate-neoverse_v1", ".aws-pcluster-generate"]
  before_script:
    - - . "./share/spack/setup-env.sh"
      # TODO: Move this to the container next time it is rebuilt
      - export PATH=/home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-aarch64/gcc-7.3.1/binutils-2.37-2yxz3xsjfmesxujxtlrgcctxlyilynmp/bin:$PATH

aws-pcluster-build-neoverse_v1:
  extends: [ ".linux_neoverse_v1", ".aws-pcluster-neoverse_v1", ".build" ]
@@ -951,3 +951,49 @@ windows-vis-build:
  needs:
    - artifacts: True
      job: windows-vis-generate

#######################################
# Bootstrap x86_64-linux-gnu
#######################################
.bootstrap-x86_64-linux-gnu:
  extends: [ ".linux_x86_64_v3" ]
  variables:
    SPACK_CI_STACK_NAME: bootstrap-x86_64-linux-gnu

bootstrap-x86_64-linux-gnu-generate:
  extends: [ .generate-x86_64, .bootstrap-x86_64-linux-gnu ]
  image: ghcr.io/spack/ubuntu-24.04:v2024-09-05-v2

bootstrap-x86_64-linux-gnu-build:
  extends: [ .build, .bootstrap-x86_64-linux-gnu ]
  trigger:
    include:
      - artifact: jobs_scratch_dir/cloud-ci-pipeline.yml
        job: bootstrap-x86_64-linux-gnu-generate
    strategy: depend
  needs:
    - artifacts: True
      job: bootstrap-x86_64-linux-gnu-generate

#######################################
# Bootstrap aarch64-darwin
#######################################
.bootstrap-aarch64-darwin:
  extends: [.darwin_aarch64]
  variables:
    SPACK_CI_STACK_NAME: bootstrap-aarch64-darwin

bootstrap-aarch64-darwin-generate:
  tags: [macos-ventura, apple-clang-15, aarch64-macos]
  extends: [.bootstrap-aarch64-darwin, .generate-base]

bootstrap-aarch64-darwin-build:
  extends: [.bootstrap-aarch64-darwin, .build]
  trigger:
    include:
      - artifact: jobs_scratch_dir/cloud-ci-pipeline.yml
        job: bootstrap-aarch64-darwin-generate
    strategy: depend
  needs:
    - artifacts: true
      job: bootstrap-aarch64-darwin-generate
@@ -1,112 +0,0 @@
#!/usr/bin/env bash
#
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
set -e

set_pcluster_defaults() {
    # Set versions of pre-installed software in packages.yaml
    [ -z "${SLURM_ROOT}" ] && ls /etc/systemd/system/slurm* &>/dev/null && \
        SLURM_ROOT=$(dirname $(dirname "$(awk '/ExecStart=/ {print $1}' /etc/systemd/system/slurm* | sed -e 's?^.*=??1' | head -n1)"))
    # Fall back to the default location if SLURM is not managed by systemd
    [ -z "${SLURM_ROOT}" ] && [ -d "/opt/slurm" ] && SLURM_ROOT=/opt/slurm
    [ -z "${SLURM_VERSION}" ] && SLURM_VERSION=$(strings "${SLURM_ROOT}"/lib/libslurm.so | grep -e '^VERSION' | awk '{print $2}' | sed -e 's?"??g')
    [ -z "${LIBFABRIC_VERSION}" ] && LIBFABRIC_VERSION=$(awk '/Version:/{print $2}' "$(find /opt/amazon/efa/ -name libfabric.pc | head -n1)" | sed -e 's?~??g' -e 's?amzn.*??g')
    export SLURM_ROOT SLURM_VERSION LIBFABRIC_VERSION

    envsubst < "${SPACK_ROOT}/share/spack/gitlab/cloud_pipelines/stacks/${SPACK_CI_STACK_NAME}/packages.yaml" > "${SPACK_ROOT}"/etc/spack/packages.yaml
}

patch_compilers_yaml() {
    # Graceful exit if package not found by spack
    set -o pipefail
    compilers_yaml="${SPACK_ROOT}/etc/spack/compilers.yaml"
    [ -f "${compilers_yaml}" ] || {
        echo "Cannot find ${compilers_yaml}, compiler setup might not be optimal."
        return
    }

    # System ld is too old for amzn linux2
    spack_gcc_version=$(spack find --format '{version}' gcc)
    binutils_path=$(spack find -p binutils | awk '/binutils/ {print $2}' | head -n1)
    if [ -d "${binutils_path}" ] && [ -n "${spack_gcc_version}" ]; then python3 <<EOF
import yaml

with open("${compilers_yaml}",'r') as f:
    compilers=yaml.safe_load(f)

for c in compilers["compilers"]:
    if "arm" in c["compiler"]["spec"] or "intel" in c["compiler"]["spec"] or "oneapi" in c["compiler"]["spec"] \
            or "${spack_gcc_version}" in c["compiler"]["spec"]:
        compilers["compilers"][compilers["compilers"].index(c)]["compiler"]["environment"] = {"prepend_path":{"PATH":"${binutils_path}/bin"}}

with open("${compilers_yaml}",'w') as f:
    yaml.dump(compilers, f)
EOF
    fi
    # Oneapi needs extra_rpath to gcc libstdc++.so.6
    if [ "x86_64" == "$(arch)" ] && oneapi_gcc_version=$(spack find --format '{compiler}' intel-oneapi-compilers | sed -e 's/=//g') && \
        [ -n "${oneapi_gcc_version}" ] && oneapi_gcc_path=$(spack find -p "${oneapi_gcc_version}" | grep "${oneapi_gcc_version}" | awk '{print $2}' | head -n1) && \
        [ -d "${oneapi_gcc_path}" ]; then python3 <<EOF
import yaml

with open("${compilers_yaml}",'r') as f:
    compilers=yaml.safe_load(f)

for c in compilers["compilers"]:
    if "oneapi" in c["compiler"]["spec"]:
        compilers["compilers"][compilers["compilers"].index(c)]["compiler"]["extra_rpaths"] = ["${oneapi_gcc_path}/lib64"]

with open("${compilers_yaml}",'w') as f:
    yaml.dump(compilers, f)
EOF
    fi
}

install_compilers() {
    # Install Intel compilers through a static spack version such that the compiler's hash does not change.
    # The compilers need to be in the same install tree as the rest of the software such that the path
    # relocation works correctly. This holds the danger that this part will fail when the current spack gets
    # incompatible with the one in $spack_intel_compiler_commit. Therefore, we make intel installations optional
    # in packages.yaml files and add a fallback `%gcc` version for each application.
    if [ -f "/bootstrap-compilers/spack/etc/spack/compilers.yaml" ]; then
        # Running inside a gitlab CI container
        # Intel and gcc@12 compilers are pre-installed and their location is known in compilers.yaml
        cp /bootstrap-compilers/spack/etc/spack/compilers.yaml "${SPACK_ROOT}"/etc/spack/
    else
        spack compiler add --scope site
        # We need to treat compilers as essentially external, i.e. their installation location
        # (including hash) must not change when any changes are pushed to spack. The reason is that
        # changes in the compilers are not reflected in the package hashes built in the CI. Hence, those
        # packages will reference a wrong compiler path once the path changes.

        # `gcc@12.4.0%gcc@7.3.1` is created as part of building the pipeline containers.
        # `ghcr.io/spack/pcluster-amazonlinux-2:v2024-10-07` produced the following hashes.
        if [ "x86_64" == "$(arch)" ]; then
            gcc_hash="pttzchh7o54nhmycj4wgzw5mic6rk2nb"
        else
            gcc_hash="v6wxye6ijzrxnzxftcwnpu3psohsjl2b"
        fi

        spack install /${gcc_hash}
        (
            spack load gcc
            spack compiler add --scope site
        )

        if [ "x86_64" == "$(arch)" ]; then
            # 2024.1.0 is the last oneapi compiler that works on AL2 and is the one used to compile packages in the build cache.
            spack install intel-oneapi-compilers@2024.1.0
            (
                . "$(spack location -i intel-oneapi-compilers)"/setvars.sh; spack compiler add --scope site \
                    || true
            )
        fi
    fi
}

set_pcluster_defaults
install_compilers
patch_compilers_yaml
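patch_compilers_yaml above embeds Python in heredocs whose ${...} variables are interpolated by the shell before the interpreter runs. A standalone sketch of the same compilers.yaml edit, with the interpolated values replaced by illustrative placeholders (requires PyYAML; not part of the deleted file):

    import yaml

    compilers_yaml = "compilers.yaml"        # stands in for ${compilers_yaml}
    binutils_bin = "/path/to/binutils/bin"   # stands in for ${binutils_path}/bin
    flagged = ("arm", "intel", "oneapi", "12.4.0")  # specs that get the newer ld

    with open(compilers_yaml) as f:
        data = yaml.safe_load(f)

    for entry in data["compilers"]:
        spec = entry["compiler"]["spec"]
        if any(key in spec for key in flagged):
            # prepend the spack-built binutils so these compilers use a newer ld
            entry["compiler"]["environment"] = {"prepend_path": {"PATH": binutils_bin}}

    with open(compilers_yaml, "w") as f:
        yaml.dump(data, f)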
@@ -1,67 +0,0 @@
--- # Neoverse N1 / V1 packages
packages:
  acfl:
    require:
      - one_of: ["%gcc target=aarch64"]
        message: "Clang based compilers need GCC libraries and they should be made available for the wide range of CPUs they actually support.
          Edit $SPACK_ROOT/etc/spack/packages.yaml to change this default."
  gromacs:
    require:
      - one_of:
          - "gromacs@2021.3 %arm ^fftw ^openmpi"
          - "gromacs@2021.3 %gcc ^armpl-gcc ^openmpi"
  libfabric:
    buildable: true
    externals:
      - prefix: /opt/amazon/efa/
        spec: libfabric@${LIBFABRIC_VERSION}
    require: ['fabrics=shm,efa']
  llvm:
    variants: ~lldb
  mpas-model:
    require:
      - one_of:
          - "precision=single make_target=llvm %arm ^parallelio+pnetcdf"
          - "precision=single %gcc ^parallelio+pnetcdf"
  mpich:
    require: "mpich pmi=pmi2 device=ch4 netmod=ofi +slurm"
  nvhpc:
    require:
      - one_of:
          - "nvhpc %gcc target=aarch64"
        message: "NVHPC should be built with GCC and should be made available for the wide range of CPUs they actually support.
          Edit $SPACK_ROOT/etc/spack/packages.yaml to change this default."
  openfoam:
    require: "openfoam %gcc ^scotch@6.0.9"
  openmpi:
    variants: ~atomics ~cuda ~cxx ~cxx_exceptions ~internal-hwloc ~java +legacylaunchers ~lustre ~memchecker +pmi +romio ~singularity +vt +wrapper-rpath fabrics=ofi schedulers=slurm
    require: '@4:'
  # Palace does not build correctly with armpl until https://github.com/awslabs/palace/pull/207 is merged into a version.
  # palace:
  #   require:
  #     - one_of: ["palace cxxflags=\"-include cstdint\" ^fmt@9.1.0"]
  pmix:
    require: "pmix@3:"
  quantum-espresso:
    require: "quantum-espresso@6.6 %gcc ^armpl-gcc"
  slurm:
    buildable: false
    externals:
      - prefix: ${SLURM_ROOT}
        spec: slurm@${SLURM_VERSION} +pmix
  wrf:
    require:
      - one_of:
          - "wrf%arm"
          - "wrf%gcc"
  all:
    compiler: [gcc, arm, nvhpc, clang]
    providers:
      blas: [armpl-gcc, openblas]
      fftw-api: [armpl-gcc, fftw]
      lapack: [armpl-gcc, openblas]
      mpi: [openmpi, mpich]
      scalapack: [netlib-scalapack]
    permissions:
      read: world
      write: user
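The ${SLURM_ROOT}, ${SLURM_VERSION}, and ${LIBFABRIC_VERSION} placeholders above are filled in by the envsubst call in set_pcluster_defaults. A minimal Python equivalent using string.Template, with illustrative values matching the rendered spack.yaml files later in this diff (a sketch, not part of the pipeline):

    from string import Template

    with open("packages.yaml") as f:
        template = Template(f.read())

    # safe_substitute handles the ${VAR} syntax and leaves unknown keys alone
    rendered = template.safe_substitute(
        SLURM_ROOT="/opt/slurm",
        SLURM_VERSION="22.05.8",
        LIBFABRIC_VERSION="1.17.0",
    )

    with open("rendered-packages.yaml", "w") as f:
        f.write(rendered)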
@@ -28,10 +28,7 @@ spack:
          - . /etc/profile.d/modules.sh
          - spack --version
          - spack arch
          # Use gcc from pre-installed spack store
          - - cp share/spack/gitlab/cloud_pipelines/configs/config.yaml etc/spack/
            - /bin/bash "${SPACK_ROOT}/share/spack/gitlab/cloud_pipelines/scripts/pcluster/setup-pcluster.sh"
            - rm etc/spack/config.yaml
          - export PATH=/home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-aarch64/gcc-7.3.1/binutils-2.37-2yxz3xsjfmesxujxtlrgcctxlyilynmp/bin:$PATH
    - signing-job:
        before_script:
          # Do not distribute Intel & ARM binaries
@@ -40,3 +37,98 @@ spack:

  cdash:
    build-group: AWS Packages

  compilers:
  - compiler:
      environment: {}
      extra_rpaths: []
      flags: {}
      modules: []
      operating_system: amzn2
      paths:
        cc: /usr/bin/gcc
        cxx: /usr/bin/g++
        f77: /usr/bin/gfortran
        fc: /usr/bin/gfortran
      spec: gcc@=7.3.1
      target: aarch64
  - compiler:
      environment: {}
      extra_rpaths: []
      flags: {}
      modules: []
      operating_system: amzn2
      paths:
        cc: /home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-aarch64/gcc-7.3.1/gcc-12.4.0-v6wxye6ijzrxnzxftcwnpu3psohsjl2b/bin/gcc
        cxx: /home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-aarch64/gcc-7.3.1/gcc-12.4.0-v6wxye6ijzrxnzxftcwnpu3psohsjl2b/bin/g++
        f77: /home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-aarch64/gcc-7.3.1/gcc-12.4.0-v6wxye6ijzrxnzxftcwnpu3psohsjl2b/bin/gfortran
        fc: /home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-aarch64/gcc-7.3.1/gcc-12.4.0-v6wxye6ijzrxnzxftcwnpu3psohsjl2b/bin/gfortran
      spec: gcc@=12.4.0
      target: aarch64

  packages:
    acfl:
      require:
        - one_of: ["%gcc target=aarch64"]
          message: "Clang based compilers need GCC libraries and they should be made available for the wide range of CPUs they actually support.
            Edit $SPACK_ROOT/etc/spack/packages.yaml to change this default."
    gromacs:
      require:
        - one_of:
            - "gromacs@2024.3 %arm ^fftw ^openmpi"
            - "gromacs@2024.3 %gcc ^armpl-gcc ^openmpi"
    libfabric:
      buildable: true
      externals:
        - prefix: /opt/amazon/efa/
          spec: libfabric@1.17.0
      require: ['fabrics=shm,efa']
    llvm:
      variants: ~lldb
    mpas-model:
      require:
        - one_of:
            - "precision=single make_target=llvm %arm ^parallelio+pnetcdf"
            - "precision=single %gcc ^parallelio+pnetcdf"
    mpich:
      require: "mpich pmi=pmi2 device=ch4 netmod=ofi +slurm"
    nvhpc:
      require:
        - one_of:
            - "nvhpc %gcc target=aarch64"
          message: "NVHPC should be built with GCC and should be made available for the wide range of CPUs they actually support.
            Edit $SPACK_ROOT/etc/spack/packages.yaml to change this default."
    openfoam:
      require: "openfoam %gcc ^scotch@6.0.9"
    openmpi:
      variants: ~atomics ~cuda ~cxx ~cxx_exceptions ~internal-hwloc ~java +legacylaunchers ~lustre ~memchecker +pmi +romio ~singularity +vt +wrapper-rpath fabrics=ofi schedulers=slurm
      require: '@4:'
    # Palace does not build correctly with armpl until https://github.com/awslabs/palace/pull/207 is merged into a version.
    # palace:
    #   require:
    #     - one_of: ["palace cxxflags=\"-include cstdint\" ^fmt@9.1.0"]
    pmix:
      require: "pmix@3:"
    quantum-espresso:
      require: "quantum-espresso@6.6 %gcc ^armpl-gcc"
    slurm:
      buildable: false
      externals:
        - prefix: /opt/slurm
          spec: slurm@22.05.8 +pmix
    wrf:
      require:
        - one_of:
            - "wrf%arm"
            - "wrf%gcc"
    all:
      compiler: [gcc, arm, nvhpc, clang]
      providers:
        blas: [armpl-gcc, openblas]
        fftw-api: [armpl-gcc, fftw]
        lapack: [armpl-gcc, openblas]
        mpi: [openmpi, mpich]
        scalapack: [netlib-scalapack]
      permissions:
        read: world
        write: user
@@ -1,143 +0,0 @@
--- # x86_64_v4 packages (ice/skylake) & x86_64_v3 packages (zen2/3)
packages:
  cpio:
    require:
      - one_of:
          - "cflags=-std=c18 target=x86_64_v4"
          - "cflags=-std=c18 target=x86_64_v3"
        when: "%intel"
  gettext:
    # Newer gettext cannot build with gcc@12 and old AL2 glibc headers
    # Older gettext versions do not build correctly with oneapi.
    require:
      - one_of:
          - '@:0.20'
          - '%oneapi'
  gromacs:
    require:
      - one_of:
          - "+intel_provided_gcc ^intel-oneapi-mkl target=x86_64_v4"
          - "+intel_provided_gcc ^intel-oneapi-mkl target=x86_64_v3"
        when: "%intel"
      - one_of:
          - "+intel_provided_gcc target=x86_64_v4 ^intel-oneapi-mkl"
          - "+intel_provided_gcc target=x86_64_v3 ^intel-oneapi-mkl"
        when: "%oneapi"
  intel-oneapi-compilers:
    require: "intel-oneapi-compilers %gcc target=x86_64_v3"
  intel-oneapi-mpi:
    variants: +external-libfabric generic-names=True
  lammps:
    require:
      - one_of:
          - "lammps_sizes=bigbig +molecule +kspace +rigid +asphere +opt +openmp +openmp-package +intel fft=mkl ^intel-oneapi-mkl target=x86_64_v4"
          - "lammps_sizes=bigbig +molecule +kspace +rigid +asphere +opt +openmp +openmp-package fft=mkl ^intel-oneapi-mkl target=x86_64_v3"
        when: "%intel"
      - one_of:
          - "lammps_sizes=bigbig +molecule +kspace +rigid +asphere +opt +openmp +openmp-package +intel fft=mkl ^intel-oneapi-mkl target=x86_64_v4"
          - "lammps_sizes=bigbig +molecule +kspace +rigid +asphere +opt +openmp +openmp-package fft=mkl ^intel-oneapi-mkl target=x86_64_v3"
        when: "%oneapi"
  libidn2:
    require:
      - one_of:
          - "cflags=-std=c18 target=x86_64_v4"
          - "cflags=-std=c18 target=x86_64_v3"
        when: "%intel"
  libfabric:
    buildable: true
    externals:
      - prefix: /opt/amazon/efa/
        spec: libfabric@${LIBFABRIC_VERSION}
    require: ['fabrics=shm,efa']
  libunistring:
    require:
      - one_of:
          - "cflags=-std=c18 target=x86_64_v4"
          - "cflags=-std=c18 target=x86_64_v3"
        when: "%intel"
  mpas-model:
    require:
      - one_of:
          - "precision=single ^parallelio+pnetcdf target=x86_64_v4"
          - "precision=single ^parallelio+pnetcdf target=x86_64_v3"
        when: "%intel"
      - one_of:
          - "precision=single ^parallelio+pnetcdf target=x86_64_v4"
          - "precision=single ^parallelio+pnetcdf target=x86_64_v3"
        when: "%oneapi"
  mpich:
    require:
      - one_of:
          - "mpich pmi=pmi2 device=ch4 netmod=ofi +slurm target=x86_64_v4"
          - "mpich pmi=pmi2 device=ch4 netmod=ofi +slurm target=x86_64_v3"
  openfoam:
    require:
      - one_of:
          - "openfoam %gcc ^scotch@6.0.9 target=x86_64_v4"
          - "openfoam %gcc ^scotch@6.0.9 target=x86_64_v3"
  openmpi:
    variants: ~atomics ~cuda ~cxx ~cxx_exceptions ~internal-hwloc ~java +legacylaunchers ~lustre ~memchecker +pmi +romio ~singularity +vt +wrapper-rpath fabrics=ofi schedulers=slurm
    require:
      - one_of:
          - "openmpi @4: target=x86_64_v4"
          - "openmpi @4: target=x86_64_v3"
  palace:
    require:
      - one_of:
          - "palace ^fmt@9.1.0 target=x86_64_v4"
          - "palace ^fmt@9.1.0 target=x86_64_v3"
        when: "%oneapi"
      - one_of:
          - "palace ^fmt@9.1.0"
        when: "%gcc"
  pmix:
    require:
      - one_of:
          - "pmix@3: target=x86_64_v4"
          - "pmix@3: target=x86_64_v3"
  quantum-espresso:
    require:
      - one_of:
          - "quantum-espresso@6.6 ^intel-oneapi-mkl+cluster target=x86_64_v4"
          - "quantum-espresso@6.6 ^intel-oneapi-mkl+cluster target=x86_64_v3"
        when: "%intel"
      - one_of:
          - "quantum-espresso@6.6 ^intel-oneapi-mkl+cluster target=x86_64_v4"
          - "quantum-espresso@6.6 ^intel-oneapi-mkl+cluster target=x86_64_v3"
        when: "%oneapi"
  slurm:
    buildable: false
    externals:
      - prefix: ${SLURM_ROOT}
        spec: slurm@${SLURM_VERSION} +pmix
  wrf:
    require:
      - one_of:
          - "wrf@4 build_type=dm+sm target=x86_64_v4"
          - "wrf@4 build_type=dm+sm target=x86_64_v3"
          - "wrf@4.2.2 +netcdf_classic fflags=\"-fp-model fast=2 -no-heap-arrays -no-prec-div -no-prec-sqrt -fno-common\" build_type=dm+sm target=x86_64_v3"
        when: "%intel"
      - one_of:
          - "wrf@4 build_type=dm+sm target=x86_64_v4"
          - "wrf@4 build_type=dm+sm target=x86_64_v3"
          - "wrf@4.2.2 +netcdf_classic fflags=\"-fp-model fast=2 -no-heap-arrays -no-prec-div -no-prec-sqrt -fno-common\" build_type=dm+sm target=x86_64_v3"
        when: "%oneapi"

  all:
    compiler: [oneapi, gcc]
    permissions:
      read: world
      write: user
    providers:
      blas: [intel-oneapi-mkl]
      daal: [intel-oneapi-dal]
      fftw-api: [intel-oneapi-mkl]
      ipp: [intel-oneapi-ipp]
      lapack: [intel-oneapi-mkl]
      mkl: [intel-oneapi-mkl]
      mpi: [intel-oneapi-mpi, openmpi, mpich]
      tbb: [intel-oneapi-tbb, intel-tbb]
      scalapack: [intel-oneapi-mkl]
@@ -31,10 +31,7 @@ spack:
          - . /etc/profile.d/modules.sh
          - spack --version
          - spack arch
          # Use gcc from pre-installed spack store
          - - cp share/spack/gitlab/cloud_pipelines/configs/config.yaml etc/spack/
            - /bin/bash "${SPACK_ROOT}/share/spack/gitlab/cloud_pipelines/scripts/pcluster/setup-pcluster.sh"
            - rm etc/spack/config.yaml
          - export PATH=/home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-x86_64_v3/gcc-7.3.1/binutils-2.37-qvccg7zpskturysmr4bzbsfrx34kvazo/bin:$PATH
    - signing-job:
        before_script:
          # Do not distribute Intel & ARM binaries
@@ -43,3 +40,172 @@ spack:

  cdash:
    build-group: AWS Packages

  compilers:
  - compiler:
      environment: {}
      extra_rpaths: []
      flags: {}
      modules: []
      operating_system: amzn2
      paths:
        cc: /home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-x86_64_v3/gcc-7.3.1/gcc-12.4.0-pttzchh7o54nhmycj4wgzw5mic6rk2nb/bin/gcc
        cxx: /home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-x86_64_v3/gcc-7.3.1/gcc-12.4.0-pttzchh7o54nhmycj4wgzw5mic6rk2nb/bin/g++
        f77: /home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-x86_64_v3/gcc-7.3.1/gcc-12.4.0-pttzchh7o54nhmycj4wgzw5mic6rk2nb/bin/gfortran
        fc: /home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-x86_64_v3/gcc-7.3.1/gcc-12.4.0-pttzchh7o54nhmycj4wgzw5mic6rk2nb/bin/gfortran
      spec: gcc@=12.4.0
      target: x86_64
  - compiler:
      environment: {}
      extra_rpaths:
      - /home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-x86_64_v3/gcc-7.3.1/gcc-12.4.0-pttzchh7o54nhmycj4wgzw5mic6rk2nb/lib64
      flags: {}
      modules: []
      operating_system: amzn2
      paths:
        cc: /home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-x86_64_v3/gcc-12.4.0/intel-oneapi-compilers-2024.1.0-f5u3psfhbwscasajkn324igtupn3blop/compiler/2024.1/bin/icx
        cxx: /home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-x86_64_v3/gcc-12.4.0/intel-oneapi-compilers-2024.1.0-f5u3psfhbwscasajkn324igtupn3blop/compiler/2024.1/bin/icpx
        f77: /home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-x86_64_v3/gcc-12.4.0/intel-oneapi-compilers-2024.1.0-f5u3psfhbwscasajkn324igtupn3blop/compiler/2024.1/bin/ifx
        fc: /home/software/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_placeh/linux-amzn2-x86_64_v3/gcc-12.4.0/intel-oneapi-compilers-2024.1.0-f5u3psfhbwscasajkn324igtupn3blop/compiler/2024.1/bin/ifx
      spec: oneapi@=2024.1.0
      target: x86_64

  packages:
    cpio:
      require:
        - one_of:
            - "cflags=-std=c18 target=x86_64_v4"
            - "cflags=-std=c18 target=x86_64_v3"
          when: "%intel"
    gettext:
      # Newer gettext cannot build with gcc@12 and old AL2 glibc headers
      # Older gettext versions do not build correctly with oneapi.
      require:
        - one_of:
            - '@:0.20'
            - '%oneapi'
    gromacs:
      require:
        - one_of:
            - "+intel_provided_gcc ^intel-oneapi-mkl target=x86_64_v4"
            - "+intel_provided_gcc ^intel-oneapi-mkl target=x86_64_v3"
          when: "%intel"
        - one_of:
            - "+intel_provided_gcc target=x86_64_v4 ^intel-oneapi-mkl"
            - "+intel_provided_gcc target=x86_64_v3 ^intel-oneapi-mkl"
          when: "%oneapi"
    intel-oneapi-compilers:
      require: "intel-oneapi-compilers %gcc target=x86_64_v3"
    intel-oneapi-mpi:
      variants: +external-libfabric generic-names=True
    lammps:
      require:
        - one_of:
            - "lammps_sizes=bigbig +molecule +kspace +rigid +asphere +opt +openmp +openmp-package +intel fft=mkl ^intel-oneapi-mkl target=x86_64_v4"
            - "lammps_sizes=bigbig +molecule +kspace +rigid +asphere +opt +openmp +openmp-package fft=mkl ^intel-oneapi-mkl target=x86_64_v3"
          when: "%intel"
        - one_of:
            - "lammps_sizes=bigbig +molecule +kspace +rigid +asphere +opt +openmp +openmp-package +intel fft=mkl ^intel-oneapi-mkl target=x86_64_v4"
            - "lammps_sizes=bigbig +molecule +kspace +rigid +asphere +opt +openmp +openmp-package fft=mkl ^intel-oneapi-mkl target=x86_64_v3"
          when: "%oneapi"
    libidn2:
      require:
        - one_of:
            - "cflags=-std=c18 target=x86_64_v4"
            - "cflags=-std=c18 target=x86_64_v3"
          when: "%intel"
    libfabric:
      buildable: true
      externals:
        - prefix: /opt/amazon/efa/
          spec: libfabric@1.17.0
      require: ['fabrics=shm,efa']
    libunistring:
      require:
        - one_of:
            - "cflags=-std=c18 target=x86_64_v4"
            - "cflags=-std=c18 target=x86_64_v3"
          when: "%intel"
    mpas-model:
      require:
        - one_of:
            - "precision=single ^parallelio+pnetcdf target=x86_64_v4"
            - "precision=single ^parallelio+pnetcdf target=x86_64_v3"
          when: "%intel"
        - one_of:
            - "precision=single ^parallelio+pnetcdf target=x86_64_v4"
            - "precision=single ^parallelio+pnetcdf target=x86_64_v3"
          when: "%oneapi"
    mpich:
      require:
        - one_of:
            - "mpich pmi=pmi2 device=ch4 netmod=ofi +slurm target=x86_64_v4"
            - "mpich pmi=pmi2 device=ch4 netmod=ofi +slurm target=x86_64_v3"
    openfoam:
      require:
        - one_of:
            - "openfoam %gcc ^scotch@6.0.9 target=x86_64_v4"
            - "openfoam %gcc ^scotch@6.0.9 target=x86_64_v3"
    openmpi:
      variants: ~atomics ~cuda ~cxx ~cxx_exceptions ~internal-hwloc ~java +legacylaunchers ~lustre ~memchecker +pmi +romio ~singularity +vt +wrapper-rpath fabrics=ofi schedulers=slurm
      require:
        - one_of:
            - "openmpi @4: target=x86_64_v4"
            - "openmpi @4: target=x86_64_v3"
    palace:
      require:
        - one_of:
            - "palace ^fmt@9.1.0 target=x86_64_v4"
            - "palace ^fmt@9.1.0 target=x86_64_v3"
          when: "%oneapi"
        - one_of:
            - "palace ^fmt@9.1.0"
          when: "%gcc"
    pmix:
      require:
        - one_of:
            - "pmix@3: target=x86_64_v4"
            - "pmix@3: target=x86_64_v3"
    quantum-espresso:
      require:
        - one_of:
            - "quantum-espresso@6.6 ^intel-oneapi-mkl+cluster target=x86_64_v4"
            - "quantum-espresso@6.6 ^intel-oneapi-mkl+cluster target=x86_64_v3"
          when: "%intel"
        - one_of:
            - "quantum-espresso@6.6 ^intel-oneapi-mkl+cluster target=x86_64_v4"
            - "quantum-espresso@6.6 ^intel-oneapi-mkl+cluster target=x86_64_v3"
          when: "%oneapi"
    slurm:
      buildable: false
      externals:
        - prefix: /opt/slurm
          spec: slurm@22.05.8 +pmix
    wrf:
      require:
        - one_of:
            - "wrf@4 build_type=dm+sm target=x86_64_v4"
            - "wrf@4 build_type=dm+sm target=x86_64_v3"
            - "wrf@4.2.2 +netcdf_classic fflags=\"-fp-model fast=2 -no-heap-arrays -no-prec-div -no-prec-sqrt -fno-common\" build_type=dm+sm target=x86_64_v3"
          when: "%intel"
        - one_of:
            - "wrf@4 build_type=dm+sm target=x86_64_v4"
            - "wrf@4 build_type=dm+sm target=x86_64_v3"
            - "wrf@4.2.2 +netcdf_classic fflags=\"-fp-model fast=2 -no-heap-arrays -no-prec-div -no-prec-sqrt -fno-common\" build_type=dm+sm target=x86_64_v3"
          when: "%oneapi"

    all:
      compiler: [oneapi, gcc]
      permissions:
        read: world
        write: user
      providers:
        blas: [intel-oneapi-mkl]
        daal: [intel-oneapi-dal]
        fftw-api: [intel-oneapi-mkl]
        ipp: [intel-oneapi-ipp]
        lapack: [intel-oneapi-mkl]
        mkl: [intel-oneapi-mkl]
        mpi: [intel-oneapi-mpi, openmpi, mpich]
        tbb: [intel-oneapi-tbb, intel-tbb]
        scalapack: [intel-oneapi-mkl]
@@ -0,0 +1,33 @@
spack:
  view: false

  packages:
    all:
      require: target=aarch64

  config:
    deprecated: true  # allow old python versions

  specs:
    - clingo-bootstrap +optimized ^python@3.13
    - clingo-bootstrap +optimized ^python@3.12
    - clingo-bootstrap +optimized ^python@3.11
    - clingo-bootstrap +optimized ^python@3.10
    - clingo-bootstrap +optimized ^python@3.9
    - clingo-bootstrap +optimized ^python@3.8

    - clingo-bootstrap@spack +optimized ^python@3.13
    - clingo-bootstrap@spack +optimized ^python@3.12
    - clingo-bootstrap@spack +optimized ^python@3.11
    - clingo-bootstrap@spack +optimized ^python@3.10
    - clingo-bootstrap@spack +optimized ^python@3.9
    - clingo-bootstrap@spack +optimized ^python@3.8

  ci:
    pipeline-gen:
    - build-job-remove:
        tags: [spack, public]
    - build-job:
        variables:
          CI_GPG_KEY_ROOT: /etc/protected-runner
        tags: [macos-ventura, apple-clang-15, aarch64-macos]
@@ -0,0 +1,35 @@
spack:
  view: false

  packages:
    all:
      require: target=x86_64_v3

  config:
    deprecated: true  # allow old python versions

  specs:
    - clingo-bootstrap +optimized ^python@3.13
    - clingo-bootstrap +optimized ^python@3.12
    - clingo-bootstrap +optimized ^python@3.11
    - clingo-bootstrap +optimized ^python@3.10
    - clingo-bootstrap +optimized ^python@3.9
    - clingo-bootstrap +optimized ^python@3.8
    - clingo-bootstrap +optimized ^python@3.7
    - clingo-bootstrap +optimized ^python@3.6

    - clingo-bootstrap@spack +optimized ^python@3.13
    - clingo-bootstrap@spack +optimized ^python@3.12
    - clingo-bootstrap@spack +optimized ^python@3.11
    - clingo-bootstrap@spack +optimized ^python@3.10
    - clingo-bootstrap@spack +optimized ^python@3.9
    - clingo-bootstrap@spack +optimized ^python@3.8
    - clingo-bootstrap@spack +optimized ^python@3.7
    - clingo-bootstrap@spack +optimized ^python@3.6

  ci:
    pipeline-gen:
    - build-job:
        image:
          name: ghcr.io/spack/ubuntu-24.04:v2024-09-05-v2
          entrypoint: ['']
@@ -6,20 +6,31 @@ spack:
  cmake:
    variants: ~ownlibs
  ecp-data-vis-sdk:
    require: "+ascent +adios2 +cinema +darshan +faodel +hdf5 +pnetcdf +sensei +sz +unifyfs +veloc +vtkm +zfp"
    require:
    - "+ascent +adios2 +cinema +darshan +faodel +hdf5 +pnetcdf +sensei +sz +unifyfs +veloc +vtkm +zfp"
  hdf5:
    require:
    - one_of: ['@1.14', '@1.12']
    - "@1.14"
  mesa:
    require: "+glx +osmesa +opengl ~opengles +llvm"
    require:
    - "+glx +osmesa +opengl ~opengles +llvm"
  libglx:
    require: "mesa +glx"
  ospray:
    require: '@2.8.0 +denoiser +mpi'
    require:
    - "@2.8.0"
    - "+denoiser +mpi"
  llvm:
    require: '@14:'
    require: ["@14:"]
    # Minimize LLVM
    variants: ~lldb~lld~libomptarget~polly~gold libunwind=none compiler-rt=none
  libllvm:
    require: ["^llvm"]
  visit:
    require: ["@3.4.1"]

  concretizer:
    unify: when_possible

  definitions:
  - paraview_specs:
@@ -30,11 +41,10 @@ spack:
      - ^[virtuals=gl] osmesa # OSMesa Rendering
  - visit_specs:
    - matrix:
      - - visit~gui
      - - ^[virtuals=gl] glx    # GLX Rendering
        - ^[virtuals=gl] osmesa # OSMesa Rendering
        # VisIt GUI does not work with Qt 5.14.2
        # - +gui ^[virtuals=gl] glx # GUI Support w/ GLX Rendering
      - - visit
      - - ~gui ^[virtuals=gl] glx
        - ~gui ^[virtuals=gl] osmesa
        - +gui ^[virtuals=gl] glx # GUI Support w/ GLX Rendering
  - sdk_base_spec:
    - matrix:
      - - ecp-data-vis-sdk +ascent +adios2 +cinema +darshan +faodel +hdf5 +pnetcdf
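The first hunk above rewrites several scalar require: values as one-item lists. A quick PyYAML check of the structural difference between the two forms (a sketch, not part of the diff):

    import yaml

    old = yaml.safe_load('require: "+glx +osmesa +opengl ~opengles +llvm"')
    new = yaml.safe_load('require:\n- "+glx +osmesa +opengl ~opengles +llvm"')

    print(type(old["require"]))  # <class 'str'>  (single requirement spec)
    print(type(new["require"]))  # <class 'list'> (list of requirement entries)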
@@ -207,6 +207,11 @@ spack:
      externals:
      - spec: rocm-core@6.2.1
        prefix: /opt/rocm-6.2.1
    rocm-openmp-extras:
      buildable: false
      externals:
      - spec: rocm-openmp-extras@6.2.1
        prefix: /opt/rocm-6.2.1

  specs:
  # ROCM NOARCH
@@ -250,6 +255,7 @@ spack:
  # - chapel +rocm amdgpu_target=gfx908 # chapel: need chapel >= 2.2 to support ROCm >5.4
  # - cp2k +mpi +rocm amdgpu_target=gfx908 # cp2k: Error: KeyError: 'No spec with name rocm in... "-L{}".format(spec["rocm"].libs.directories[0]),
  # - exago +mpi +python +raja +hiop +rocm amdgpu_target=gfx908 ~ipopt cxxflags="-Wno-error=non-pod-varargs" ^hiop@1.0.0 ~sparse +mpi +raja +rocm amdgpu_target=gfx908 # raja: https://github.com/spack/spack/issues/44593
  # - lammps +rocm amdgpu_target=gfx908 # lammps: KeyError: 'No spec with name llvm-amdgpu in rocm-openmp-extras@6.2.1/xafvl6rnd3tjagjvezszdz6itqzcl3zj'
  # - lbann ~cuda +rocm amdgpu_target=gfx908 # aluminum: https://github.com/spack/spack/issues/38807
  # - papi +rocm amdgpu_target=gfx908 # papi: https://github.com/spack/spack/issues/27898
  # - petsc +rocm amdgpu_target=gfx908 # petsc: https://github.com/spack/spack/issues/44600
@@ -294,6 +300,7 @@ spack:
  # - chapel +rocm amdgpu_target=gfx90a # chapel: need chapel >= 2.2 to support ROCm >5.4
  # - cp2k +mpi +rocm amdgpu_target=gfx90a # cp2k: Error: KeyError: 'No spec with name rocm in... "-L{}".format(spec["rocm"].libs.directories[0]),
  # - exago +mpi +python +raja +hiop +rocm amdgpu_target=gfx90a ~ipopt cxxflags="-Wno-error=non-pod-varargs" ^hiop@1.0.0 ~sparse +mpi +raja +rocm amdgpu_target=gfx90a # raja: https://github.com/spack/spack/issues/44593
  # - lammps +rocm amdgpu_target=gfx90a # lammps: KeyError: 'No spec with name llvm-amdgpu in rocm-openmp-extras@6.2.1/xafvl6rnd3tjagjvezszdz6itqzcl3zj'
  # - lbann ~cuda +rocm amdgpu_target=gfx90a # aluminum: https://github.com/spack/spack/issues/38807
  # - papi +rocm amdgpu_target=gfx90a # papi: https://github.com/spack/spack/issues/27898
  # - petsc +rocm amdgpu_target=gfx90a # petsc: https://github.com/spack/spack/issues/44600
@@ -327,6 +327,7 @@ spack:
  - heffte +rocm amdgpu_target=gfx90a
  - hpx +rocm amdgpu_target=gfx90a
  - hypre +rocm amdgpu_target=gfx90a
  - lammps +rocm amdgpu_target=gfx90a
  - magma ~cuda +rocm amdgpu_target=gfx90a
  - mfem +rocm amdgpu_target=gfx90a
  - raja ~openmp +rocm amdgpu_target=gfx90a
Some files were not shown because too many files have changed in this diff.