Compare commits
254 Commits
develop-20
...
traceback-
[Commit table (Author | SHA1 | Date): 254 commits, 01ec40b8ad … c77916146c]
4 .github/workflows/audit.yaml (vendored)
@@ -28,7 +28,7 @@ jobs:
|
||||
run:
|
||||
shell: ${{ matrix.system.shell }}
|
||||
steps:
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
with:
|
||||
python-version: ${{inputs.python_version}}
|
||||
@@ -66,7 +66,7 @@ jobs:
|
||||
./share/spack/qa/validate_last_exit.ps1
|
||||
spack -d audit externals
|
||||
./share/spack/qa/validate_last_exit.ps1
|
||||
- uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
|
||||
- uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
|
||||
if: ${{ inputs.with_coverage == 'true' && runner.os != 'Windows' }}
|
||||
with:
|
||||
name: coverage-audits-${{ matrix.system.os }}
|
||||
|
||||
10 .github/workflows/bootstrap.yml (vendored)
@@ -37,7 +37,7 @@ jobs:
|
||||
make patch unzip which xz python3 python3-devel tree \
|
||||
cmake bison
|
||||
- name: Checkout
|
||||
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Bootstrap clingo
|
||||
@@ -60,7 +60,7 @@ jobs:
|
||||
run: |
|
||||
brew install cmake bison tree
|
||||
- name: Checkout
|
||||
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
@@ -90,7 +90,7 @@ jobs:
|
||||
if: ${{ matrix.runner == 'ubuntu-latest' }}
|
||||
run: sudo rm -rf $(command -v gpg gpg2 patchelf)
|
||||
- name: Checkout
|
||||
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Bootstrap GnuPG
|
||||
@@ -119,7 +119,7 @@ jobs:
|
||||
run: |
|
||||
sudo rm -rf $(which gpg) $(which gpg2) $(which patchelf)
|
||||
- name: Checkout
|
||||
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
@@ -172,7 +172,7 @@ jobs:
|
||||
runs-on: "windows-latest"
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
|
||||
8 .github/workflows/build-containers.yml (vendored)
@@ -55,7 +55,7 @@ jobs:
|
||||
if: github.repository == 'spack/spack'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
|
||||
- uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
|
||||
id: docker_meta
|
||||
@@ -87,7 +87,7 @@ jobs:
|
||||
fi
|
||||
|
||||
- name: Upload Dockerfile
|
||||
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
|
||||
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
|
||||
with:
|
||||
name: dockerfiles_${{ matrix.dockerfile[0] }}
|
||||
path: dockerfiles
|
||||
@@ -113,7 +113,7 @@ jobs:
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
|
||||
uses: docker/build-push-action@32945a339266b759abcbdc89316275140b0fc960
|
||||
uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75
|
||||
with:
|
||||
context: dockerfiles/${{ matrix.dockerfile[0] }}
|
||||
platforms: ${{ matrix.dockerfile[1] }}
|
||||
@@ -126,7 +126,7 @@ jobs:
|
||||
needs: deploy-images
|
||||
steps:
|
||||
- name: Merge Artifacts
|
||||
uses: actions/upload-artifact/merge@50769540e7f4bd5e21e526ee35c689e35e0d6874
|
||||
uses: actions/upload-artifact/merge@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
|
||||
with:
|
||||
name: dockerfiles
|
||||
pattern: dockerfiles_*
|
||||
|
||||
2 .github/workflows/ci.yaml (vendored)
@@ -24,7 +24,7 @@ jobs:
|
||||
core: ${{ steps.filter.outputs.core }}
|
||||
packages: ${{ steps.filter.outputs.packages }}
|
||||
steps:
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
if: ${{ github.event_name == 'push' }}
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
2 .github/workflows/coverage.yml (vendored)
@@ -8,7 +8,7 @@ jobs:
|
||||
upload:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
2 .github/workflows/nightly-win-builds.yml (vendored)
@@ -14,7 +14,7 @@ jobs:
|
||||
build-paraview-deps:
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
black==24.8.0
|
||||
black==24.10.0
|
||||
clingo==5.7.1
|
||||
flake8==7.1.1
|
||||
isort==5.13.2
|
||||
|
||||
41 .github/workflows/unit_tests.yaml (vendored)
@@ -40,7 +40,7 @@ jobs:
|
||||
on_develop: false
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
@@ -80,7 +80,7 @@ jobs:
|
||||
UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
|
||||
run: |
|
||||
share/spack/qa/run-unit-tests
|
||||
- uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
|
||||
- uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
|
||||
with:
|
||||
name: coverage-${{ matrix.os }}-python${{ matrix.python-version }}
|
||||
path: coverage
|
||||
@@ -89,7 +89,7 @@ jobs:
|
||||
shell:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
@@ -113,7 +113,7 @@ jobs:
|
||||
COVERAGE: true
|
||||
run: |
|
||||
share/spack/qa/run-shell-tests
|
||||
- uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
|
||||
- uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
|
||||
with:
|
||||
name: coverage-shell
|
||||
path: coverage
|
||||
@@ -130,7 +130,7 @@ jobs:
|
||||
dnf install -y \
|
||||
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
|
||||
make patch tcl unzip which xz
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
- name: Setup repo and non-root user
|
||||
run: |
|
||||
git --version
|
||||
@@ -149,32 +149,33 @@ jobs:
|
||||
clingo-cffi:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
with:
|
||||
python-version: '3.11'
|
||||
python-version: '3.13'
|
||||
- name: Install System packages
|
||||
run: |
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build kcov
|
||||
sudo apt-get -y install coreutils gfortran graphviz gnupg2
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
pip install --upgrade pip setuptools pytest coverage[toml] pytest-cov clingo pytest-xdist
|
||||
pip install --upgrade pip setuptools pytest coverage[toml] pytest-cov clingo
|
||||
pip install --upgrade flake8 "isort>=4.3.5" "mypy>=0.900" "click" "black"
|
||||
- name: Setup git configuration
|
||||
run: |
|
||||
# Need this for the git tests to succeed.
|
||||
git --version
|
||||
. .github/workflows/bin/setup_git.sh
|
||||
- name: Run unit tests (full suite with coverage)
|
||||
env:
|
||||
COVERAGE: true
|
||||
COVERAGE_FILE: coverage/.coverage-clingo-cffi
|
||||
run: |
|
||||
share/spack/qa/run-unit-tests
|
||||
- uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
|
||||
. share/spack/setup-env.sh
|
||||
spack bootstrap disable spack-install
|
||||
spack bootstrap disable github-actions-v0.4
|
||||
spack bootstrap disable github-actions-v0.5
|
||||
spack bootstrap status
|
||||
spack solve zlib
|
||||
spack unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml lib/spack/spack/test/concretize.py
|
||||
- uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
|
||||
with:
|
||||
name: coverage-clingo-cffi
|
||||
path: coverage
|
||||
@@ -187,7 +188,7 @@ jobs:
|
||||
os: [macos-13, macos-14]
|
||||
python-version: ["3.11"]
|
||||
steps:
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
@@ -212,7 +213,7 @@ jobs:
|
||||
$(which spack) solve zlib
|
||||
common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
|
||||
$(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
|
||||
- uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
|
||||
- uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
|
||||
with:
|
||||
name: coverage-${{ matrix.os }}-python${{ matrix.python-version }}
|
||||
path: coverage
|
||||
@@ -225,7 +226,7 @@ jobs:
|
||||
powershell Invoke-Expression -Command "./share/spack/qa/windows_test_setup.ps1"; {0}
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
@@ -243,7 +244,7 @@ jobs:
|
||||
run: |
|
||||
spack unit-test -x --verbose --cov --cov-config=pyproject.toml
|
||||
./share/spack/qa/validate_last_exit.ps1
|
||||
- uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
|
||||
- uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
|
||||
with:
|
||||
name: coverage-windows
|
||||
path: coverage
|
||||
|
||||
16 .github/workflows/valid-style.yml (vendored)
@@ -18,7 +18,7 @@ jobs:
|
||||
validate:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
with:
|
||||
python-version: '3.11'
|
||||
@@ -35,7 +35,7 @@ jobs:
|
||||
style:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
@@ -70,7 +70,7 @@ jobs:
|
||||
dnf install -y \
|
||||
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
|
||||
make patch tcl unzip which xz
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
- name: Setup repo and non-root user
|
||||
run: |
|
||||
git --version
|
||||
@@ -85,7 +85,7 @@ jobs:
|
||||
source share/spack/setup-env.sh
|
||||
spack debug report
|
||||
spack -d bootstrap now --dev
|
||||
spack style -t black
|
||||
spack -d style -t black
|
||||
spack unit-test -V
|
||||
import-check:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -98,14 +98,14 @@ jobs:
|
||||
# PR: use the base of the PR as the old commit
|
||||
- name: Checkout PR base commit
|
||||
if: github.event_name == 'pull_request'
|
||||
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.base.sha }}
|
||||
path: old
|
||||
# not a PR: use the previous commit as the old commit
|
||||
- name: Checkout previous commit
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
fetch-depth: 2
|
||||
path: old
|
||||
@@ -114,11 +114,11 @@ jobs:
|
||||
run: git -C old reset --hard HEAD^
|
||||
|
||||
- name: Checkout new commit
|
||||
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
path: new
|
||||
- name: Install circular import checker
|
||||
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871
|
||||
with:
|
||||
repository: haampie/circular-import-fighter
|
||||
ref: 555519c6fd5564fd2eb844e7b87e84f4d12602e2
|
||||
|
||||
@@ -14,3 +14,26 @@ sphinx:
|
||||
python:
|
||||
install:
|
||||
- requirements: lib/spack/docs/requirements.txt
|
||||
|
||||
search:
|
||||
ranking:
|
||||
spack.html: -10
|
||||
spack.*.html: -10
|
||||
llnl.html: -10
|
||||
llnl.*.html: -10
|
||||
_modules/*: -10
|
||||
command_index.html: -9
|
||||
basic_usage.html: 5
|
||||
configuration.html: 5
|
||||
config_yaml.html: 5
|
||||
packages_yaml.html: 5
|
||||
build_settings.html: 5
|
||||
environments.html: 5
|
||||
containers.html: 5
|
||||
mirrors.html: 5
|
||||
module_file_support.html: 5
|
||||
repositories.html: 5
|
||||
binary_caches.html: 5
|
||||
chain.html: 5
|
||||
pipelines.html: 5
|
||||
packaging_guide.html: 5
|
||||
|
||||
@@ -166,3 +166,74 @@ while `py-numpy` still needs an older version:
|
||||
|
||||
Up to Spack v0.20 ``duplicates:strategy:none`` was the default (and only) behavior. From Spack v0.21 the
|
||||
default behavior is ``duplicates:strategy:minimal``.
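The strategy can be set explicitly in the concretizer configuration; a minimal sketch (assuming the usual ``concretizer`` section, e.g. in ``concretizer.yaml``):

.. code-block:: yaml

   concretizer:
     duplicates:
       # "minimal" duplicates nodes only where required (e.g. build dependencies);
       # "none" keeps a single node per package, as in Spack <= v0.20
       strategy: minimal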
|
||||
|
||||
--------
|
||||
Splicing
|
||||
--------
|
||||
|
||||
The ``splice`` key covers config attributes for splicing specs in the solver.
|
||||
|
||||
"Splicing" is a method for replacing a dependency with another spec
|
||||
that provides the same package or virtual. There are two types of
|
||||
splices, referring to different behaviors for shared dependencies
|
||||
between the root spec and the new spec replacing a dependency:
|
||||
"transitive" and "intransitive". A "transitive" splice is one that
|
||||
resolves all conflicts by taking the dependency from the new node. An
|
||||
"intransitive" splice is one that resolves all conflicts by taking the
|
||||
dependency from the original root. From a theory perspective, hybrid
|
||||
splices are possible but are not modeled by Spack.
|
||||
|
||||
All spliced specs retain a ``build_spec`` attribute that points to the
|
||||
original Spec before any splice occurred. The ``build_spec`` for a
|
||||
non-spliced spec is itself.
|
||||
|
||||
The figure below shows examples of transitive and intransitive splices:
|
||||
|
||||
.. figure:: images/splices.png
|
||||
:align: center
|
||||
|
||||
The concretizer can be configured to explicitly splice particular
|
||||
replacements for a target spec. Splicing will allow the user to make
|
||||
use of generically built public binary caches, while swapping in
|
||||
highly optimized local builds for performance critical components
|
||||
and/or components that interact closely with the specific hardware
|
||||
details of the system. The most prominent candidate for splicing is
|
||||
MPI providers. MPI packages have relatively well-understood ABI
|
||||
characteristics, and most High Performance Computing facilities deploy
|
||||
highly optimized MPI packages tailored to their particular
|
||||
hardware. The following config block configures Spack to replace
|
||||
whatever MPI provider each spec was concretized to use with the
|
||||
particular package of ``mpich`` with the hash that begins ``abcdef``.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
concretizer:
|
||||
splice:
|
||||
explicit:
|
||||
- target: mpi
|
||||
replacement: mpich/abcdef
|
||||
transitive: false
|
||||
|
||||
.. warning::
|
||||
|
||||
When configuring an explicit splice, you as the user take on the
|
||||
responsibility for ensuring ABI compatibility between the specs
|
||||
matched by the target and the replacement you provide. If they are
|
||||
not compatible, Spack will not warn you and your application will
|
||||
fail to run.
|
||||
|
||||
The ``target`` field of an explicit splice can be any abstract
|
||||
spec. The ``replacement`` field must be a spec that includes the hash
|
||||
of a concrete spec, and the replacement must either be the same
|
||||
package as the target, provide the virtual that is the target, or
|
||||
provide a virtual that the target provides. The ``transitive`` field
|
||||
is optional -- by default, splices will be transitive.
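As a further, hypothetical illustration, an entry that targets a virtual and relies on the default transitive behavior could look like:

.. code-block:: yaml

   concretizer:
     splice:
       explicit:
       - target: blas
         # hash prefix of an installed, concrete openblas (hypothetical)
         replacement: openblas/ghijkl
         # "transitive" omitted: splices are transitive by default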
|
||||
|
||||
.. note::
|
||||
|
||||
With explicit splices configured, it is possible for Spack to
|
||||
concretize to a spec that does not satisfy the input. For example,
|
||||
with the config above ``hdf5 ^mvapich2`` will concretize to use
|
||||
``mpich/abcdef`` instead of ``mvapich2`` as the MPI provider. Spack
|
||||
will warn the user in this case, but will not fail the
|
||||
concretization.
|
||||
|
||||
@@ -281,7 +281,7 @@ When spack queries for configuration parameters, it searches in
|
||||
higher-precedence scopes first. So, settings in a higher-precedence file
|
||||
can override those with the same key in a lower-precedence one. For
|
||||
list-valued settings, Spack *prepends* higher-precedence settings to
|
||||
lower-precedence settings. Completely ignoring higher-level configuration
|
||||
lower-precedence settings. Completely ignoring lower-precedence configuration
|
||||
options is supported with the ``::`` notation for keys (see
|
||||
:ref:`config-overrides` below).
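As a brief, hypothetical illustration of the ``::`` syntax, a higher-precedence scope can replace, rather than merge with, the ``config`` section of all lower-precedence scopes:

.. code-block:: yaml

   config::                # trailing "::" discards lower-precedence "config" settings
     install_tree:
       root: /opt/spack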
|
||||
|
||||
|
||||
@@ -712,27 +712,27 @@ Release branches
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
There are currently two types of Spack releases: :ref:`major releases
|
||||
<major-releases>` (``0.17.0``, ``0.18.0``, etc.) and :ref:`point releases
|
||||
<point-releases>` (``0.17.1``, ``0.17.2``, ``0.17.3``, etc.). Here is a
|
||||
<major-releases>` (``0.21.0``, ``0.22.0``, etc.) and :ref:`patch releases
|
||||
<patch-releases>` (``0.22.1``, ``0.22.2``, ``0.22.3``, etc.). Here is a
|
||||
diagram of how Spack release branches work::
|
||||
|
||||
o branch: develop (latest version, v0.19.0.dev0)
|
||||
o branch: develop (latest version, v0.23.0.dev0)
|
||||
|
|
||||
o
|
||||
| o branch: releases/v0.18, tag: v0.18.1
|
||||
| o branch: releases/v0.22, tag: v0.22.1
|
||||
o |
|
||||
| o tag: v0.18.0
|
||||
| o tag: v0.22.0
|
||||
o |
|
||||
| o
|
||||
|/
|
||||
o
|
||||
|
|
||||
o
|
||||
| o branch: releases/v0.17, tag: v0.17.2
|
||||
| o branch: releases/v0.21, tag: v0.21.2
|
||||
o |
|
||||
| o tag: v0.17.1
|
||||
| o tag: v0.21.1
|
||||
o |
|
||||
| o tag: v0.17.0
|
||||
| o tag: v0.21.0
|
||||
o |
|
||||
| o
|
||||
|/
|
||||
@@ -743,8 +743,8 @@ requests target ``develop``. The ``develop`` branch will report that its
|
||||
version is that of the next **major** release with a ``.dev0`` suffix.
|
||||
|
||||
Each Spack release series also has a corresponding branch, e.g.
|
||||
``releases/v0.18`` has ``0.18.x`` versions of Spack, and
|
||||
``releases/v0.17`` has ``0.17.x`` versions. A major release is the first
|
||||
``releases/v0.22`` has ``v0.22.x`` versions of Spack, and
|
||||
``releases/v0.21`` has ``v0.21.x`` versions. A major release is the first
|
||||
tagged version on a release branch. Minor releases are back-ported from
|
||||
develop onto release branches. This is typically done by cherry-picking
|
||||
bugfix commits off of ``develop``.
|
||||
@@ -774,27 +774,40 @@ for more details.
|
||||
Scheduling work for releases
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
We schedule work for releases by creating `GitHub projects
|
||||
<https://github.com/spack/spack/projects>`_. At any time, there may be
|
||||
several open release projects. For example, below are two releases (from
|
||||
some past version of the page linked above):
|
||||
We schedule work for **major releases** through `milestones
|
||||
<https://github.com/spack/spack/milestones>`_ and `GitHub Projects
|
||||
<https://github.com/spack/spack/projects>`_, while **patch releases** use `labels
|
||||
<https://github.com/spack/spack/labels>`_.
|
||||
|
||||
.. image:: images/projects.png
|
||||
There is only one milestone open at a time. Its name corresponds to the next major version, for
|
||||
example ``v0.23``. Important issues and pull requests should be assigned to this milestone by
|
||||
core developers, so that they are not forgotten at the time of release. The milestone is closed
|
||||
when the release is made, and a new milestone is created for the next major release.
|
||||
|
||||
This image shows one release in progress for ``0.15.1`` and another for
|
||||
``0.16.0``. Each of these releases has a project board containing issues
|
||||
and pull requests. GitHub shows a status bar with completed work in
|
||||
green, work in progress in purple, and work not started yet in gray, so
|
||||
it's fairly easy to see progress.
|
||||
Bug reports in GitHub issues are automatically labelled ``bug`` and ``triage``. Spack developers
|
||||
assign one of the labels ``impact-low``, ``impact-medium`` or ``impact-high``. This will make the
|
||||
issue appear in the `Triaged bugs <https://github.com/orgs/spack/projects/6>`_ project board.
|
||||
Important issues should be assigned to the next milestone as well, so they appear at the top of
|
||||
the project board.
|
||||
|
||||
Spack's project boards are not firm commitments so we move work between
|
||||
releases frequently. If we need to make a release and some tasks are not
|
||||
yet done, we will simply move them to the next minor or major release, rather
|
||||
than delaying the release to complete them.
|
||||
Spack's milestones are not firm commitments so we move work between releases frequently. If we
|
||||
need to make a release and some tasks are not yet done, we will simply move them to the next major
|
||||
release milestone, rather than delaying the release to complete them.
|
||||
|
||||
For more on using GitHub project boards, see `GitHub's documentation
|
||||
<https://docs.github.com/en/github/managing-your-work-on-github/about-project-boards>`_.
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
Backporting bug fixes
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
When a bug is fixed in the ``develop`` branch, it is often necessary to backport the fix to one
|
||||
(or more) of the ``release/vX.Y`` branches. Only the release manager is responsible for doing
|
||||
backports, but Spack maintainers are responsible for labelling pull requests (and issues if no bug
|
||||
fix is available yet) with ``vX.Y.Z`` labels. The label should correspond to the next patch version
|
||||
that the bug fix should be backported to.
|
||||
|
||||
Backports are done publicly by the release manager using a pull request named ``Backports vX.Y.Z``.
|
||||
This pull request is opened from the ``backports/vX.Y.Z`` branch, targets the ``releases/vX.Y``
|
||||
branch and contains a (growing) list of cherry-picked commits from the ``develop`` branch.
|
||||
Typically there are one or two backport pull requests open at any given time.
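A sketch of the cherry-picking workflow described above (branch name and commit hash are hypothetical):

.. code-block:: console

   $ git checkout backports/v0.22.2
   $ git cherry-pick <sha-of-squashed-commit-on-develop>
   $ git push origin backports/v0.22.2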
|
||||
|
||||
.. _major-releases:
|
||||
|
||||
@@ -802,25 +815,21 @@ For more on using GitHub project boards, see `GitHub's documentation
|
||||
Making major releases
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Assuming a project board has already been created and all required work
|
||||
completed, the steps to make the major release are:
|
||||
Assuming all required work from the milestone is completed, the steps to make the major release
|
||||
are:
|
||||
|
||||
#. Create two new project boards:
|
||||
#. `Create a new milestone <https://github.com/spack/spack/milestones>`_ for the next major
|
||||
release.
|
||||
|
||||
* One for the next major release
|
||||
* One for the next point release
|
||||
#. `Create a new label <https://github.com/spack/spack/labels>`_ for the next patch release.
|
||||
|
||||
#. Move any optional tasks that are not done to one of the new project boards.
|
||||
|
||||
In general, small bugfixes should go to the next point release. Major
|
||||
features, refactors, and changes that could affect concretization should
|
||||
go in the next major release.
|
||||
#. Move any optional tasks that are not done to the next milestone.
|
||||
|
||||
#. Create a branch for the release, based on ``develop``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ git checkout -b releases/v0.15 develop
|
||||
$ git checkout -b releases/v0.23 develop
|
||||
|
||||
For a version ``vX.Y.Z``, the branch's name should be
|
||||
``releases/vX.Y``. That is, you should create a ``releases/vX.Y``
|
||||
@@ -856,8 +865,8 @@ completed, the steps to make the major release are:
|
||||
|
||||
Create a pull request targeting the ``develop`` branch, bumping the major
|
||||
version in ``lib/spack/spack/__init__.py`` with a ``dev0`` release segment.
|
||||
For instance when you have just released ``v0.15.0``, set the version
|
||||
to ``(0, 16, 0, 'dev0')`` on ``develop``.
|
||||
For instance when you have just released ``v0.23.0``, set the version
|
||||
to ``(0, 24, 0, 'dev0')`` on ``develop``.
|
||||
|
||||
#. Follow the steps in :ref:`publishing-releases`.
|
||||
|
||||
@@ -866,82 +875,52 @@ completed, the steps to make the major release are:
|
||||
#. Follow the steps in :ref:`announcing-releases`.
|
||||
|
||||
|
||||
.. _point-releases:
|
||||
.. _patch-releases:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
Making point releases
|
||||
Making patch releases
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Assuming a project board has already been created and all required work
|
||||
completed, the steps to make the point release are:
|
||||
To make the patch release process both efficient and transparent, we use a *backports pull request*
|
||||
which contains cherry-picked commits from the ``develop`` branch. The majority of the work is to
|
||||
cherry-pick the bug fixes, which ideally should be done as soon as they land on ``develop``:
|
||||
this ensures cherry-picking happens in order, and makes conflicts easier to resolve since the
|
||||
changes are fresh in the mind of the developer.
|
||||
|
||||
#. Create a new project board for the next point release.
|
||||
The backports pull request is always titled ``Backports vX.Y.Z`` and is labelled ``backports``. It
|
||||
is opened from a branch named ``backports/vX.Y.Z`` and targets the ``releases/vX.Y`` branch.
|
||||
|
||||
#. Move any optional tasks that are not done to the next project board.
|
||||
Whenever a pull request labelled ``vX.Y.Z`` is merged, cherry-pick the associated squashed commit
|
||||
on ``develop`` to the ``backports/vX.Y.Z`` branch. For pull requests that were rebased (or not
|
||||
squashed), cherry-pick each associated commit individually. Never force push to the
|
||||
``backports/vX.Y.Z`` branch.
|
||||
|
||||
#. Check out the release branch (it should already exist).
|
||||
.. warning::
|
||||
|
||||
For the ``X.Y.Z`` release, the release branch is called ``releases/vX.Y``.
|
||||
For ``v0.15.1``, you would check out ``releases/v0.15``:
|
||||
Sometimes you may **still** get merge conflicts even if you have
|
||||
cherry-picked all the commits in order. This generally means there
|
||||
is some other intervening pull request that the one you're trying
|
||||
to pick depends on. In these cases, you'll need to make a judgment
|
||||
call regarding those pull requests. Consider the number of affected
|
||||
files and/or the resulting differences.
|
||||
|
||||
.. code-block:: console
|
||||
1. If the changes are small, you might just cherry-pick it.
|
||||
|
||||
$ git checkout releases/v0.15
|
||||
2. If the changes are large, then you may decide that this fix is not
|
||||
worth including in a patch release, in which case you should remove
|
||||
the label from the pull request. Remember that large, manual backports
|
||||
are seldom the right choice for a patch release.
|
||||
|
||||
#. If a pull request to the release branch named ``Backports vX.Y.Z`` is not already
|
||||
in the project, create it. This pull request ought to be created as early as
|
||||
possible when working on a release project, so that we can build the release
|
||||
commits incrementally, and identify potential conflicts at an early stage.
|
||||
When all commits are cherry-picked in the ``backports/vX.Y.Z`` branch, make the patch
|
||||
release as follows:
|
||||
|
||||
#. Cherry-pick each pull request in the ``Done`` column of the release
|
||||
project board onto the ``Backports vX.Y.Z`` pull request.
|
||||
#. `Create a new label <https://github.com/spack/spack/labels>`_ ``vX.Y.{Z+1}`` for the next patch
|
||||
release.
|
||||
|
||||
This is **usually** fairly simple since we squash the commits from the
|
||||
vast majority of pull requests. That means there is only one commit
|
||||
per pull request to cherry-pick. For example, `this pull request
|
||||
<https://github.com/spack/spack/pull/15777>`_ has three commits, but
|
||||
they were squashed into a single commit on merge. You can see the
|
||||
commit that was created here:
|
||||
#. Replace the label ``vX.Y.Z`` with ``vX.Y.{Z+1}`` for all PRs and issues that are not done.
|
||||
|
||||
.. image:: images/pr-commit.png
|
||||
|
||||
You can easily cherry pick it like this (assuming you already have the
|
||||
release branch checked out):
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ git cherry-pick 7e46da7
|
||||
|
||||
For pull requests that were rebased (or not squashed), you'll need to
|
||||
cherry-pick each associated commit individually.
|
||||
|
||||
.. warning::
|
||||
|
||||
It is important to cherry-pick commits in the order they happened,
|
||||
otherwise you can get conflicts while cherry-picking. When
|
||||
cherry-picking look at the merge date,
|
||||
**not** the number of the pull request or the date it was opened.
|
||||
|
||||
Sometimes you may **still** get merge conflicts even if you have
|
||||
cherry-picked all the commits in order. This generally means there
|
||||
is some other intervening pull request that the one you're trying
|
||||
to pick depends on. In these cases, you'll need to make a judgment
|
||||
call regarding those pull requests. Consider the number of affected
|
||||
files and or the resulting differences.
|
||||
|
||||
1. If the dependency changes are small, you might just cherry-pick it,
|
||||
too. If you do this, add the task to the release board.
|
||||
|
||||
2. If the changes are large, then you may decide that this fix is not
|
||||
worth including in a point release, in which case you should remove
|
||||
the task from the release project.
|
||||
|
||||
3. You can always decide to manually back-port the fix to the release
|
||||
branch if neither of the above options makes sense, but this can
|
||||
require a lot of work. It's seldom the right choice.
|
||||
|
||||
#. When all the commits from the project board are cherry-picked into
|
||||
the ``Backports vX.Y.Z`` pull request, you can push a commit to:
|
||||
#. Manually push a single commit with commit message ``Set version to vX.Y.Z`` to the
|
||||
``backports/vX.Y.Z`` branch that both bumps the Spack version number and updates the changelog:
|
||||
|
||||
1. Bump the version in ``lib/spack/spack/__init__.py``.
|
||||
2. Update ``CHANGELOG.md`` with a list of the changes.
|
||||
@@ -950,20 +929,22 @@ completed, the steps to make the point release are:
|
||||
release branch. See `the changelog from 0.14.1
|
||||
<https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
|
||||
|
||||
#. Merge the ``Backports vX.Y.Z`` PR with the **Rebase and merge** strategy. This
|
||||
is needed to keep track in the release branch of all the commits that were
|
||||
cherry-picked.
|
||||
|
||||
#. Make sure CI passes on the release branch, including:
|
||||
#. Make sure CI passes on the **backports pull request**, including:
|
||||
|
||||
* Regular unit tests
|
||||
* Build tests
|
||||
* The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_
|
||||
|
||||
If CI does not pass, you'll need to figure out why, and make changes
|
||||
to the release branch until it does. You can make more commits, modify
|
||||
or remove cherry-picked commits, or cherry-pick **more** from
|
||||
``develop`` to make this happen.
|
||||
#. Merge the ``Backports vX.Y.Z`` PR with the **Rebase and merge** strategy. This
|
||||
is needed to keep track in the release branch of all the commits that were
|
||||
cherry-picked.
|
||||
|
||||
#. Make sure CI passes on the last commit of the **release branch**.
|
||||
|
||||
#. In the rare case you need to include additional commits in the patch release after the backports
|
||||
PR is merged, it is best to delete the last commit ``Set version to vX.Y.Z`` from the release
|
||||
branch with a single force push, open a new backports PR named ``Backports vX.Y.Z (2)``, and
|
||||
repeat the process. Avoid repeated force pushes to the release branch.
|
||||
|
||||
#. Follow the steps in :ref:`publishing-releases`.
|
||||
|
||||
@@ -1038,25 +1019,31 @@ Updating `releases/latest`
|
||||
|
||||
If the new release is the **highest** Spack release yet, you should
|
||||
also tag it as ``releases/latest``. For example, suppose the highest
|
||||
release is currently ``0.15.3``:
|
||||
release is currently ``0.22.3``:
|
||||
|
||||
* If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
|
||||
it with ``releases/latest``, as these are higher than ``0.15.3``.
|
||||
* If you are releasing ``0.22.4`` or ``0.23.0``, then you should tag
|
||||
it with ``releases/latest``, as these are higher than ``0.22.3``.
|
||||
|
||||
* If you are making a new release of an **older** major version of
|
||||
Spack, e.g. ``0.14.4``, then you should not tag it as
|
||||
Spack, e.g. ``0.21.4``, then you should not tag it as
|
||||
``releases/latest`` (as there are newer major versions).
|
||||
|
||||
To tag ``releases/latest``, do this:
|
||||
To do so, first fetch the latest tag created on GitHub, since you may not have it locally:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ git checkout releases/vX.Y # vX.Y is the new release's branch
|
||||
$ git tag --force releases/latest
|
||||
$ git push --force --tags
|
||||
$ git fetch --force git@github.com:spack/spack vX.Y.Z
|
||||
|
||||
The ``--force`` argument to ``git tag`` makes ``git`` overwrite the existing
|
||||
``releases/latest`` tag with the new one.
|
||||
Then tag ``vX.Y.Z`` as ``releases/latest`` and push the individual tag to GitHub.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ git tag --force releases/latest vX.Y.Z
|
||||
$ git push --force git@github.com:spack/spack releases/latest
|
||||
|
||||
The ``--force`` argument to ``git tag`` makes ``git`` overwrite the existing ``releases/latest``
|
||||
tag with the new one. Do **not** use the ``--tags`` flag when pushing, since this will push *all*
|
||||
local tags.
|
||||
|
||||
|
||||
.. _announcing-releases:
|
||||
|
||||
@@ -425,9 +425,13 @@ Developing Packages in a Spack Environment
|
||||
|
||||
The ``spack develop`` command allows one to develop Spack packages in
|
||||
an environment. It requires a spec containing a concrete version, and
|
||||
will configure Spack to install the package from local source. By
|
||||
default, it will also clone the package to a subdirectory in the
|
||||
environment. This package will have a special variant ``dev_path``
|
||||
will configure Spack to install the package from local source.
|
||||
If a version is not provided from the command line interface then spack
|
||||
will automatically pick the highest version the package has defined.
|
||||
This means any infinity versions (``develop``, ``main``, ``stable``) will be
|
||||
preferred in this selection process.
|
||||
By default, ``spack develop`` will also clone the package to a subdirectory in the
|
||||
environment for the local source. This package will have a special variant ``dev_path``
|
||||
set, and Spack will ensure the package and its dependents are rebuilt
|
||||
any time the environment is installed if the package's local source
|
||||
code has been modified. Spack's native implementation to check for modifications
|
||||
@@ -669,6 +673,9 @@ them to the environment.
|
||||
Environments can include files or URLs. File paths can be relative or
|
||||
absolute. URLs include the path to the text for individual files or
|
||||
can be the path to a directory containing configuration files.
|
||||
Spack supports ``file``, ``http``, ``https`` and ``ftp`` protocols (or
|
||||
schemes). Spack-specific, environment and user path variables may be
|
||||
used in these paths. See :ref:`config-file-variables` for more information.
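A minimal sketch of such includes in an environment's ``spack.yaml`` (the paths and URL are hypothetical):

.. code-block:: yaml

   spack:
     include:
     - relative/path/to/config.yaml
     - /absolute/path/to/packages.yaml
     - https://example.com/spack/extra-config.yaml
     specs: []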
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Configuration precedence
|
||||
|
||||
Binary file not shown. (removed image, 44 KiB)
Binary file not shown. (removed image, 68 KiB)
BIN lib/spack/docs/images/splices.png (new file, 358 KiB; binary file not shown)
@@ -457,11 +457,11 @@ For instance, the following config options,
|
||||
tcl:
|
||||
all:
|
||||
suffixes:
|
||||
^python@3.12: 'python-3.12'
|
||||
^python@3: 'python{^python.version}'
|
||||
^openblas: 'openblas'
|
||||
|
||||
will add a ``python-3.12`` version string to any packages compiled with
|
||||
Python matching the spec, ``python@3.12``. This is useful to know which
|
||||
will add a ``python-3.12.1`` version string to any packages compiled with
|
||||
Python matching the spec, ``python@3``. This is useful to know which
|
||||
version of Python a set of Python extensions is associated with. Likewise, the
|
||||
``openblas`` string is attached to any program that has openblas in the spec,
|
||||
most likely via the ``+blas`` variant specification.
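For instance (hypothetical package and versions), a package built against ``python@3.12.1`` and ``openblas`` might then appear as a module named:

.. code-block:: console

   py-scipy/1.14.1-python-3.12.1-openblas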
|
||||
|
||||
@@ -592,6 +592,77 @@ the attributes will be merged starting from the bottom match going up to the top
|
||||
|
||||
In the case that no match is found in a submapping section, no additional attributes will be applied.
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Dynamic Mapping Sections
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
For large scale CI where cost optimization is required, dynamic mapping allows for the use of real-time
|
||||
mapping schemes served by a web service. This type of mapping does not support the ``-remove`` type
|
||||
behavior, but it does follow the rest of the merge rules for configurations.
|
||||
|
||||
The dynamic mapping service needs to implement a single REST API interface for getting
|
||||
requests ``GET <URL>[:PORT][/PATH]?spec=<pkg_name@pkg_version +variant1+variant2%compiler@compiler_version>``.
|
||||
|
||||
An example request:
|
||||
|
||||
.. code-block::
|
||||
|
||||
https://my-dyn-mapping.spack.io/allocation?spec=zlib-ng@2.1.6 +compat+opt+shared+pic+new_strategies arch=linux-ubuntu20.04-x86_64_v3%gcc@12.0.0
|
||||
|
||||
|
||||
The example response below updates the Kubernetes request variables, overrides the max retries for GitLab,
|
||||
and prepends a note about the modifications made by the my-dyn-mapping.spack.io service.
|
||||
|
||||
.. code-block::
|
||||
|
||||
200 OK
|
||||
|
||||
{
  "variables": {
    "KUBERNETES_CPU_REQUEST": "500m",
    "KUBERNETES_MEMORY_REQUEST": "2G"
  },
  "retry": {"max": "1"},
  "script+:": [
    "echo \"Job modified by my-dyn-mapping.spack.io\""
  ]
}
|
||||
|
||||
|
||||
The ci.yaml configuration section takes the URL endpoint as well as a number of options to configure how responses are handled.
|
||||
|
||||
It is possible to specify a list of allowed and ignored configuration attributes under ``allow`` and ``ignore``
|
||||
respectively. It is also possible to configure required attributes under the ``required`` section.
|
||||
|
||||
The client timeout and SSL verification can be configured using the ``timeout`` and ``verify_ssl`` options.
By default, ``timeout`` is set to the option in ``config:timeout`` and ``verify_ssl`` is set to the option in ``config:verify_ssl``.
|
||||
|
||||
Passing header parameters to the request can be achieved through the ``header`` section. The values of the variables passed to the
|
||||
header may be environment variables that are expanded at runtime, such as a private token configured on the runner.
|
||||
|
||||
Here is an example configuration pointing to ``my-dyn-mapping.spack.io/allocation``.
|
||||
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ci:
|
||||
- dynamic-mapping:
|
||||
endpoint: my-dyn-mapping.spack.io/allocation
|
||||
timeout: 10
|
||||
verify_ssl: True
|
||||
header:
|
||||
PRIVATE_TOKEN: ${MY_PRIVATE_TOKEN}
|
||||
MY_CONFIG: "fuzz_allocation:false"
|
||||
allow:
|
||||
- variables
|
||||
ignore:
|
||||
- script
|
||||
require: []
|
||||
|
||||
|
||||
^^^^^^^^^^^^^
|
||||
Bootstrapping
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
sphinx==7.4.7
|
||||
sphinxcontrib-programoutput==0.17
|
||||
sphinx_design==0.6.1
|
||||
sphinx-rtd-theme==2.0.0
|
||||
python-levenshtein==0.25.1
|
||||
sphinx-rtd-theme==3.0.1
|
||||
python-levenshtein==0.26.0
|
||||
docutils==0.20.1
|
||||
pygments==2.18.0
|
||||
urllib3==2.2.3
|
||||
pytest==8.3.3
|
||||
isort==5.13.2
|
||||
black==24.8.0
|
||||
black==24.10.0
|
||||
flake8==7.1.1
|
||||
mypy==1.11.1
|
||||
|
||||
2 lib/spack/external/__init__.py (vendored)
@@ -18,7 +18,7 @@
|
||||
|
||||
* Homepage: https://pypi.python.org/pypi/archspec
|
||||
* Usage: Labeling, comparison and detection of microarchitectures
|
||||
* Version: 0.2.5-dev (commit bceb39528ac49dd0c876b2e9bf3e7482e9c2be4a)
|
||||
* Version: 0.2.5 (commit 38ce485258ffc4fc6dd6688f8dc90cb269478c47)
|
||||
|
||||
astunparse
|
||||
----------------
|
||||
|
||||
@@ -81,8 +81,13 @@ def __init__(self, name, parents, vendor, features, compilers, generation=0, cpu
|
||||
self.generation = generation
|
||||
# Only relevant for AArch64
|
||||
self.cpu_part = cpu_part
|
||||
# Cache the ancestor computation
|
||||
|
||||
# Cache the "ancestor" computation
|
||||
self._ancestors = None
|
||||
# Cache the "generic" computation
|
||||
self._generic = None
|
||||
# Cache the "family" computation
|
||||
self._family = None
|
||||
|
||||
@property
|
||||
def ancestors(self):
|
||||
@@ -174,18 +179,22 @@ def __contains__(self, feature):
|
||||
@property
|
||||
def family(self):
|
||||
"""Returns the architecture family a given target belongs to"""
|
||||
roots = [x for x in [self] + self.ancestors if not x.ancestors]
|
||||
msg = "a target is expected to belong to just one architecture family"
|
||||
msg += f"[found {', '.join(str(x) for x in roots)}]"
|
||||
assert len(roots) == 1, msg
|
||||
if self._family is None:
|
||||
roots = [x for x in [self] + self.ancestors if not x.ancestors]
|
||||
msg = "a target is expected to belong to just one architecture family"
|
||||
msg += f"[found {', '.join(str(x) for x in roots)}]"
|
||||
assert len(roots) == 1, msg
|
||||
self._family = roots.pop()
|
||||
|
||||
return roots.pop()
|
||||
return self._family
|
||||
|
||||
@property
|
||||
def generic(self):
|
||||
"""Returns the best generic architecture that is compatible with self"""
|
||||
generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
|
||||
return max(generics, key=lambda x: len(x.ancestors))
|
||||
if self._generic is None:
|
||||
generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
|
||||
self._generic = max(generics, key=lambda x: len(x.ancestors))
|
||||
return self._generic
|
||||
|
||||
def to_dict(self):
|
||||
"""Returns a dictionary representation of this object."""
|
||||
|
||||
@@ -1482,7 +1482,6 @@
|
||||
"cldemote",
|
||||
"movdir64b",
|
||||
"movdiri",
|
||||
"pdcm",
|
||||
"serialize",
|
||||
"waitpkg"
|
||||
],
|
||||
@@ -2237,6 +2236,84 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"zen5": {
|
||||
"from": ["zen4"],
|
||||
"vendor": "AuthenticAMD",
|
||||
"features": [
|
||||
"abm",
|
||||
"aes",
|
||||
"avx",
|
||||
"avx2",
|
||||
"avx512_bf16",
|
||||
"avx512_bitalg",
|
||||
"avx512bw",
|
||||
"avx512cd",
|
||||
"avx512dq",
|
||||
"avx512f",
|
||||
"avx512ifma",
|
||||
"avx512vbmi",
|
||||
"avx512_vbmi2",
|
||||
"avx512vl",
|
||||
"avx512_vnni",
|
||||
"avx512_vp2intersect",
|
||||
"avx512_vpopcntdq",
|
||||
"avx_vnni",
|
||||
"bmi1",
|
||||
"bmi2",
|
||||
"clflushopt",
|
||||
"clwb",
|
||||
"clzero",
|
||||
"cppc",
|
||||
"cx16",
|
||||
"f16c",
|
||||
"flush_l1d",
|
||||
"fma",
|
||||
"fsgsbase",
|
||||
"gfni",
|
||||
"ibrs_enhanced",
|
||||
"mmx",
|
||||
"movbe",
|
||||
"movdir64b",
|
||||
"movdiri",
|
||||
"pclmulqdq",
|
||||
"popcnt",
|
||||
"rdseed",
|
||||
"sse",
|
||||
"sse2",
|
||||
"sse4_1",
|
||||
"sse4_2",
|
||||
"sse4a",
|
||||
"ssse3",
|
||||
"tsc_adjust",
|
||||
"vaes",
|
||||
"vpclmulqdq",
|
||||
"xsavec",
|
||||
"xsaveopt"
|
||||
],
|
||||
"compilers": {
|
||||
"gcc": [
|
||||
{
|
||||
"versions": "14.1:",
|
||||
"name": "znver5",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"aocc": [
|
||||
{
|
||||
"versions": "5.0:",
|
||||
"name": "znver5",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"clang": [
|
||||
{
|
||||
"versions": "19.1:",
|
||||
"name": "znver5",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"ppc64": {
|
||||
"from": [],
|
||||
"vendor": "generic",
|
||||
|
||||
@@ -41,6 +41,20 @@ def comma_and(sequence: List[str]) -> str:
|
||||
return comma_list(sequence, "and")
|
||||
|
||||
|
||||
def ordinal(number: int) -> str:
|
||||
"""Return the ordinal representation (1st, 2nd, 3rd, etc.) for the provided number.
|
||||
|
||||
Args:
|
||||
number: int to convert to ordinal number
|
||||
|
||||
Returns: number's corresponding ordinal
|
||||
"""
|
||||
idx = (number % 10) << 1
|
||||
tens = number % 100 // 10
|
||||
suffix = "th" if tens == 1 or idx > 6 else "thstndrd"[idx : idx + 2]
|
||||
return f"{number}{suffix}"
|
||||
|
||||
|
||||
def quote(sequence: List[str], q: str = "'") -> List[str]:
|
||||
"""Quotes each item in the input list with the quote character passed as second argument."""
|
||||
return [f"{q}{e}{q}" for e in sequence]
|
||||
|
||||
@@ -47,6 +47,7 @@
|
||||
"copy_mode",
|
||||
"filter_file",
|
||||
"find",
|
||||
"find_first",
|
||||
"find_headers",
|
||||
"find_all_headers",
|
||||
"find_libraries",
|
||||
|
||||
@@ -348,7 +348,19 @@ def close(self):
|
||||
class MultiProcessFd:
|
||||
"""Return an object which stores a file descriptor and can be passed as an
|
||||
argument to a function run with ``multiprocessing.Process``, such that
|
||||
the file descriptor is available in the subprocess."""
|
||||
the file descriptor is available in the subprocess. It provides access via
|
||||
the `fd` property.
|
||||
|
||||
This object takes control over the associated FD: files opened from this
|
||||
using `fdopen` need to use `closefd=False`.
|
||||
"""
|
||||
|
||||
# As for why you have to fdopen(..., closefd=False): when a
|
||||
# multiprocessing.connection.Connection object stores an fd, it assumes
|
||||
# control over it, and will attempt to close it when gc'ed during __del__;
|
||||
# if you fdopen(multiprocessfd.fd, closefd=True) then the resulting file
|
||||
# will also assume control, and you can see warnings when there is an
|
||||
# attempted double close.
|
||||
|
||||
def __init__(self, fd):
|
||||
self._connection = None
|
||||
@@ -361,33 +373,20 @@ def __init__(self, fd):
|
||||
@property
|
||||
def fd(self):
|
||||
if self._connection:
|
||||
return self._connection._handle
|
||||
return self._connection.fileno()
|
||||
else:
|
||||
return self._fd
|
||||
|
||||
def close(self):
|
||||
"""Rather than `.close()`ing any file opened from the associated
|
||||
`.fd`, the `MultiProcessFd` should be closed with this.
|
||||
"""
|
||||
if self._connection:
|
||||
self._connection.close()
|
||||
else:
|
||||
os.close(self._fd)
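# Hypothetical usage sketch: a child process wraps the descriptor without
# taking ownership, then closes the MultiProcessFd itself exactly once.
#
#   def child(read_fd):  # read_fd is a MultiProcessFd passed to multiprocessing.Process
#       stream = os.fdopen(read_fd.fd, "r", closefd=False)  # the file must not own the fd
#       try:
#           return stream.read()
#       finally:
#           stream.close()   # closes only the Python wrapper
#           read_fd.close()  # releases the underlying fd / Connection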
|
||||
|
||||
|
||||
def close_connection_and_file(multiprocess_fd, file):
|
||||
# MultiprocessFd is intended to transmit a FD
|
||||
# to a child process, this FD is then opened to a Python File object
|
||||
# (using fdopen). In >= 3.8, MultiprocessFd encapsulates a
|
||||
# multiprocessing.connection.Connection; Connection closes the FD
|
||||
# when it is deleted, and prints a warning about duplicate closure if
|
||||
# it is not explicitly closed. In < 3.8, MultiprocessFd encapsulates a
|
||||
# simple FD; closing the FD here appears to conflict with
|
||||
# closure of the File object (in < 3.8 that is). Therefore this needs
|
||||
# to choose whether to close the File or the Connection.
|
||||
if sys.version_info >= (3, 8):
|
||||
multiprocess_fd.close()
|
||||
else:
|
||||
file.close()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def replace_environment(env):
|
||||
"""Replace the current environment (`os.environ`) with `env`.
|
||||
@@ -932,10 +931,10 @@ def _writer_daemon(
|
||||
# 1. Use line buffering (3rd param = 1) since Python 3 has a bug
|
||||
# that prevents unbuffered text I/O.
|
||||
# 2. Python 3.x before 3.7 does not open with UTF-8 encoding by default
|
||||
in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1, encoding="utf-8")
|
||||
in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1, encoding="utf-8", closefd=False)
|
||||
|
||||
if stdin_multiprocess_fd:
|
||||
stdin = os.fdopen(stdin_multiprocess_fd.fd)
|
||||
stdin = os.fdopen(stdin_multiprocess_fd.fd, closefd=False)
|
||||
else:
|
||||
stdin = None
|
||||
|
||||
@@ -1025,9 +1024,9 @@ def _writer_daemon(
|
||||
if isinstance(log_file, io.StringIO):
|
||||
control_pipe.send(log_file.getvalue())
|
||||
log_file_wrapper.close()
|
||||
close_connection_and_file(read_multiprocess_fd, in_pipe)
|
||||
read_multiprocess_fd.close()
|
||||
if stdin_multiprocess_fd:
|
||||
close_connection_and_file(stdin_multiprocess_fd, stdin)
|
||||
stdin_multiprocess_fd.close()
|
||||
|
||||
# send echo value back to the parent so it can be preserved.
|
||||
control_pipe.send(echo)
|
||||
|
||||
@@ -35,6 +35,7 @@
|
||||
import spack.caches
|
||||
import spack.config as config
|
||||
import spack.database as spack_db
|
||||
import spack.deptypes as dt
|
||||
import spack.error
|
||||
import spack.hash_types as ht
|
||||
import spack.hooks
|
||||
@@ -712,15 +713,32 @@ def get_buildfile_manifest(spec):
|
||||
return data
|
||||
|
||||
|
||||
def hashes_to_prefixes(spec):
|
||||
"""Return a dictionary of hashes to prefixes for a spec and its deps, excluding externals"""
|
||||
return {
|
||||
s.dag_hash(): str(s.prefix)
|
||||
def deps_to_relocate(spec):
|
||||
"""Return the transitive link and direct run dependencies of the spec.
|
||||
|
||||
This is a special traversal for dependencies we need to consider when relocating a package.
|
||||
|
||||
Package binaries, scripts, and other files may refer to the prefixes of dependencies, so
|
||||
we need to rewrite those locations when dependencies are in a different place at install time
|
||||
than they were at build time.
|
||||
|
||||
This traversal covers transitive link dependencies and direct run dependencies because:
|
||||
|
||||
1. Spack adds RPATHs for transitive link dependencies so that packages can find needed
|
||||
dependency libraries.
|
||||
2. Packages may call any of their *direct* run dependencies (and may bake their paths into
|
||||
binaries or scripts), so we also need to search for run dependency prefixes when relocating.
|
||||
|
||||
This returns a deduplicated list of transitive link dependencies and direct run dependencies.
|
||||
"""
|
||||
deps = [
|
||||
s
|
||||
for s in itertools.chain(
|
||||
spec.traverse(root=True, deptype="link"), spec.dependencies(deptype="run")
|
||||
)
|
||||
if not s.external
|
||||
}
|
||||
]
|
||||
return llnl.util.lang.dedupe(deps, key=lambda s: s.dag_hash())
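As an illustration only (a hypothetical toy graph, not Spack's ``Spec`` API), the selection rule described in the docstring, transitive link dependencies plus direct run dependencies, deduplicated, boils down to:

    import itertools

    # Toy dependency graph: node -> {"link": [...], "run": [...]}
    graph = {
        "root": {"link": ["libA"], "run": ["toolX"]},
        "libA": {"link": ["libB"], "run": ["toolY"]},  # toolY is only a transitive run dep
        "libB": {"link": [], "run": []},
    }

    def transitive_link(node, seen=None):
        seen = [] if seen is None else seen
        if node not in seen:
            seen.append(node)
            for dep in graph[node]["link"]:
                transitive_link(dep, seen)
        return seen

    selected = list(dict.fromkeys(itertools.chain(transitive_link("root"), graph["root"]["run"])))
    print(selected)  # ['root', 'libA', 'libB', 'toolX'] -- toolY is never visited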
|
||||
|
||||
|
||||
def get_buildinfo_dict(spec):
|
||||
@@ -736,7 +754,7 @@ def get_buildinfo_dict(spec):
|
||||
"relocate_binaries": manifest["binary_to_relocate"],
|
||||
"relocate_links": manifest["link_to_relocate"],
|
||||
"hardlinks_deduped": manifest["hardlinks_deduped"],
|
||||
"hash_to_prefix": hashes_to_prefixes(spec),
|
||||
"hash_to_prefix": {d.dag_hash(): str(d.prefix) for d in deps_to_relocate(spec)},
|
||||
}
|
||||
|
||||
|
||||
@@ -1631,7 +1649,6 @@ def _oci_push(
|
||||
Dict[str, spack.oci.oci.Blob],
|
||||
List[Tuple[Spec, BaseException]],
|
||||
]:
|
||||
|
||||
# Spec dag hash -> blob
|
||||
checksums: Dict[str, spack.oci.oci.Blob] = {}
|
||||
|
||||
@@ -2201,11 +2218,36 @@ def relocate_package(spec):
|
||||
# First match specific prefix paths. Possibly the *local* install prefix
|
||||
# of some dependency is in an upstream, so we cannot assume the original
|
||||
# spack store root can be mapped uniformly to the new spack store root.
|
||||
for dag_hash, new_dep_prefix in hashes_to_prefixes(spec).items():
|
||||
if dag_hash in hash_to_old_prefix:
|
||||
old_dep_prefix = hash_to_old_prefix[dag_hash]
|
||||
prefix_to_prefix_bin[old_dep_prefix] = new_dep_prefix
|
||||
prefix_to_prefix_text[old_dep_prefix] = new_dep_prefix
|
||||
#
|
||||
# If the spec is spliced, we need to handle the simultaneous mapping
|
||||
# from the old install_tree to the new install_tree and from the build_spec
|
||||
# to the spliced spec.
|
||||
# Because foo.build_spec is foo for any non-spliced spec, we can simplify
|
||||
# by checking for spliced-in nodes by checking for nodes not in the build_spec
|
||||
# without any explicit check for whether the spec is spliced.
|
||||
# An analog in this algorithm is any spec that shares a name or provides the same virtuals
|
||||
# in the context of the relevant root spec. This ensures that the analog for a spec s
|
||||
# is the spec that s replaced when we spliced.
|
||||
relocation_specs = deps_to_relocate(spec)
|
||||
build_spec_ids = set(id(s) for s in spec.build_spec.traverse(deptype=dt.ALL & ~dt.BUILD))
|
||||
for s in relocation_specs:
|
||||
analog = s
|
||||
if id(s) not in build_spec_ids:
|
||||
analogs = [
|
||||
d
|
||||
for d in spec.build_spec.traverse(deptype=dt.ALL & ~dt.BUILD)
|
||||
if s._splice_match(d, self_root=spec, other_root=spec.build_spec)
|
||||
]
|
||||
if analogs:
|
||||
# Prefer same-name analogs and prefer higher versions
|
||||
# This matches the preferences in Spec.splice, so we will find same node
|
||||
analog = max(analogs, key=lambda a: (a.name == s.name, a.version))
|
||||
|
||||
lookup_dag_hash = analog.dag_hash()
|
||||
if lookup_dag_hash in hash_to_old_prefix:
|
||||
old_dep_prefix = hash_to_old_prefix[lookup_dag_hash]
|
||||
prefix_to_prefix_bin[old_dep_prefix] = str(s.prefix)
|
||||
prefix_to_prefix_text[old_dep_prefix] = str(s.prefix)
|
||||
|
||||
# Only then add the generic fallback of install prefix -> install prefix.
|
||||
prefix_to_prefix_text[old_prefix] = new_prefix
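For the analog preference above, a toy illustration of how that ``max`` key orders candidates (made-up ``(name, version)`` tuples rather than real specs): a same-name analog always wins, and among same-name analogs the higher version wins.

    # The spliced-out node is named "zlib"; tuples compare element-wise and True > False.
    candidates = [("zlib-ng", 3), ("zlib", 1), ("zlib", 2)]
    best = max(candidates, key=lambda a: (a[0] == "zlib", a[1]))
    print(best)  # ('zlib', 2)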
|
||||
@@ -2520,7 +2562,13 @@ def _ensure_common_prefix(tar: tarfile.TarFile) -> str:
|
||||
return pkg_prefix
|
||||
|
||||
|
||||
def install_root_node(spec, unsigned=False, force=False, sha256=None):
|
||||
def install_root_node(
|
||||
spec: spack.spec.Spec,
|
||||
unsigned=False,
|
||||
force: bool = False,
|
||||
sha256: Optional[str] = None,
|
||||
allow_missing: bool = False,
|
||||
) -> None:
|
||||
"""Install the root node of a concrete spec from a buildcache.
|
||||
|
||||
Checking the sha256 sum of a node before installation is usually needed only
|
||||
@@ -2529,11 +2577,10 @@ def install_root_node(spec, unsigned=False, force=False, sha256=None):
|
||||
|
||||
Args:
|
||||
spec: spec to be installed (note that only the root node will be installed)
|
||||
unsigned (bool): if True allows installing unsigned binaries
|
||||
force (bool): force installation if the spec is already present in the
|
||||
local store
|
||||
sha256 (str): optional sha256 of the binary package, to be checked
|
||||
before installation
|
||||
unsigned: if True allows installing unsigned binaries
|
||||
force: force installation if the spec is already present in the local store
|
||||
sha256: optional sha256 of the binary package, to be checked before installation
|
||||
allow_missing: when true, allows installing a node with missing dependencies
|
||||
"""
|
||||
# Early termination
|
||||
if spec.external or spec.virtual:
|
||||
@@ -2543,10 +2590,10 @@ def install_root_node(spec, unsigned=False, force=False, sha256=None):
|
||||
warnings.warn("Package for spec {0} already installed.".format(spec.format()))
|
||||
return
|
||||
|
||||
download_result = download_tarball(spec, unsigned)
|
||||
download_result = download_tarball(spec.build_spec, unsigned)
|
||||
if not download_result:
|
||||
msg = 'download of binary cache file for spec "{0}" failed'
|
||||
raise RuntimeError(msg.format(spec.format()))
|
||||
raise RuntimeError(msg.format(spec.build_spec.format()))
|
||||
|
||||
if sha256:
|
||||
checker = spack.util.crypto.Checker(sha256)
|
||||
@@ -2565,8 +2612,13 @@ def install_root_node(spec, unsigned=False, force=False, sha256=None):
|
||||
with spack.util.path.filter_padding():
|
||||
tty.msg('Installing "{0}" from a buildcache'.format(spec.format()))
|
||||
extract_tarball(spec, download_result, force)
|
||||
spec.package.windows_establish_runtime_linkage()
|
||||
if spec.spliced: # overwrite old metadata with new
|
||||
spack.store.STORE.layout.write_spec(
|
||||
spec, spack.store.STORE.layout.spec_file_path(spec)
|
||||
)
|
||||
spack.hooks.post_install(spec, False)
|
||||
spack.store.STORE.db.add(spec)
|
||||
spack.store.STORE.db.add(spec, allow_missing=allow_missing)
|
||||
|
||||
|
||||
def install_single_spec(spec, unsigned=False, force=False):
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
"""Common basic functions used through the spack.bootstrap package"""
|
||||
import fnmatch
|
||||
import glob
|
||||
import importlib
|
||||
import os.path
|
||||
import re
|
||||
@@ -60,10 +61,19 @@ def _try_import_from_store(
|
||||
python, *_ = candidate_spec.dependencies("python-venv")
|
||||
else:
|
||||
python, *_ = candidate_spec.dependencies("python")
|
||||
module_paths = [
|
||||
os.path.join(candidate_spec.prefix, python.package.purelib),
|
||||
os.path.join(candidate_spec.prefix, python.package.platlib),
|
||||
]
|
||||
|
||||
# if python is installed, ask it for the layout
|
||||
if python.installed:
|
||||
module_paths = [
|
||||
os.path.join(candidate_spec.prefix, python.package.purelib),
|
||||
os.path.join(candidate_spec.prefix, python.package.platlib),
|
||||
]
|
||||
# otherwise search for the site-packages directory
|
||||
# (clingo from binaries with truncated python-venv runtime)
|
||||
else:
|
||||
module_paths = glob.glob(
|
||||
os.path.join(candidate_spec.prefix, "lib", "python*", "site-packages")
|
||||
)
|
||||
path_before = list(sys.path)
|
||||
|
||||
# NOTE: try module_paths first and last, last allows an existing version in path
|
||||
|
||||
@@ -175,7 +175,15 @@ def _install_by_hash(
|
||||
query = spack.binary_distribution.BinaryCacheQuery(all_architectures=True)
|
||||
for match in spack.store.find([f"/{pkg_hash}"], multiple=False, query_fn=query):
|
||||
spack.binary_distribution.install_root_node(
|
||||
match, unsigned=True, force=True, sha256=pkg_sha256
|
||||
# allow_missing is true since when bootstrapping clingo we truncate runtime
|
||||
# deps such as gcc-runtime, since we link libstdc++ statically, and the
|
||||
# remaining runtime deps are loaded by the Python interpreter. This just silences
|
||||
# warnings about missing dependencies.
|
||||
match,
|
||||
unsigned=True,
|
||||
force=True,
|
||||
sha256=pkg_sha256,
|
||||
allow_missing=True,
|
||||
)
|
||||
|
||||
def _install_and_test(
|
||||
|
||||
@@ -91,7 +91,7 @@
|
||||
)
|
||||
from spack.util.executable import Executable
|
||||
from spack.util.log_parse import make_log_context, parse_log_events
|
||||
from spack.util.module_cmd import load_module, path_from_modules
|
||||
from spack.util.module_cmd import load_module
|
||||
|
||||
#
|
||||
# This can be set by the user to globally disable parallel builds.
|
||||
@@ -617,13 +617,11 @@ def set_package_py_globals(pkg, context: Context = Context.BUILD):
|
||||
"""
|
||||
module = ModuleChangePropagator(pkg)
|
||||
|
||||
if context == Context.BUILD:
|
||||
module.std_cmake_args = spack.build_systems.cmake.CMakeBuilder.std_args(pkg)
|
||||
module.std_meson_args = spack.build_systems.meson.MesonBuilder.std_args(pkg)
|
||||
module.std_pip_args = spack.build_systems.python.PythonPipBuilder.std_args(pkg)
|
||||
|
||||
jobs = spack.config.determine_number_of_jobs(parallel=pkg.parallel)
|
||||
module.make_jobs = jobs
|
||||
if context == Context.BUILD:
|
||||
module.std_meson_args = spack.build_systems.meson.MesonBuilder.std_args(pkg)
|
||||
module.std_pip_args = spack.build_systems.python.PythonPipBuilder.std_args(pkg)
|
||||
|
||||
# TODO: make these build deps that can be installed if not found.
|
||||
module.make = MakeExecutable("make", jobs)
|
||||
@@ -792,21 +790,6 @@ def get_rpath_deps(pkg: spack.package_base.PackageBase) -> List[spack.spec.Spec]
|
||||
return _get_rpath_deps_from_spec(pkg.spec, pkg.transitive_rpaths)
|
||||
|
||||
|
||||
def get_rpaths(pkg):
|
||||
"""Get a list of all the rpaths for a package."""
|
||||
rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
|
||||
deps = get_rpath_deps(pkg)
|
||||
rpaths.extend(d.prefix.lib for d in deps if os.path.isdir(d.prefix.lib))
|
||||
rpaths.extend(d.prefix.lib64 for d in deps if os.path.isdir(d.prefix.lib64))
|
||||
# Second module is our compiler mod name. We use that to get rpaths from
|
||||
# module show output.
|
||||
if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
|
||||
mod_rpath = path_from_modules([pkg.compiler.modules[1]])
|
||||
if mod_rpath:
|
||||
rpaths.append(mod_rpath)
|
||||
return list(dedupe(filter_system_paths(rpaths)))
|
||||
|
||||
|
||||
def load_external_modules(pkg):
|
||||
"""Traverse a package's spec DAG and load any external modules.
|
||||
|
||||
@@ -1063,6 +1046,12 @@ def set_all_package_py_globals(self):
|
||||
# This includes runtime dependencies, also runtime deps of direct build deps.
|
||||
set_package_py_globals(pkg, context=Context.RUN)
|
||||
|
||||
# Looping over the set of packages a second time
|
||||
# ensures all globals are loaded into the module space prior to
|
||||
# any package setup. This guarantees package setup methods have
|
||||
# access to expected module level definitions such as "spack_cc"
|
||||
for dspec, flag in chain(self.external, self.nonexternal):
|
||||
pkg = dspec.package
|
||||
for spec in dspec.dependents():
|
||||
# Note: some specs have dependents that are unreachable from the root, so avoid
|
||||
# setting globals for those.
|
||||
@@ -1072,6 +1061,15 @@ def set_all_package_py_globals(self):
|
||||
pkg.setup_dependent_package(dependent_module, spec)
|
||||
dependent_module.propagate_changes_to_mro()
|
||||
|
||||
pkg = self.specs[0].package
|
||||
if self.context == Context.BUILD:
|
||||
module = ModuleChangePropagator(pkg)
|
||||
# std_cmake_args is not sufficiently static to be defined
|
||||
# in set_package_py_globals and is deprecated so it's handled
|
||||
# here as a special case
|
||||
module.std_cmake_args = spack.build_systems.cmake.CMakeBuilder.std_args(pkg)
|
||||
module.propagate_changes_to_mro()
|
||||
|
||||
def get_env_modifications(self) -> EnvironmentModifications:
|
||||
"""Returns the environment variable modifications for the given input specs and context.
|
||||
Environment modifications include:
|
||||
@@ -1141,32 +1139,6 @@ def _make_runnable(self, dep: spack.spec.Spec, env: EnvironmentModifications):
|
||||
env.prepend_path("PATH", bin_dir)
|
||||
|
||||
|
||||
def get_cmake_prefix_path(pkg):
|
||||
# Note that unlike modifications_from_dependencies, this does not include
|
||||
# any edits to CMAKE_PREFIX_PATH defined in custom
|
||||
# setup_dependent_build_environment implementations of dependency packages
|
||||
build_deps = set(pkg.spec.dependencies(deptype=("build", "test")))
|
||||
link_deps = set(pkg.spec.traverse(root=False, deptype=("link")))
|
||||
build_link_deps = build_deps | link_deps
|
||||
spack_built = []
|
||||
externals = []
|
||||
# modifications_from_dependencies updates CMAKE_PREFIX_PATH by first
|
||||
# prepending all externals and then all non-externals
|
||||
for dspec in pkg.spec.traverse(root=False, order="post"):
|
||||
if dspec in build_link_deps:
|
||||
if dspec.external:
|
||||
externals.insert(0, dspec)
|
||||
else:
|
||||
spack_built.insert(0, dspec)
|
||||
|
||||
ordered_build_link_deps = spack_built + externals
|
||||
cmake_prefix_path_entries = []
|
||||
for spec in ordered_build_link_deps:
|
||||
cmake_prefix_path_entries.extend(spec.package.cmake_prefix_paths)
|
||||
|
||||
return filter_system_paths(cmake_prefix_path_entries)
|
||||
|
||||
|
||||
def _setup_pkg_and_run(
|
||||
serialized_pkg: "spack.subprocess_context.PackageInstallContext",
|
||||
function: Callable,
|
||||
@@ -1222,7 +1194,7 @@ def _setup_pkg_and_run(
|
||||
# that the parent process is not going to read from it till we
|
||||
# are done with the child, so we undo Python's precaution.
|
||||
if input_multiprocess_fd is not None:
|
||||
sys.stdin = os.fdopen(input_multiprocess_fd.fd)
|
||||
sys.stdin = os.fdopen(input_multiprocess_fd.fd, closefd=False)
|
||||
|
||||
pkg = serialized_pkg.restore()
|
||||
|
||||
@@ -1245,7 +1217,7 @@ def _setup_pkg_and_run(
|
||||
# objects can't be sent to the parent.
|
||||
exc_type = type(e)
|
||||
tb = e.__traceback__
|
||||
tb_string = traceback.format_exception(exc_type, e, tb)
|
||||
tb_string = "".join(traceback.format_exception(exc_type, e, tb))
|
||||
|
||||
# build up some context from the offending package so we can
|
||||
# show that, too.
|
||||
|
||||
@@ -10,7 +10,6 @@
|
||||
import llnl.util.filesystem as fs
|
||||
import llnl.util.tty as tty
|
||||
|
||||
import spack.build_environment
|
||||
import spack.builder
|
||||
|
||||
from .cmake import CMakeBuilder, CMakePackage
|
||||
@@ -297,18 +296,6 @@ def initconfig_hardware_entries(self):
|
||||
def std_initconfig_entries(self):
|
||||
cmake_prefix_path_env = os.environ["CMAKE_PREFIX_PATH"]
|
||||
cmake_prefix_path = cmake_prefix_path_env.replace(os.pathsep, ";")
|
||||
cmake_rpaths_env = spack.build_environment.get_rpaths(self.pkg)
|
||||
cmake_rpaths_path = ";".join(cmake_rpaths_env)
|
||||
complete_rpath_list = cmake_rpaths_path
|
||||
if "SPACK_COMPILER_EXTRA_RPATHS" in os.environ:
|
||||
spack_extra_rpaths_env = os.environ["SPACK_COMPILER_EXTRA_RPATHS"]
|
||||
spack_extra_rpaths_path = spack_extra_rpaths_env.replace(os.pathsep, ";")
|
||||
complete_rpath_list = "{0};{1}".format(complete_rpath_list, spack_extra_rpaths_path)
|
||||
|
||||
if "SPACK_COMPILER_IMPLICIT_RPATHS" in os.environ:
|
||||
spack_implicit_rpaths_env = os.environ["SPACK_COMPILER_IMPLICIT_RPATHS"]
|
||||
spack_implicit_rpaths_path = spack_implicit_rpaths_env.replace(os.pathsep, ";")
|
||||
complete_rpath_list = "{0};{1}".format(complete_rpath_list, spack_implicit_rpaths_path)
|
||||
|
||||
return [
|
||||
"#------------------{0}".format("-" * 60),
|
||||
@@ -318,8 +305,6 @@ def std_initconfig_entries(self):
|
||||
"#------------------{0}\n".format("-" * 60),
|
||||
cmake_cache_string("CMAKE_PREFIX_PATH", cmake_prefix_path),
|
||||
cmake_cache_string("CMAKE_INSTALL_RPATH_USE_LINK_PATH", "ON"),
|
||||
cmake_cache_string("CMAKE_BUILD_RPATH", complete_rpath_list),
|
||||
cmake_cache_string("CMAKE_INSTALL_RPATH", complete_rpath_list),
|
||||
self.define_cmake_cache_from_variant("CMAKE_BUILD_TYPE", "build_type"),
|
||||
]
|
||||
|
||||
|
||||
@@ -8,17 +8,19 @@
|
||||
import platform
|
||||
import re
|
||||
import sys
|
||||
from typing import List, Optional, Tuple
|
||||
from itertools import chain
|
||||
from typing import List, Optional, Set, Tuple
|
||||
|
||||
import llnl.util.filesystem as fs
|
||||
from llnl.util.lang import stable_partition
|
||||
|
||||
import spack.build_environment
|
||||
import spack.builder
|
||||
import spack.deptypes as dt
|
||||
import spack.error
|
||||
import spack.package_base
|
||||
from spack.directives import build_system, conflicts, depends_on, variant
|
||||
from spack.multimethod import when
|
||||
from spack.util.environment import filter_system_paths
|
||||
|
||||
from ._checks import BaseBuilder, execute_build_time_tests
|
||||
|
||||
@@ -152,6 +154,24 @@ def _values(x):
|
||||
conflicts(f"generator={x}")
|
||||
|
||||
|
||||
def get_cmake_prefix_path(pkg: spack.package_base.PackageBase) -> List[str]:
|
||||
"""Obtain the CMAKE_PREFIX_PATH entries for a package, based on the cmake_prefix_path package
|
||||
attribute of direct build/test and transitive link dependencies."""
|
||||
# Add direct build/test deps
|
||||
selected: Set[str] = {s.dag_hash() for s in pkg.spec.dependencies(deptype=dt.BUILD | dt.TEST)}
|
||||
# Add transitive link deps
|
||||
selected.update(s.dag_hash() for s in pkg.spec.traverse(root=False, deptype=dt.LINK))
|
||||
# Separate out externals so they do not shadow Spack prefixes
|
||||
externals, spack_built = stable_partition(
|
||||
(s for s in pkg.spec.traverse(root=False, order="topo") if s.dag_hash() in selected),
|
||||
lambda x: x.external,
|
||||
)
|
||||
|
||||
return filter_system_paths(
|
||||
path for spec in chain(spack_built, externals) for path in spec.package.cmake_prefix_paths
|
||||
)
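A brief sketch of the ``stable_partition`` ordering relied on above (a minimal reimplementation with toy strings; Spack's own helper lives in ``llnl.util.lang``): entries matching the predicate land in the first list, both groups keep their original relative order, and Spack-built prefixes are then placed ahead of externals.

    def stable_partition(items, predicate):
        # Minimal stand-in for llnl.util.lang.stable_partition, for illustration only.
        matching, rest = [], []
        for item in items:
            (matching if predicate(item) else rest).append(item)
        return matching, rest

    deps = ["zlib (external)", "foo (spack-built)", "openssl (external)", "bar (spack-built)"]
    externals, spack_built = stable_partition(deps, lambda d: "(external)" in d)
    print(spack_built + externals)
    # ['foo (spack-built)', 'bar (spack-built)', 'zlib (external)', 'openssl (external)']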
|
||||
|
||||
|
||||
class CMakePackage(spack.package_base.PackageBase):
|
||||
"""Specialized class for packages built using CMake
|
||||
|
||||
@@ -358,6 +378,16 @@ def std_args(pkg, generator=None):
|
||||
"-G",
|
||||
generator,
|
||||
define("CMAKE_INSTALL_PREFIX", pathlib.Path(pkg.prefix).as_posix()),
|
||||
define("CMAKE_INSTALL_RPATH_USE_LINK_PATH", True),
|
||||
# only include the install prefix lib dirs; rpaths for deps are added by USE_LINK_PATH
|
||||
define(
|
||||
"CMAKE_INSTALL_RPATH",
|
||||
[
|
||||
pathlib.Path(pkg.prefix, "lib").as_posix(),
|
||||
pathlib.Path(pkg.prefix, "lib64").as_posix(),
|
||||
],
|
||||
),
|
||||
define("CMAKE_PREFIX_PATH", get_cmake_prefix_path(pkg)),
|
||||
define("CMAKE_BUILD_TYPE", build_type),
|
||||
]
|
||||
|
||||
@@ -372,15 +402,6 @@ def std_args(pkg, generator=None):
|
||||
_conditional_cmake_defaults(pkg, args)
|
||||
_maybe_set_python_hints(pkg, args)
|
||||
|
||||
# Set up CMake rpath
|
||||
args.extend(
|
||||
[
|
||||
define("CMAKE_INSTALL_RPATH_USE_LINK_PATH", True),
|
||||
define("CMAKE_INSTALL_RPATH", spack.build_environment.get_rpaths(pkg)),
|
||||
define("CMAKE_PREFIX_PATH", spack.build_environment.get_cmake_prefix_path(pkg)),
|
||||
]
|
||||
)
|
||||
|
||||
return args
|
||||
|
||||
@staticmethod
|
||||
@@ -541,6 +562,13 @@ def cmake_args(self):
|
||||
|
||||
def cmake(self, pkg, spec, prefix):
|
||||
"""Runs ``cmake`` in the build directory"""
|
||||
|
||||
# skip cmake phase if it is an incremental develop build
|
||||
if spec.is_develop and os.path.isfile(
|
||||
os.path.join(self.build_directory, "CMakeCache.txt")
|
||||
):
|
||||
return
|
||||
|
||||
options = self.std_cmake_args
|
||||
options += self.cmake_args()
|
||||
options.append(os.path.abspath(self.root_cmakelists_dir))
|
||||
|
||||
@@ -110,8 +110,8 @@ def compute_capabilities(arch_list: Iterable[str]) -> List[str]:
|
||||
|
||||
depends_on("cuda@5.0:10.2", when="cuda_arch=30")
|
||||
depends_on("cuda@5.0:10.2", when="cuda_arch=32")
|
||||
depends_on("cuda@5.0:", when="cuda_arch=35")
|
||||
depends_on("cuda@6.5:", when="cuda_arch=37")
|
||||
depends_on("cuda@5.0:11.8", when="cuda_arch=35")
|
||||
depends_on("cuda@6.5:11.8", when="cuda_arch=37")
|
||||
|
||||
depends_on("cuda@6.0:", when="cuda_arch=50")
|
||||
depends_on("cuda@6.5:", when="cuda_arch=52")
|
||||
@@ -131,6 +131,7 @@ def compute_capabilities(arch_list: Iterable[str]) -> List[str]:
|
||||
depends_on("cuda@11.8:", when="cuda_arch=89")
|
||||
|
||||
depends_on("cuda@12.0:", when="cuda_arch=90")
|
||||
depends_on("cuda@12.0:", when="cuda_arch=90a")
|
||||
|
||||
# From the NVIDIA install guide we know of conflicts for particular
|
||||
# platforms (linux, darwin), architectures (x86, powerpc) and compilers
|
||||
@@ -149,7 +150,6 @@ def compute_capabilities(arch_list: Iterable[str]) -> List[str]:
|
||||
# minimum supported versions
|
||||
conflicts("%gcc@:4", when="+cuda ^cuda@11.0:")
|
||||
conflicts("%gcc@:5", when="+cuda ^cuda@11.4:")
|
||||
conflicts("%gcc@:7.2", when="+cuda ^cuda@12.4:")
|
||||
conflicts("%clang@:6", when="+cuda ^cuda@12.2:")
|
||||
|
||||
# maximum supported version
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import ssl
|
||||
import stat
|
||||
import subprocess
|
||||
import sys
|
||||
@@ -19,14 +20,14 @@
|
||||
from collections import defaultdict, namedtuple
|
||||
from typing import Dict, List, Optional, Set, Tuple
|
||||
from urllib.error import HTTPError, URLError
|
||||
from urllib.parse import urlencode
|
||||
from urllib.request import HTTPHandler, Request, build_opener
|
||||
from urllib.parse import quote, urlencode, urlparse
|
||||
from urllib.request import HTTPHandler, HTTPSHandler, Request, build_opener
|
||||
|
||||
import ruamel.yaml
|
||||
|
||||
import llnl.util.filesystem as fs
|
||||
import llnl.util.tty as tty
|
||||
from llnl.util.lang import memoized
|
||||
from llnl.util.lang import Singleton, memoized
|
||||
from llnl.util.tty.color import cescape, colorize
|
||||
|
||||
import spack
|
||||
@@ -50,6 +51,31 @@
|
||||
from spack.reporters.cdash import SPACK_CDASH_TIMEOUT
|
||||
from spack.reporters.cdash import build_stamp as cdash_build_stamp
|
||||
|
||||
|
||||
def _urlopen():
|
||||
error_handler = web_util.SpackHTTPDefaultErrorHandler()
|
||||
|
||||
# One opener with HTTPS ssl enabled
|
||||
with_ssl = build_opener(
|
||||
HTTPHandler(), HTTPSHandler(context=web_util.ssl_create_default_context()), error_handler
|
||||
)
|
||||
|
||||
# One opener with HTTPS ssl disabled
|
||||
without_ssl = build_opener(
|
||||
HTTPHandler(), HTTPSHandler(context=ssl._create_unverified_context()), error_handler
|
||||
)
|
||||
|
||||
# And dynamically dispatch based on the config:verify_ssl.
|
||||
def dispatch_open(fullurl, data=None, timeout=None, verify_ssl=True):
|
||||
opener = with_ssl if verify_ssl else without_ssl
|
||||
timeout = timeout or spack.config.get("config:connect_timeout", 1)
|
||||
return opener.open(fullurl, data, timeout)
|
||||
|
||||
return dispatch_open
|
||||
|
||||
|
||||
_dyn_mapping_urlopener = Singleton(_urlopen)
|
||||
|
||||
# See https://docs.gitlab.com/ee/ci/yaml/#retry for descriptions of conditions
|
||||
JOB_RETRY_CONDITIONS = [
|
||||
# "always",
|
||||
@@ -405,9 +431,20 @@ def __init__(self, ci_config, spec_labels, stages):
|
||||
if name not in ["any", "build"]:
|
||||
jobs[name] = self.__init_job("")
|
||||
|
||||
def __init_job(self, spec):
|
||||
def __init_job(self, release_spec):
|
||||
"""Initialize job object"""
|
||||
return {"spec": spec, "attributes": {}}
|
||||
job_object = {"spec": release_spec, "attributes": {}}
|
||||
if release_spec:
|
||||
job_vars = job_object["attributes"].setdefault("variables", {})
|
||||
job_vars["SPACK_JOB_SPEC_DAG_HASH"] = release_spec.dag_hash()
|
||||
job_vars["SPACK_JOB_SPEC_PKG_NAME"] = release_spec.name
|
||||
job_vars["SPACK_JOB_SPEC_PKG_VERSION"] = release_spec.format("{version}")
|
||||
job_vars["SPACK_JOB_SPEC_COMPILER_NAME"] = release_spec.format("{compiler.name}")
|
||||
job_vars["SPACK_JOB_SPEC_COMPILER_VERSION"] = release_spec.format("{compiler.version}")
|
||||
job_vars["SPACK_JOB_SPEC_ARCH"] = release_spec.format("{architecture}")
|
||||
job_vars["SPACK_JOB_SPEC_VARIANTS"] = release_spec.format("{variants}")
|
||||
|
||||
return job_object
|
||||
|
||||
def __is_named(self, section):
|
||||
"""Check if a pipeline-gen configuration section is for a named job,
|
||||
@@ -500,6 +537,7 @@ def generate_ir(self):
|
||||
for section in reversed(pipeline_gen):
|
||||
name = self.__is_named(section)
|
||||
has_submapping = "submapping" in section
|
||||
has_dynmapping = "dynamic-mapping" in section
|
||||
section = cfg.InternalConfigScope._process_dict_keyname_overrides(section)
|
||||
|
||||
if name:
|
||||
@@ -542,6 +580,108 @@ def _apply_section(dest, src):
|
||||
job["attributes"] = self.__apply_submapping(
|
||||
job["attributes"], job["spec"], section
|
||||
)
|
||||
elif has_dynmapping:
|
||||
mapping = section["dynamic-mapping"]
|
||||
|
||||
dynmap_name = mapping.get("name")
|
||||
|
||||
# Check if this section should be skipped
|
||||
dynmap_skip = os.environ.get("SPACK_CI_SKIP_DYNAMIC_MAPPING")
|
||||
if dynmap_name and dynmap_skip:
|
||||
if re.match(dynmap_skip, dynmap_name):
|
||||
continue
|
||||
|
||||
# Get the endpoint
|
||||
endpoint = mapping["endpoint"]
|
||||
endpoint_url = urlparse(endpoint)
|
||||
|
||||
# Configure the request header
|
||||
header = {"User-Agent": web_util.SPACK_USER_AGENT}
|
||||
header.update(mapping.get("header", {}))
|
||||
|
||||
# Expand header environment variables
|
||||
# ie. if tokens are passed
|
||||
for key, value in header.items():
|
||||
header[key] = os.path.expandvars(value)
|
||||
|
||||
verify_ssl = mapping.get("verify_ssl", spack.config.get("config:verify_ssl", True))
|
||||
timeout = mapping.get("timeout", spack.config.get("config:connect_timeout", 1))
|
||||
|
||||
required = mapping.get("require", [])
|
||||
allowed = mapping.get("allow", [])
|
||||
ignored = mapping.get("ignore", [])
|
||||
|
||||
# required keys are implicitly allowed
|
||||
allowed = sorted(set(allowed + required))
|
||||
ignored = sorted(set(ignored))
|
||||
required = sorted(set(required))
|
||||
|
||||
# Make sure required things are not also ignored
|
||||
assert not any([ikey in required for ikey in ignored])
|
||||
|
||||
def job_query(job):
|
||||
job_vars = job["attributes"]["variables"]
|
||||
query = (
|
||||
"{SPACK_JOB_SPEC_PKG_NAME}@{SPACK_JOB_SPEC_PKG_VERSION}"
|
||||
# The preceding spaces are required (ref. https://github.com/spack/spack-gantry/blob/develop/docs/api.md#allocation)
|
||||
" {SPACK_JOB_SPEC_VARIANTS}"
|
||||
" arch={SPACK_JOB_SPEC_ARCH}"
|
||||
"%{SPACK_JOB_SPEC_COMPILER_NAME}@{SPACK_JOB_SPEC_COMPILER_VERSION}"
|
||||
).format_map(job_vars)
|
||||
return f"spec={quote(query)}"
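For concreteness, a hypothetical rendering of the query string built by ``job_query`` above (all variable values invented):

    from urllib.parse import quote

    job_vars = {
        "SPACK_JOB_SPEC_PKG_NAME": "zlib",
        "SPACK_JOB_SPEC_PKG_VERSION": "1.3.1",
        "SPACK_JOB_SPEC_VARIANTS": "+shared",
        "SPACK_JOB_SPEC_ARCH": "linux-ubuntu22.04-x86_64_v3",
        "SPACK_JOB_SPEC_COMPILER_NAME": "gcc",
        "SPACK_JOB_SPEC_COMPILER_VERSION": "12.3.0",
    }
    query = (
        "{SPACK_JOB_SPEC_PKG_NAME}@{SPACK_JOB_SPEC_PKG_VERSION}"
        " {SPACK_JOB_SPEC_VARIANTS}"
        " arch={SPACK_JOB_SPEC_ARCH}"
        "%{SPACK_JOB_SPEC_COMPILER_NAME}@{SPACK_JOB_SPEC_COMPILER_VERSION}"
    ).format_map(job_vars)
    print(f"spec={quote(query)}")
    # roughly: spec=zlib%401.3.1%20%2Bshared%20arch%3Dlinux-ubuntu22.04-x86_64_v3%25gcc%4012.3.0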
|
||||
|
||||
for job in jobs.values():
|
||||
if not job["spec"]:
|
||||
continue
|
||||
|
||||
# Create request for this job
|
||||
query = job_query(job)
|
||||
request = Request(
|
||||
endpoint_url._replace(query=query).geturl(), headers=header, method="GET"
|
||||
)
|
||||
try:
|
||||
response = _dyn_mapping_urlopener(
|
||||
request, verify_ssl=verify_ssl, timeout=timeout
|
||||
)
|
||||
except Exception as e:
|
||||
# For now just ignore any errors from dynamic mapping and continue
|
||||
# This is still experimental, and failures should not stop CI
|
||||
# from running normally
|
||||
tty.warn(f"Failed to fetch dynamic mapping for query:\n\t{query}")
|
||||
tty.warn(f"{e}")
|
||||
continue
|
||||
|
||||
config = json.load(codecs.getreader("utf-8")(response))
|
||||
|
||||
# Strip ignore keys
|
||||
if ignored:
|
||||
for key in ignored:
|
||||
if key in config:
|
||||
config.pop(key)
|
||||
|
||||
# Only keep allowed keys
|
||||
clean_config = {}
|
||||
if allowed:
|
||||
for key in allowed:
|
||||
if key in config:
|
||||
clean_config[key] = config[key]
|
||||
else:
|
||||
clean_config = config
|
||||
|
||||
# Verify all of the required keys are present
|
||||
if required:
|
||||
missing_keys = []
|
||||
for key in required:
|
||||
if key not in clean_config.keys():
|
||||
missing_keys.append(key)
|
||||
|
||||
if missing_keys:
|
||||
tty.warn(f"Response missing required keys: {missing_keys}")
|
||||
|
||||
if clean_config:
|
||||
job["attributes"] = spack.config.merge_yaml(
|
||||
job.get("attributes", {}), clean_config
|
||||
)
|
||||
|
||||
for _, job in jobs.items():
|
||||
if job["spec"]:
|
||||
@@ -952,15 +1092,6 @@ def main_script_replacements(cmd):
|
||||
|
||||
job_name = get_job_name(release_spec, build_group)
|
||||
|
||||
job_vars = job_object.setdefault("variables", {})
|
||||
job_vars["SPACK_JOB_SPEC_DAG_HASH"] = release_spec_dag_hash
|
||||
job_vars["SPACK_JOB_SPEC_PKG_NAME"] = release_spec.name
|
||||
job_vars["SPACK_JOB_SPEC_PKG_VERSION"] = release_spec.format("{version}")
|
||||
job_vars["SPACK_JOB_SPEC_COMPILER_NAME"] = release_spec.format("{compiler.name}")
|
||||
job_vars["SPACK_JOB_SPEC_COMPILER_VERSION"] = release_spec.format("{compiler.version}")
|
||||
job_vars["SPACK_JOB_SPEC_ARCH"] = release_spec.format("{architecture}")
|
||||
job_vars["SPACK_JOB_SPEC_VARIANTS"] = release_spec.format("{variants}")
|
||||
|
||||
job_object["needs"] = []
|
||||
if spec_label in dependencies:
|
||||
if enable_artifacts_buildcache:
|
||||
@@ -1038,6 +1169,7 @@ def main_script_replacements(cmd):
|
||||
|
||||
# Let downstream jobs know whether the spec needed rebuilding, regardless
|
||||
# whether DAG pruning was enabled or not.
|
||||
job_vars = job_object["variables"]
|
||||
job_vars["SPACK_SPEC_NEEDS_REBUILD"] = str(rebuild_spec)
|
||||
|
||||
if cdash_handler:
|
||||
|
||||
@@ -19,12 +19,23 @@
|
||||
|
||||
|
||||
def setup_parser(subparser):
|
||||
# DEPRECATED: equivalent to --generic --target
|
||||
subparser.add_argument(
|
||||
"-g", "--generic-target", action="store_true", help="show the best generic target"
|
||||
"-g",
|
||||
"--generic-target",
|
||||
action="store_true",
|
||||
help="show the best generic target (deprecated)",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--known-targets", action="store_true", help="show a list of all known targets and exit"
|
||||
)
|
||||
target_type = subparser.add_mutually_exclusive_group()
|
||||
target_type.add_argument(
|
||||
"--family", action="store_true", help="print generic ISA (x86_64, aarch64, ppc64le, ...)"
|
||||
)
|
||||
target_type.add_argument(
|
||||
"--generic", action="store_true", help="print feature level (x86_64_v3, armv8.4a, ...)"
|
||||
)
|
||||
parts = subparser.add_mutually_exclusive_group()
|
||||
parts2 = subparser.add_mutually_exclusive_group()
|
||||
parts.add_argument(
|
||||
@@ -80,6 +91,7 @@ def display_target_group(header, target_group):
|
||||
|
||||
def arch(parser, args):
|
||||
if args.generic_target:
|
||||
# TODO: add deprecation warning in 0.24
|
||||
print(archspec.cpu.host().generic)
|
||||
return
|
||||
|
||||
@@ -96,6 +108,10 @@ def arch(parser, args):
|
||||
host_platform = spack.platforms.host()
|
||||
host_os = host_platform.operating_system(os_args)
|
||||
host_target = host_platform.target(target_args)
|
||||
if args.family:
|
||||
host_target = host_target.family
|
||||
elif args.generic:
|
||||
host_target = host_target.generic
|
||||
architecture = spack.spec.ArchSpec((str(host_platform), str(host_os), str(host_target)))
|
||||
|
||||
if args.platform:
|
||||
|
||||
@@ -660,34 +660,32 @@ def mirror_name_or_url(m):
|
||||
# accidentally to a dir in the current working directory.
|
||||
|
||||
# If there's a \ or / in the name, it's interpreted as a path or url.
|
||||
if "/" in m or "\\" in m:
|
||||
if "/" in m or "\\" in m or m in (".", ".."):
|
||||
return spack.mirror.Mirror(m)
|
||||
|
||||
# Otherwise, the named mirror is required to exist.
|
||||
try:
|
||||
return spack.mirror.require_mirror_name(m)
|
||||
except ValueError as e:
|
||||
raise argparse.ArgumentTypeError(
|
||||
str(e) + ". Did you mean {}?".format(os.path.join(".", m))
|
||||
)
|
||||
raise argparse.ArgumentTypeError(f"{e}. Did you mean {os.path.join('.', m)}?") from e
|
||||
|
||||
|
||||
def mirror_url(url):
|
||||
try:
|
||||
return spack.mirror.Mirror.from_url(url)
|
||||
except ValueError as e:
|
||||
raise argparse.ArgumentTypeError(str(e))
|
||||
raise argparse.ArgumentTypeError(str(e)) from e
|
||||
|
||||
|
||||
def mirror_directory(path):
|
||||
try:
|
||||
return spack.mirror.Mirror.from_local_path(path)
|
||||
except ValueError as e:
|
||||
raise argparse.ArgumentTypeError(str(e))
|
||||
raise argparse.ArgumentTypeError(str(e)) from e
|
||||
|
||||
|
||||
def mirror_name(name):
|
||||
try:
|
||||
return spack.mirror.require_mirror_name(name)
|
||||
except ValueError as e:
|
||||
raise argparse.ArgumentTypeError(str(e))
|
||||
raise argparse.ArgumentTypeError(str(e)) from e
|
||||
|
||||
@@ -85,8 +85,14 @@ def _retrieve_develop_source(spec: spack.spec.Spec, abspath: str) -> None:
|
||||
|
||||
|
||||
def develop(parser, args):
|
||||
# Note: we could put develop specs in any scope, but I assume
|
||||
# users would only ever want to do this for either (a) an active
|
||||
# env or (b) a specified config file (e.g. that is included by
|
||||
# an environment)
|
||||
# TODO: when https://github.com/spack/spack/pull/35307 is merged,
|
||||
# an active env is not required if a scope is specified
|
||||
env = spack.cmd.require_active_env(cmd_name="develop")
|
||||
if not args.spec:
|
||||
env = spack.cmd.require_active_env(cmd_name="develop")
|
||||
if args.clone is False:
|
||||
raise SpackError("No spec provided to spack develop command")
|
||||
|
||||
@@ -116,16 +122,18 @@ def develop(parser, args):
|
||||
raise SpackError("spack develop requires at most one named spec")
|
||||
|
||||
spec = specs[0]
|
||||
|
||||
version = spec.versions.concrete_range_as_version
|
||||
if not version:
|
||||
raise SpackError("Packages to develop must have a concrete version")
|
||||
# look up the maximum version so infinity versions are preferred for develop
|
||||
version = max(spec.package_class.versions.keys())
|
||||
tty.msg(f"Defaulting to highest version: {spec.name}@{version}")
|
||||
spec.versions = spack.version.VersionList([version])
|
||||
|
||||
# If user does not specify --path, we choose to create a directory in the
|
||||
# active environment's directory, named after the spec
|
||||
path = args.path or spec.name
|
||||
if not os.path.isabs(path):
|
||||
env = spack.cmd.require_active_env(cmd_name="develop")
|
||||
abspath = spack.util.path.canonicalize_path(path, default_wd=env.path)
|
||||
else:
|
||||
abspath = path
|
||||
@@ -149,13 +157,6 @@ def develop(parser, args):
|
||||
|
||||
_retrieve_develop_source(spec, abspath)
|
||||
|
||||
# Note: we could put develop specs in any scope, but I assume
|
||||
# users would only ever want to do this for either (a) an active
|
||||
# env or (b) a specified config file (e.g. that is included by
|
||||
# an environment)
|
||||
# TODO: when https://github.com/spack/spack/pull/35307 is merged,
|
||||
# an active env is not required if a scope is specified
|
||||
env = spack.cmd.require_active_env(cmd_name="develop")
|
||||
tty.debug("Updating develop config for {0} transactionally".format(env.name))
|
||||
with env.write_transaction():
|
||||
if args.build_directory is not None:
|
||||
|
||||
@@ -174,9 +174,9 @@ def query_arguments(args):
|
||||
if (args.missing or args.only_missing) and not args.only_deprecated:
|
||||
installed.append(InstallStatuses.MISSING)
|
||||
|
||||
known = any
|
||||
predicate_fn = None
|
||||
if args.unknown:
|
||||
known = False
|
||||
predicate_fn = lambda x: not spack.repo.PATH.exists(x.spec.name)
|
||||
|
||||
explicit = any
|
||||
if args.explicit:
|
||||
@@ -184,7 +184,7 @@ def query_arguments(args):
|
||||
if args.implicit:
|
||||
explicit = False
|
||||
|
||||
q_args = {"installed": installed, "known": known, "explicit": explicit}
|
||||
q_args = {"installed": installed, "predicate_fn": predicate_fn, "explicit": explicit}
|
||||
|
||||
install_tree = args.install_tree
|
||||
upstreams = spack.config.get("upstreams", {})
|
||||
|
||||
@@ -378,7 +378,10 @@ def refresh(module_type, specs, args):
|
||||
def modules_cmd(parser, args, module_type, callbacks=callbacks):
|
||||
# Qualifiers to be used when querying the db for specs
|
||||
constraint_qualifiers = {
|
||||
"refresh": {"installed": True, "known": lambda x: not spack.repo.PATH.exists(x)}
|
||||
"refresh": {
|
||||
"installed": True,
|
||||
"predicate_fn": lambda x: spack.repo.PATH.exists(x.spec.name),
|
||||
}
|
||||
}
|
||||
query_args = constraint_qualifiers.get(args.subparser_name, {})
|
||||
|
||||
|
||||
@@ -33,6 +33,8 @@
|
||||
YamlFilesystemView.
|
||||
|
||||
"""
|
||||
import sys
|
||||
|
||||
import llnl.util.tty as tty
|
||||
from llnl.util.link_tree import MergeConflictError
|
||||
|
||||
@@ -178,7 +180,12 @@ def setup_parser(sp):
|
||||
|
||||
|
||||
def view(parser, args):
|
||||
"Produce a view of a set of packages."
|
||||
"""Produce a view of a set of packages."""
|
||||
|
||||
if sys.platform == "win32" and args.action in ("hardlink", "hard"):
|
||||
# Hard-linked views are not yet allowed on Windows.
|
||||
# See https://github.com/spack/spack/pull/46335#discussion_r1757411915
|
||||
tty.die("Hard linking is not supported on Windows. Please use symlinks or copy methods.")
|
||||
|
||||
specs = spack.cmd.parse_specs(args.specs)
|
||||
path = args.path[0]
|
||||
|
||||
@@ -92,6 +92,14 @@ def c11_flag(self):
|
||||
else:
|
||||
return "-std=c1x"
|
||||
|
||||
@property
|
||||
def c18_flag(self):
|
||||
# c18 supported since oneapi 2022, which is classic version 2021.5.0
|
||||
if self.real_version < Version("21.5.0"):
|
||||
raise UnsupportedCompilerFlag(self, "the C18 standard", "c18_flag", "< 21.5.0")
|
||||
else:
|
||||
return "-std=c18"
|
||||
|
||||
@property
|
||||
def cc_pic_flag(self):
|
||||
return "-fPIC"
|
||||
|
||||
@@ -293,6 +293,17 @@ def platform_toolset_ver(self):
|
||||
vs22_toolset = Version(toolset_ver) > Version("142")
|
||||
return toolset_ver if not vs22_toolset else "143"
|
||||
|
||||
@property
|
||||
def visual_studio_version(self):
|
||||
"""The four digit Visual Studio version (i.e. 2019 or 2022)
|
||||
|
||||
Note: This differs from the msvc version or toolset version as
|
||||
those properties track the compiler and build tools version
|
||||
respectively, whereas this tracks the VS release associated
|
||||
with a given MSVC compiler.
|
||||
"""
|
||||
return re.search(r"[0-9]{4}", self.cc).group(0)
|
||||
|
||||
def _compiler_version(self, compiler):
|
||||
"""Returns version object for given compiler"""
|
||||
# ignore_errors below is true here due to ifx's
|
||||
|
||||
@@ -7,7 +7,9 @@
|
||||
from os.path import dirname, join
|
||||
|
||||
from llnl.util import tty
|
||||
from llnl.util.filesystem import ancestor
|
||||
|
||||
import spack.util.executable
|
||||
from spack.compiler import Compiler
|
||||
from spack.version import Version
|
||||
|
||||
@@ -116,6 +118,24 @@ def fc_pic_flag(self):
|
||||
def stdcxx_libs(self):
|
||||
return ("-cxxlib",)
|
||||
|
||||
@property
|
||||
def prefix(self):
|
||||
# OneAPI reports its install prefix when running ``--version``
|
||||
# on the line ``InstalledDir: <prefix>/bin/compiler``.
|
||||
cc = spack.util.executable.Executable(self.cc)
|
||||
with self.compiler_environment():
|
||||
oneapi_output = cc("--version", output=str, error=str)
|
||||
|
||||
for line in oneapi_output.splitlines():
|
||||
if line.startswith("InstalledDir:"):
|
||||
oneapi_prefix = line.split(":")[1].strip()
|
||||
# Go from <prefix>/bin/compiler to <prefix>
|
||||
return ancestor(oneapi_prefix, 2)
|
||||
|
||||
raise RuntimeError(
|
||||
"could not find install prefix of OneAPI from output:\n\t{}".format(oneapi_output)
|
||||
)
|
||||
|
||||
def setup_custom_environment(self, pkg, env):
|
||||
# workaround bug in icpx driver where it requires sycl-post-link is on the PATH
|
||||
# It is located in the same directory as the driver. Error message:
|
||||
|
||||
@@ -299,12 +299,9 @@ def __reduce__(self):
|
||||
database. If it is a spec, we'll evaluate
|
||||
``spec.satisfies(query_spec)``
|
||||
|
||||
known (bool or None): Specs that are "known" are those
|
||||
for which Spack can locate a ``package.py`` file -- i.e.,
|
||||
Spack "knows" how to install them. Specs that are unknown may
|
||||
represent packages that existed in a previous version of
|
||||
Spack, but have since either changed their name or
|
||||
been removed
|
||||
predicate_fn: optional predicate taking an InstallRecord as argument, and returning
|
||||
whether that record is selected for the query. It can be used to craft criteria
|
||||
that need some data for selection not provided by the Database itself.
|
||||
|
||||
installed (bool or InstallStatus or typing.Iterable or None):
|
||||
if ``True``, includes only installed
|
||||
@@ -604,6 +601,9 @@ def _path(self, spec: "spack.spec.Spec") -> pathlib.Path:
|
||||
return self.dir / f"{spec.name}-{spec.dag_hash()}"
|
||||
|
||||
|
||||
SelectType = Callable[[InstallRecord], bool]
|
||||
|
||||
|
||||
class Database:
|
||||
#: Fields written for each install record
|
||||
record_fields: Tuple[str, ...] = DEFAULT_INSTALL_RECORD_FIELDS
|
||||
@@ -1245,7 +1245,7 @@ def _add(
|
||||
self._data[key].explicit = explicit
|
||||
|
||||
@_autospec
|
||||
def add(self, spec: "spack.spec.Spec", *, explicit: bool = False) -> None:
|
||||
def add(self, spec: "spack.spec.Spec", *, explicit: bool = False, allow_missing=False) -> None:
|
||||
"""Add spec at path to database, locking and reading DB to sync.
|
||||
|
||||
``add()`` will lock and read from the DB on disk.
|
||||
@@ -1254,7 +1254,7 @@ def add(self, spec: "spack.spec.Spec", *, explicit: bool = False) -> None:
|
||||
# TODO: ensure that spec is concrete?
|
||||
# Entire add is transactional.
|
||||
with self.write_transaction():
|
||||
self._add(spec, explicit=explicit)
|
||||
self._add(spec, explicit=explicit, allow_missing=allow_missing)
|
||||
|
||||
def _get_matching_spec_key(self, spec: "spack.spec.Spec", **kwargs) -> str:
|
||||
"""Get the exact spec OR get a single spec that matches."""
|
||||
@@ -1526,7 +1526,7 @@ def get_by_hash(self, dag_hash, default=None, installed=any):
|
||||
def _query(
|
||||
self,
|
||||
query_spec=any,
|
||||
known=any,
|
||||
predicate_fn: Optional[SelectType] = None,
|
||||
installed=True,
|
||||
explicit=any,
|
||||
start_date=None,
|
||||
@@ -1534,7 +1534,7 @@ def _query(
|
||||
hashes=None,
|
||||
in_buildcache=any,
|
||||
origin=None,
|
||||
):
|
||||
) -> List["spack.spec.Spec"]:
|
||||
"""Run a query on the database."""
|
||||
|
||||
# TODO: Specs are a lot like queries. Should there be a
|
||||
@@ -1580,7 +1580,7 @@ def _query(
|
||||
if explicit is not any and rec.explicit != explicit:
|
||||
continue
|
||||
|
||||
if known is not any and known(rec.spec.name):
|
||||
if predicate_fn is not None and not predicate_fn(rec):
|
||||
continue
|
||||
|
||||
if start_date or end_date:
|
||||
@@ -1665,14 +1665,14 @@ def query(self, *args, **kwargs):
|
||||
query.__doc__ = ""
|
||||
query.__doc__ += _QUERY_DOCSTRING
|
||||
|
||||
def query_one(self, query_spec, known=any, installed=True):
|
||||
def query_one(self, query_spec, predicate_fn=None, installed=True):
|
||||
"""Query for exactly one spec that matches the query spec.
|
||||
|
||||
Raises an assertion error if more than one spec matches the
|
||||
query. Returns None if no installed package matches.
|
||||
|
||||
"""
|
||||
concrete_specs = self.query(query_spec, known=known, installed=installed)
|
||||
concrete_specs = self.query(query_spec, predicate_fn=predicate_fn, installed=installed)
|
||||
assert len(concrete_specs) <= 1
|
||||
return concrete_specs[0] if concrete_specs else None
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
import os.path
|
||||
import re
|
||||
import sys
|
||||
import traceback
|
||||
import warnings
|
||||
from typing import Dict, Iterable, List, Optional, Set, Tuple, Type
|
||||
|
||||
@@ -18,6 +19,7 @@
|
||||
import llnl.util.lang
|
||||
import llnl.util.tty
|
||||
|
||||
import spack.error
|
||||
import spack.spec
|
||||
import spack.util.elf as elf_utils
|
||||
import spack.util.environment
|
||||
@@ -66,6 +68,21 @@ def file_identifier(path):
|
||||
return s.st_dev, s.st_ino
|
||||
|
||||
|
||||
def dedupe_paths(paths: List[str]) -> List[str]:
|
||||
"""Deduplicate paths based on inode and device number. In case the list contains first a
|
||||
symlink and then the directory it points to, the symlink is replaced with the directory path.
|
||||
This ensures that we pick for example ``/usr/bin`` over ``/bin`` if the latter is a symlink to
|
||||
the former."""
|
||||
seen: Dict[Tuple[int, int], str] = {}
|
||||
for path in paths:
|
||||
identifier = file_identifier(path)
|
||||
if identifier not in seen:
|
||||
seen[identifier] = path
|
||||
elif not os.path.islink(path):
|
||||
seen[identifier] = path
|
||||
return list(seen.values())
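A quick usage sketch of the behavior described in the docstring (assuming ``dedupe_paths`` and ``file_identifier`` from above are in scope, and that the platform supports symlinks):

    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        real = os.path.join(tmp, "real_bin")
        link = os.path.join(tmp, "bin")
        os.mkdir(real)
        os.symlink(real, link)
        # The symlink is collapsed onto its target, so only the real directory survives.
        print(dedupe_paths([link, real]))  # [<tmp>/real_bin]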
|
||||
|
||||
|
||||
def executables_in_path(path_hints: List[str]) -> Dict[str, str]:
|
||||
"""Get the paths of all executables available from the current PATH.
|
||||
|
||||
@@ -82,8 +99,7 @@ def executables_in_path(path_hints: List[str]) -> Dict[str, str]:
|
||||
"""
|
||||
search_paths = llnl.util.filesystem.search_paths_for_executables(*path_hints)
|
||||
# Make sure we don't doubly list /usr/lib and /lib etc.
|
||||
search_paths = list(llnl.util.lang.dedupe(search_paths, key=file_identifier))
|
||||
return path_to_dict(search_paths)
|
||||
return path_to_dict(dedupe_paths(search_paths))
|
||||
|
||||
|
||||
def accept_elf(path, host_compat):
|
||||
@@ -144,7 +160,7 @@ def libraries_in_ld_and_system_library_path(
|
||||
search_paths = list(filter(os.path.isdir, search_paths))
|
||||
|
||||
# Make sure we don't doubly list /usr/lib and /lib etc.
|
||||
search_paths = list(llnl.util.lang.dedupe(search_paths, key=file_identifier))
|
||||
search_paths = dedupe_paths(search_paths)
|
||||
|
||||
try:
|
||||
host_compat = elf_utils.get_elf_compat(sys.executable)
|
||||
@@ -260,8 +276,12 @@ def detect_specs(
|
||||
)
|
||||
except Exception as e:
|
||||
specs = []
|
||||
if spack.error.SHOW_BACKTRACE:
|
||||
details = traceback.format_exc()
|
||||
else:
|
||||
details = f"[{e.__class__.__name__}: {e}]"
|
||||
warnings.warn(
|
||||
f'error detecting "{pkg.name}" from prefix {candidate_path} [{str(e)}]'
|
||||
f'error detecting "{pkg.name}" from prefix {candidate_path}: {details}'
|
||||
)
|
||||
|
||||
if not specs:
|
||||
@@ -435,9 +455,9 @@ def by_path(
|
||||
llnl.util.tty.debug(
|
||||
f"[EXTERNAL DETECTION] Skipping {pkg_name}: timeout reached"
|
||||
)
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
llnl.util.tty.debug(
|
||||
f"[EXTERNAL DETECTION] Skipping {pkg_name}: exception occured {e}"
|
||||
f"[EXTERNAL DETECTION] Skipping {pkg_name}: {traceback.format_exc()}"
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
@@ -9,11 +9,13 @@
|
||||
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
from enum import Enum
|
||||
from typing import List, Optional
|
||||
|
||||
import spack.deptypes as dt
|
||||
import spack.environment.environment as ev
|
||||
import spack.paths
|
||||
import spack.spec
|
||||
import spack.traverse as traverse
|
||||
|
||||
@@ -226,6 +228,7 @@ def to_dict(self):
|
||||
"install_deps_target": self._target("install-deps"),
|
||||
"any_hash_target": self._target("%"),
|
||||
"jobserver_support": self.jobserver_support,
|
||||
"spack_script": shlex.quote(spack.paths.spack_script),
|
||||
"adjacency_list": self.make_adjacency_list,
|
||||
"phony_convenience_targets": " ".join(self.phony_convenience_targets),
|
||||
"pkg_ids_variable": self.pkg_identifier_variable,
|
||||
|
||||
@@ -1159,6 +1159,8 @@ def clear(self, re_read=False):
|
||||
# things that cannot be recreated from file
|
||||
self.new_specs = [] # write packages for these on write()
|
||||
|
||||
self.manifest.clear()
|
||||
|
||||
@property
|
||||
def active(self):
|
||||
"""True if this environment is currently active."""
|
||||
@@ -2163,6 +2165,13 @@ def _concrete_specs_dict(self):
|
||||
# Assumes no legacy formats, since this was just created.
|
||||
spec_dict[ht.dag_hash.name] = s.dag_hash()
|
||||
concrete_specs[s.dag_hash()] = spec_dict
|
||||
|
||||
if s.build_spec is not s:
|
||||
for d in s.build_spec.traverse():
|
||||
build_spec_dict = d.node_dict_with_hashes(hash=ht.dag_hash)
|
||||
build_spec_dict[ht.dag_hash.name] = d.dag_hash()
|
||||
concrete_specs[d.dag_hash()] = build_spec_dict
|
||||
|
||||
return concrete_specs
|
||||
|
||||
def _concrete_roots_dict(self):
|
||||
@@ -2322,7 +2331,7 @@ def filter_specs(self, reader, json_specs_by_hash, order_concretized):
|
||||
specs_by_hash[lockfile_key] = spec
|
||||
|
||||
# Second pass: For each spec, get its dependencies from the node dict
|
||||
# and add them to the spec
|
||||
# and add them to the spec, including build specs
|
||||
for lockfile_key, node_dict in json_specs_by_hash.items():
|
||||
name, data = reader.name_and_data(node_dict)
|
||||
for _, dep_hash, deptypes, _, virtuals in reader.dependencies_from_node_dict(data):
|
||||
@@ -2330,6 +2339,10 @@ def filter_specs(self, reader, json_specs_by_hash, order_concretized):
|
||||
specs_by_hash[dep_hash], depflag=dt.canonicalize(deptypes), virtuals=virtuals
|
||||
)
|
||||
|
||||
if "build_spec" in node_dict:
|
||||
_, bhash, _ = reader.extract_build_spec_info_from_node_dict(node_dict)
|
||||
specs_by_hash[lockfile_key]._build_spec = specs_by_hash[bhash]
|
||||
|
||||
# Traverse the root specs one at a time in the order they appear.
|
||||
# The first time we see each DAG hash, that's the one we want to
|
||||
# keep. This is only required as long as we support older lockfile
|
||||
@@ -2789,6 +2802,11 @@ def remove_user_spec(self, user_spec: str) -> None:
|
||||
raise SpackEnvironmentError(msg) from e
|
||||
self.changed = True
|
||||
|
||||
def clear(self) -> None:
|
||||
"""Clear all user specs from the list of root specs"""
|
||||
self.configuration["specs"] = []
|
||||
self.changed = True
|
||||
|
||||
def override_user_spec(self, user_spec: str, idx: int) -> None:
|
||||
"""Overrides the user spec at index idx with the one passed as input.
|
||||
|
||||
|
||||
@@ -12,6 +12,9 @@
|
||||
#: this is module-scoped because it needs to be set very early
|
||||
debug = 0
|
||||
|
||||
#: whether to show a backtrace when an error is printed, enabled with --backtrace.
|
||||
SHOW_BACKTRACE = False
|
||||
|
||||
|
||||
class SpackError(Exception):
|
||||
"""This is the superclass for all Spack errors.
|
||||
|
||||
@@ -100,10 +100,12 @@ def view_copy(
|
||||
|
||||
spack.relocate.relocate_text(files=[dst], prefixes=prefix_to_projection)
|
||||
|
||||
try:
|
||||
os.chown(dst, src_stat.st_uid, src_stat.st_gid)
|
||||
except OSError:
|
||||
tty.debug(f"Can't change the permissions for {dst}")
|
||||
# The os module on Windows does not have a chown function.
|
||||
if sys.platform != "win32":
|
||||
try:
|
||||
os.chown(dst, src_stat.st_uid, src_stat.st_gid)
|
||||
except OSError:
|
||||
tty.debug(f"Can't change the permissions for {dst}")
|
||||
|
||||
|
||||
#: supported string values for `link_type` in an env, mapped to canonical values
|
||||
|
||||
@@ -2,8 +2,7 @@
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
"""
|
||||
This module encapsulates package installation functionality.
|
||||
"""This module encapsulates package installation functionality.
|
||||
|
||||
The PackageInstaller coordinates concurrent builds of packages for the same
|
||||
Spack instance by leveraging the dependency DAG and file system locks. It
|
||||
@@ -17,16 +16,18 @@
|
||||
File system locks enable coordination such that no two processes attempt to
|
||||
build the same or a failed dependency package.
|
||||
|
||||
Failures to install dependency packages result in removal of their dependents'
|
||||
build tasks from the current process. A failure file is also written (and
|
||||
locked) so that other processes can detect the failure and adjust their build
|
||||
tasks accordingly.
|
||||
If a dependency package fails to install, its dependents' tasks will be
|
||||
removed from the installing process's queue. A failure file is also written
|
||||
and locked. Other processes use this file to detect the failure and dequeue
|
||||
its dependents.
|
||||
|
||||
This module supports the coordination of local and distributed concurrent
|
||||
installations of packages in a Spack instance.
|
||||
|
||||
"""
|
||||
|
||||
import copy
|
||||
import enum
|
||||
import glob
|
||||
import heapq
|
||||
import io
|
||||
@@ -35,6 +36,7 @@
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
from collections import defaultdict
|
||||
from gzip import GzipFile
|
||||
from typing import Dict, Iterator, List, Optional, Set, Tuple, Union
|
||||
@@ -42,6 +44,7 @@
|
||||
import llnl.util.filesystem as fs
|
||||
import llnl.util.lock as lk
|
||||
import llnl.util.tty as tty
|
||||
from llnl.string import ordinal
|
||||
from llnl.util.lang import pretty_seconds
|
||||
from llnl.util.tty.color import colorize
|
||||
from llnl.util.tty.log import log_output
|
||||
@@ -57,6 +60,7 @@
|
||||
import spack.package_base
|
||||
import spack.package_prefs as prefs
|
||||
import spack.repo
|
||||
import spack.rewiring
|
||||
import spack.spec
|
||||
import spack.store
|
||||
import spack.util.executable
|
||||
@@ -70,25 +74,32 @@
|
||||
#: were added (see https://docs.python.org/2/library/heapq.html).
|
||||
_counter = itertools.count(0)
|
||||
|
||||
#: Build status indicating task has been added.
|
||||
STATUS_ADDED = "queued"
|
||||
|
||||
#: Build status indicating the spec failed to install
|
||||
STATUS_FAILED = "failed"
|
||||
class BuildStatus(enum.Enum):
|
||||
"""Different build (task) states."""
|
||||
|
||||
#: Build status indicating the spec is being installed (possibly by another
|
||||
#: process)
|
||||
STATUS_INSTALLING = "installing"
|
||||
#: Build status indicating task has been added/queued.
|
||||
QUEUED = enum.auto()
|
||||
|
||||
#: Build status indicating the spec was successfully installed
|
||||
STATUS_INSTALLED = "installed"
|
||||
#: Build status indicating the spec failed to install
|
||||
FAILED = enum.auto()
|
||||
|
||||
#: Build status indicating the task has been popped from the queue
|
||||
STATUS_DEQUEUED = "dequeued"
|
||||
#: Build status indicating the spec is being installed (possibly by another
|
||||
#: process)
|
||||
INSTALLING = enum.auto()
|
||||
|
||||
#: Build status indicating task has been removed (to maintain priority
|
||||
#: queue invariants).
|
||||
STATUS_REMOVED = "removed"
|
||||
#: Build status indicating the spec was successfully installed
|
||||
INSTALLED = enum.auto()
|
||||
|
||||
#: Build status indicating the task has been popped from the queue
|
||||
DEQUEUED = enum.auto()
|
||||
|
||||
#: Build status indicating task has been removed (to maintain priority
|
||||
#: queue invariants).
|
||||
REMOVED = enum.auto()
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.name.lower()}"
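A small usage check of the enum defined above: its lowercase string form lines up with the module-level string constants it replaces.

    assert str(BuildStatus.QUEUED) == "queued"          # formerly STATUS_ADDED
    assert str(BuildStatus.INSTALLING) == "installing"  # formerly STATUS_INSTALLING
    assert str(BuildStatus.INSTALLED) == "installed"    # formerly STATUS_INSTALLED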
|
||||
|
||||
|
||||
def _write_timer_json(pkg, timer, cache):
|
||||
@@ -101,13 +112,22 @@ def _write_timer_json(pkg, timer, cache):
|
||||
return
|
||||
|
||||
|
||||
class InstallAction:
|
||||
class ExecuteResult(enum.Enum):
|
||||
# Task succeeded
|
||||
SUCCESS = enum.auto()
|
||||
# Task failed
|
||||
FAILED = enum.auto()
|
||||
# Task is missing build spec and will be requeued
|
||||
MISSING_BUILD_SPEC = enum.auto()
|
||||
|
||||
|
||||
class InstallAction(enum.Enum):
|
||||
#: Don't perform an install
|
||||
NONE = 0
|
||||
NONE = enum.auto()
|
||||
#: Do a standard install
|
||||
INSTALL = 1
|
||||
INSTALL = enum.auto()
|
||||
#: Do an overwrite install
|
||||
OVERWRITE = 2
|
||||
OVERWRITE = enum.auto()
|
||||
|
||||
|
||||
class InstallStatus:
|
||||
@@ -431,7 +451,7 @@ def _process_binary_cache_tarball(
|
||||
"""
|
||||
with timer.measure("fetch"):
|
||||
download_result = binary_distribution.download_tarball(
|
||||
pkg.spec, unsigned, mirrors_for_spec
|
||||
pkg.spec.build_spec, unsigned, mirrors_for_spec
|
||||
)
|
||||
|
||||
if download_result is None:
|
||||
@@ -442,6 +462,11 @@ def _process_binary_cache_tarball(
|
||||
with timer.measure("install"), spack.util.path.filter_padding():
|
||||
binary_distribution.extract_tarball(pkg.spec, download_result, force=False, timer=timer)
|
||||
|
||||
if pkg.spec.spliced: # overwrite old metadata with new
|
||||
spack.store.STORE.layout.write_spec(
|
||||
pkg.spec, spack.store.STORE.layout.spec_file_path(pkg.spec)
|
||||
)
|
||||
|
||||
if hasattr(pkg, "_post_buildcache_install_hook"):
|
||||
pkg._post_buildcache_install_hook()
|
||||
|
||||
@@ -677,7 +702,7 @@ def log(pkg: "spack.package_base.PackageBase") -> None:
|
||||
def package_id(spec: "spack.spec.Spec") -> str:
|
||||
"""A "unique" package identifier for installation purposes
|
||||
|
||||
The identifier is used to track build tasks, locks, install, and
|
||||
The identifier is used to track tasks, locks, install, and
|
||||
failure statuses.
|
||||
|
||||
The identifier needs to distinguish between combinations of compilers
|
||||
@@ -736,14 +761,14 @@ def __init__(self, pkg: "spack.package_base.PackageBase", install_args: dict):
|
||||
)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
"""Returns a formal representation of the build request."""
|
||||
"""Return a formal representation of the build request."""
|
||||
rep = f"{self.__class__.__name__}("
|
||||
for attr, value in self.__dict__.items():
|
||||
rep += f"{attr}={value.__repr__()}, "
|
||||
return f"{rep.strip(', ')})"
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""Returns a printable version of the build request."""
|
||||
"""Return a printable version of the build request."""
|
||||
return f"package={self.pkg.name}, install_args={self.install_args}"
|
||||
|
||||
def _add_default_args(self) -> None:
|
||||
@@ -840,37 +865,42 @@ def traverse_dependencies(self, spec=None, visited=None) -> Iterator["spack.spec
|
||||
yield dep
|
||||
|
||||
|
||||
class BuildTask:
|
||||
"""Class for representing the build task for a package."""
|
||||
class Task:
|
||||
"""Base class for representing a task for a package."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pkg: "spack.package_base.PackageBase",
|
||||
request: Optional[BuildRequest],
|
||||
compiler: bool,
|
||||
start: float,
|
||||
attempts: int,
|
||||
status: str,
|
||||
installed: Set[str],
|
||||
request: BuildRequest,
|
||||
*,
|
||||
compiler: bool = False,
|
||||
start: float = 0.0,
|
||||
attempts: int = 0,
|
||||
status: BuildStatus = BuildStatus.QUEUED,
|
||||
installed: Set[str] = set(),
|
||||
):
|
||||
"""
|
||||
Instantiate a build task for a package.
|
||||
Instantiate a task for a package.
|
||||
|
||||
Args:
|
||||
pkg: the package to be built and installed
|
||||
request: the associated install request where ``None`` can be
|
||||
used to indicate the package was explicitly requested by the user
|
||||
compiler: whether task is for a bootstrap compiler
|
||||
request: the associated install request
|
||||
start: the initial start time for the package, in seconds
|
||||
attempts: the number of attempts to install the package
|
||||
attempts: the number of attempts to install the package, which
|
||||
should be 0 when the task is initially instantiated
|
||||
status: the installation status
|
||||
installed: the identifiers of packages that have
|
||||
installed: the (string) identifiers of packages that have
|
||||
been installed so far
|
||||
|
||||
Raises:
|
||||
``InstallError`` if the build status is incompatible with the task
|
||||
``TypeError`` if provided an argument of the wrong type
|
||||
``ValueError`` if provided an argument with the wrong value or state
|
||||
"""
|
||||
|
||||
# Ensure dealing with a package that has a concrete spec
|
||||
if not isinstance(pkg, spack.package_base.PackageBase):
|
||||
raise ValueError(f"{str(pkg)} must be a package")
|
||||
raise TypeError(f"{str(pkg)} must be a package")
|
||||
|
||||
self.pkg = pkg
|
||||
if not self.pkg.spec.concrete:
|
||||
@@ -881,26 +911,34 @@ def __init__(
|
||||
|
||||
# The explicit build request associated with the package
|
||||
if not isinstance(request, BuildRequest):
|
||||
raise ValueError(f"{str(pkg)} must have a build request")
|
||||
|
||||
raise TypeError(f"{request} is not a valid build request")
|
||||
self.request = request
|
||||
|
||||
# Initialize the status to an active state. The status is used to
|
||||
# ensure priority queue invariants when tasks are "removed" from the
|
||||
# queue.
|
||||
if status == STATUS_REMOVED:
|
||||
raise spack.error.InstallError(
|
||||
f"Cannot create a build task for {self.pkg_id} with status '{status}'", pkg=pkg
|
||||
)
|
||||
if not isinstance(status, BuildStatus):
|
||||
raise TypeError(f"{status} is not a valid build status")
|
||||
|
||||
# The initial build task cannot have status "removed".
|
||||
if attempts == 0 and status == BuildStatus.REMOVED:
|
||||
raise spack.error.InstallError(
|
||||
f"Cannot create a task for {self.pkg_id} with status '{status}'", pkg=pkg
|
||||
)
|
||||
self.status = status
|
||||
|
||||
# Package is associated with a bootstrap compiler
|
||||
self.compiler = compiler
|
||||
# cache the PID, which is used for distributed build messages in self.execute
|
||||
self.pid = os.getpid()
|
||||
|
||||
# The initial start time for processing the spec
|
||||
self.start = start
|
||||
|
||||
if not isinstance(installed, set):
|
||||
raise TypeError(
|
||||
f"BuildTask constructor requires 'installed' be a 'set', "
|
||||
f"not '{installed.__class__.__name__}'."
|
||||
)
|
||||
|
||||
# Set of dependents, which needs to include the requesting package
|
||||
# to support tracking of parallel, multi-spec, environment installs.
|
||||
self.dependents = set(get_dependent_ids(self.pkg.spec))
|
||||
@@ -921,16 +959,22 @@ def __init__(
|
||||
)
|
||||
|
||||
# List of uninstalled dependencies, which is used to establish
|
||||
# the priority of the build task.
|
||||
#
|
||||
# the priority of the task.
|
||||
self.uninstalled_deps = set(
|
||||
pkg_id for pkg_id in self.dependencies if pkg_id not in installed
|
||||
)
|
||||
|
||||
# Ensure key sequence-related properties are updated accordingly.
|
||||
self.attempts = 0
|
||||
self.attempts = attempts
|
||||
self._update()
|
||||
|
||||
def execute(self, install_status: InstallStatus) -> ExecuteResult:
|
||||
"""Execute the work of this task.
|
||||
|
||||
The ``install_status`` is an ``InstallStatus`` object used to format progress reporting for
|
||||
this task in the context of the full ``BuildRequest``."""
|
||||
raise NotImplementedError
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.key == other.key
|
||||
|
||||
@@ -950,14 +994,14 @@ def __ne__(self, other):
|
||||
return self.key != other.key
|
||||
|
||||
def __repr__(self) -> str:
|
||||
"""Returns a formal representation of the build task."""
|
||||
"""Returns a formal representation of the task."""
|
||||
rep = f"{self.__class__.__name__}("
|
||||
for attr, value in self.__dict__.items():
|
||||
rep += f"{attr}={value.__repr__()}, "
|
||||
return f"{rep.strip(', ')})"
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""Returns a printable version of the build task."""
|
||||
"""Returns a printable version of the task."""
|
||||
dependencies = f"#dependencies={len(self.dependencies)}"
|
||||
return "priority={0}, status={1}, start={2}, {3}".format(
|
||||
self.priority, self.status, self.start, dependencies
|
||||
@@ -974,8 +1018,7 @@ def _update(self) -> None:
|
||||
|
||||
def add_dependent(self, pkg_id: str) -> None:
|
||||
"""
|
||||
Ensure the dependent package id is in the task's list so it will be
|
||||
properly updated when this package is installed.
|
||||
Ensure the package is in this task's ``dependents`` list.
|
||||
|
||||
Args:
|
||||
pkg_id: package identifier of the dependent package
|
||||
@@ -984,6 +1027,20 @@ def add_dependent(self, pkg_id: str) -> None:
|
||||
tty.debug(f"Adding {pkg_id} as a dependent of {self.pkg_id}")
|
||||
self.dependents.add(pkg_id)
|
||||
|
||||
def add_dependency(self, pkg_id, installed=False):
|
||||
"""
|
||||
Ensure the package is in this task's ``dependencies`` list.
|
||||
|
||||
Args:
|
||||
pkg_id (str): package identifier of the dependency package
|
||||
installed (bool): install status of the dependency package
|
||||
"""
|
||||
if pkg_id != self.pkg_id and pkg_id not in self.dependencies:
|
||||
tty.debug(f"Adding {pkg_id} as a depencency of {self.pkg_id}")
|
||||
self.dependencies.add(pkg_id)
|
||||
if not installed:
|
||||
self.uninstalled_deps.add(pkg_id)
|
||||
|
||||
def flag_installed(self, installed: List[str]) -> None:
|
||||
"""
|
||||
Ensure the dependency is not considered to still be uninstalled.
|
||||
@@ -1000,6 +1057,39 @@ def flag_installed(self, installed: List[str]) -> None:
|
||||
level=2,
|
||||
)
|
||||
|
||||
def _setup_install_dir(self, pkg: "spack.package_base.PackageBase") -> None:
|
||||
"""
|
||||
Create and ensure proper access controls for the install directory.
|
||||
Write a small metadata file with the current spack environment.
|
||||
|
||||
Args:
|
||||
pkg: the package to be built and installed
|
||||
"""
|
||||
# Move to a module level method.
|
||||
if not os.path.exists(pkg.spec.prefix):
|
||||
path = spack.util.path.debug_padded_filter(pkg.spec.prefix)
|
||||
tty.debug(f"Creating the installation directory {path}")
|
||||
spack.store.STORE.layout.create_install_directory(pkg.spec)
|
||||
else:
|
||||
# Set the proper group for the prefix
|
||||
group = prefs.get_package_group(pkg.spec)
|
||||
if group:
|
||||
fs.chgrp(pkg.spec.prefix, group)
|
||||
|
||||
# Set the proper permissions.
|
||||
# This has to be done after group because changing groups blows
|
||||
# away the sticky group bit on the directory
|
||||
mode = os.stat(pkg.spec.prefix).st_mode
|
||||
perms = prefs.get_package_dir_permissions(pkg.spec)
|
||||
if mode != perms:
|
||||
os.chmod(pkg.spec.prefix, perms)
|
||||
|
||||
# Ensure the metadata path exists as well
|
||||
fs.mkdirp(spack.store.STORE.layout.metadata_path(pkg.spec), mode=perms)
|
||||
|
||||
# Always write host environment - we assume this can change
|
||||
spack.store.STORE.layout.write_host_environment(pkg.spec)
|
||||
|
||||
@property
|
||||
def explicit(self) -> bool:
|
||||
return self.pkg.spec.dag_hash() in self.request.install_args.get("explicit", [])
|
||||
@@ -1030,7 +1120,7 @@ def key(self) -> Tuple[int, int]:
|
||||
"""The key is the tuple (# uninstalled dependencies, sequence)."""
|
||||
return (self.priority, self.sequence)
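A small standalone sketch of why the key pairs the number of uninstalled dependencies with a monotonically increasing sequence number: ties in priority fall back to insertion order, so equal-priority tasks pop FIFO from the heap. The package names below are only illustrative.

```python
import heapq
import itertools

_counter = itertools.count(0)
pq = []


def push(name: str, uninstalled_deps: int) -> None:
    # key = (priority, sequence): fewest uninstalled deps first, then insertion order
    heapq.heappush(pq, ((uninstalled_deps, next(_counter)), name))


push("zlib", 0)
push("cmake", 0)
push("hdf5", 2)

while pq:
    (priority, seq), name = heapq.heappop(pq)
    print(name, priority, seq)
# zlib and cmake (priority 0) pop before hdf5, in the order they were queued
```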
|
||||
|
||||
def next_attempt(self, installed) -> "BuildTask":
|
||||
def next_attempt(self, installed) -> "Task":
|
||||
"""Create a new, updated task for the next installation attempt."""
|
||||
task = copy.copy(self)
|
||||
task._update()
|
||||
@@ -1044,6 +1134,100 @@ def priority(self):
|
||||
return len(self.uninstalled_deps)
|
||||
|
||||
|
||||
class BuildTask(Task):
|
||||
"""Class for representing a build task for a package."""
|
||||
|
||||
def execute(self, install_status):
|
||||
"""
|
||||
Perform the installation of the requested spec and/or dependency
|
||||
represented by the build task.
|
||||
"""
|
||||
install_args = self.request.install_args
|
||||
tests = install_args.get("tests")
|
||||
unsigned = install_args.get("unsigned")
|
||||
|
||||
pkg, pkg_id = self.pkg, self.pkg_id
|
||||
|
||||
tty.msg(install_msg(pkg_id, self.pid, install_status))
|
||||
self.start = self.start or time.time()
|
||||
self.status = BuildStatus.INSTALLING
|
||||
|
||||
# Use the binary cache if requested
|
||||
if self.use_cache:
|
||||
if _install_from_cache(pkg, self.explicit, unsigned):
|
||||
return ExecuteResult.SUCCESS
|
||||
elif self.cache_only:
|
||||
raise spack.error.InstallError(
|
||||
"No binary found when cache-only was specified", pkg=pkg
|
||||
)
|
||||
else:
|
||||
tty.msg(f"No binary for {pkg_id} found: installing from source")
|
||||
|
||||
pkg.run_tests = tests is True or tests and pkg.name in tests
|
||||
|
||||
# hook that allows tests to inspect the Package before installation
|
||||
# see unit_test_check() docs.
|
||||
if not pkg.unit_test_check():
|
||||
return ExecuteResult.FAILED
|
||||
|
||||
try:
|
||||
# Create stage object now and let it be serialized for the child process. That
|
||||
# way monkeypatch in tests works correctly.
|
||||
pkg.stage
|
||||
|
||||
self._setup_install_dir(pkg)
|
||||
|
||||
# Create a child process to do the actual installation.
|
||||
# Preserve verbosity settings across installs.
|
||||
spack.package_base.PackageBase._verbose = spack.build_environment.start_build_process(
|
||||
pkg, build_process, install_args
|
||||
)
|
||||
|
||||
# Note: PARENT of the build process adds the new package to
|
||||
# the database, so that we don't need to re-read from file.
|
||||
spack.store.STORE.db.add(pkg.spec, explicit=self.explicit)
|
||||
except spack.error.StopPhase as e:
|
||||
# A StopPhase exception means that do_install was asked to
|
||||
# stop early from clients, and is not an error at this point
|
||||
pid = f"{self.pid}: " if tty.show_pid() else ""
|
||||
tty.debug(f"{pid}{str(e)}")
|
||||
tty.debug(f"Package stage directory: {pkg.stage.source_path}")
|
||||
return ExecuteResult.SUCCESS
|
||||
|
||||
|
||||
class RewireTask(Task):
|
||||
"""Class for representing a rewire task for a package."""
|
||||
|
||||
def execute(self, install_status):
|
||||
"""Execute rewire task
|
||||
|
||||
Rewire tasks are executed by either rewiring self.package.spec.build_spec that is already
|
||||
installed or downloading and rewiring a binary for it.
|
||||
|
||||
If it is available neither installed nor as a binary, return ExecuteResult.MISSING_BUILD_SPEC.
|
||||
This will prompt the Installer to requeue the task with a dependency on the BuildTask
|
||||
to install self.pkg.spec.build_spec
|
||||
"""
|
||||
oldstatus = self.status
|
||||
self.status = BuildStatus.INSTALLING
|
||||
tty.msg(install_msg(self.pkg_id, self.pid, install_status))
|
||||
self.start = self.start or time.time()
|
||||
if not self.pkg.spec.build_spec.installed:
|
||||
try:
|
||||
install_args = self.request.install_args
|
||||
unsigned = install_args.get("unsigned")
|
||||
_process_binary_cache_tarball(self.pkg, explicit=self.explicit, unsigned=unsigned)
|
||||
_print_installed_pkg(self.pkg.prefix)
|
||||
return ExecuteResult.SUCCESS
|
||||
except BaseException as e:
|
||||
tty.error(f"Failed to rewire {self.pkg.spec} from binary. {e}")
|
||||
self.status = oldstatus
|
||||
return ExecuteResult.MISSING_BUILD_SPEC
|
||||
spack.rewiring.rewire_node(self.pkg.spec, self.explicit)
|
||||
_print_installed_pkg(self.pkg.prefix)
|
||||
return ExecuteResult.SUCCESS
|
||||
|
||||
|
||||
class PackageInstaller:
|
||||
"""
|
||||
Class for managing the install process for a Spack instance based on a bottom-up DAG approach.
|
||||
@@ -1137,11 +1321,11 @@ def __init__(
|
||||
# List of build requests
|
||||
self.build_requests = [BuildRequest(pkg, install_args) for pkg in packages]
|
||||
|
||||
# Priority queue of build tasks
|
||||
self.build_pq: List[Tuple[Tuple[int, int], BuildTask]] = []
|
||||
# Priority queue of tasks
|
||||
self.build_pq: List[Tuple[Tuple[int, int], Task]] = []
|
||||
|
||||
# Mapping of unique package ids to build task
|
||||
self.build_tasks: Dict[str, BuildTask] = {}
|
||||
# Mapping of unique package ids to task
|
||||
self.build_tasks: Dict[str, Task] = {}
|
||||
|
||||
# Cache of package locks for failed packages, keyed on package's ids
|
||||
self.failed: Dict[str, Optional[lk.Lock]] = {}
|
||||
@@ -1162,6 +1346,9 @@ def __init__(
|
||||
# fast then that option applies to all build requests.
|
||||
self.fail_fast = False
|
||||
|
||||
# Initializing all_dependencies to empty. This will be set later in _init_queue.
|
||||
self.all_dependencies: Dict[str, Set[str]] = {}
|
||||
|
||||
def __repr__(self) -> str:
|
||||
"""Returns a formal representation of the package installer."""
|
||||
rep = f"{self.__class__.__name__}("
|
||||
@@ -1180,23 +1367,19 @@ def __str__(self) -> str:
|
||||
def _add_init_task(
|
||||
self,
|
||||
pkg: "spack.package_base.PackageBase",
|
||||
request: Optional[BuildRequest],
|
||||
is_compiler: bool,
|
||||
request: BuildRequest,
|
||||
all_deps: Dict[str, Set[str]],
|
||||
) -> None:
|
||||
"""
|
||||
Creates and queus the initial build task for the package.
|
||||
Creates and queues the initial task for the package.
|
||||
|
||||
Args:
|
||||
pkg: the package to be built and installed
|
||||
request (BuildRequest or None): the associated install request
|
||||
where ``None`` can be used to indicate the package was
|
||||
explicitly requested by the user
|
||||
is_compiler (bool): whether task is for a bootstrap compiler
|
||||
all_deps (defaultdict(set)): dictionary of all dependencies and
|
||||
associated dependents
|
||||
request: the associated install request
|
||||
all_deps: dictionary of all dependencies and associated dependents
|
||||
"""
|
||||
task = BuildTask(pkg, request, is_compiler, 0, 0, STATUS_ADDED, self.installed)
|
||||
cls = RewireTask if pkg.spec.spliced else BuildTask
|
||||
task = cls(pkg, request=request, status=BuildStatus.QUEUED, installed=self.installed)
|
||||
for dep_id in task.dependencies:
|
||||
all_deps[dep_id].add(package_id(pkg.spec))
|
||||
|
||||
@@ -1270,7 +1453,7 @@ def _check_deps_status(self, request: BuildRequest) -> None:
|
||||
else:
|
||||
lock.release_read()
|
||||
|
||||
def _prepare_for_install(self, task: BuildTask) -> None:
|
||||
def _prepare_for_install(self, task: Task) -> None:
|
||||
"""
|
||||
Check the database and leftover installation directories/files and
|
||||
prepare for a new install attempt for an uninstalled package.
|
||||
@@ -1278,7 +1461,7 @@ def _prepare_for_install(self, task: BuildTask) -> None:
|
||||
and ensuring the database is up-to-date.
|
||||
|
||||
Args:
|
||||
task (BuildTask): the build task whose associated package is
|
||||
task: the task whose associated package is
|
||||
being checked
|
||||
"""
|
||||
install_args = task.request.install_args
|
||||
@@ -1329,7 +1512,7 @@ def _prepare_for_install(self, task: BuildTask) -> None:
|
||||
spack.store.STORE.db.update_explicit(task.pkg.spec, True)
|
||||
|
||||
def _cleanup_all_tasks(self) -> None:
|
||||
"""Cleanup all build tasks to include releasing their locks."""
|
||||
"""Cleanup all tasks to include releasing their locks."""
|
||||
for pkg_id in self.locks:
|
||||
self._release_lock(pkg_id)
|
||||
|
||||
@@ -1361,7 +1544,7 @@ def _cleanup_failed(self, pkg_id: str) -> None:
|
||||
|
||||
def _cleanup_task(self, pkg: "spack.package_base.PackageBase") -> None:
|
||||
"""
|
||||
Cleanup the build task for the spec
|
||||
Cleanup the task for the spec
|
||||
|
||||
Args:
|
||||
pkg: the package being installed
|
||||
@@ -1433,7 +1616,7 @@ def _ensure_locked(
|
||||
|
||||
if lock_type == "read":
|
||||
# Wait until the other process finishes if there are no more
|
||||
# build tasks with priority 0 (i.e., with no uninstalled
|
||||
# tasks with priority 0 (i.e., with no uninstalled
|
||||
# dependencies).
|
||||
no_p0 = len(self.build_tasks) == 0 or not self._next_is_pri0()
|
||||
timeout = None if no_p0 else 3.0
|
||||
@@ -1485,6 +1668,33 @@ def _ensure_locked(
|
||||
self.locks[pkg_id] = (lock_type, lock)
|
||||
return self.locks[pkg_id]
|
||||
|
||||
def _requeue_with_build_spec_tasks(self, task):
|
||||
"""Requeue the task and its missing build spec dependencies"""
|
||||
# Full install of the build_spec is necessary because it didn't already exist somewhere
|
||||
spec = task.pkg.spec
|
||||
for dep in spec.build_spec.traverse():
|
||||
dep_pkg = dep.package
|
||||
|
||||
dep_id = package_id(dep)
|
||||
if dep_id not in self.build_tasks:
|
||||
self._add_init_task(dep_pkg, task.request, self.all_dependencies)
|
||||
|
||||
# Clear any persistent failure markings _unless_ they are
|
||||
# associated with another process in this parallel build
|
||||
# of the spec.
|
||||
spack.store.STORE.failure_tracker.clear(dep, force=False)
|
||||
|
||||
# Queue the build spec.
|
||||
build_pkg_id = package_id(spec.build_spec)
|
||||
build_spec_task = self.build_tasks[build_pkg_id]
|
||||
spec_pkg_id = package_id(spec)
|
||||
spec_task = task.next_attempt(self.installed)
|
||||
spec_task.status = BuildStatus.QUEUED
|
||||
# Convey a build spec as a dependency of a deployed spec.
|
||||
build_spec_task.add_dependent(spec_pkg_id)
|
||||
spec_task.add_dependency(build_pkg_id)
|
||||
self._push_task(spec_task)
|
||||
|
||||
def _add_tasks(self, request: BuildRequest, all_deps):
|
||||
"""Add tasks to the priority queue for the given build request.
|
||||
|
||||
@@ -1514,7 +1724,7 @@ def _add_tasks(self, request: BuildRequest, all_deps):
|
||||
|
||||
dep_id = package_id(dep)
|
||||
if dep_id not in self.build_tasks:
|
||||
self._add_init_task(dep_pkg, request, False, all_deps)
|
||||
self._add_init_task(dep_pkg, request, all_deps=all_deps)
|
||||
|
||||
# Clear any persistent failure markings _unless_ they are
|
||||
# associated with another process in this parallel build
|
||||
@@ -1532,80 +1742,29 @@ def _add_tasks(self, request: BuildRequest, all_deps):
|
||||
self._check_deps_status(request)
|
||||
|
||||
# Now add the package itself, if appropriate
|
||||
self._add_init_task(request.pkg, request, False, all_deps)
|
||||
self._add_init_task(request.pkg, request, all_deps=all_deps)
|
||||
|
||||
# Ensure if one request is to fail fast then all requests will.
|
||||
fail_fast = bool(request.install_args.get("fail_fast"))
|
||||
self.fail_fast = self.fail_fast or fail_fast
|
||||
|
||||
def _install_task(self, task: BuildTask, install_status: InstallStatus) -> None:
|
||||
def _install_task(self, task: Task, install_status: InstallStatus) -> None:
|
||||
"""
|
||||
Perform the installation of the requested spec and/or dependency
|
||||
represented by the build task.
|
||||
represented by the task.
|
||||
|
||||
Args:
|
||||
task: the installation build task for a package
|
||||
task: the installation task for a package
|
||||
install_status: the installation status for the package"""
|
||||
|
||||
explicit = task.explicit
|
||||
install_args = task.request.install_args
|
||||
cache_only = task.cache_only
|
||||
use_cache = task.use_cache
|
||||
tests = install_args.get("tests", False)
|
||||
assert isinstance(tests, (bool, list)) # make mypy happy.
|
||||
unsigned: Optional[bool] = install_args.get("unsigned")
|
||||
|
||||
pkg, pkg_id = task.pkg, task.pkg_id
|
||||
|
||||
tty.msg(install_msg(pkg_id, self.pid, install_status))
|
||||
task.start = task.start or time.time()
|
||||
task.status = STATUS_INSTALLING
|
||||
|
||||
# Use the binary cache if requested
|
||||
if use_cache:
|
||||
if _install_from_cache(pkg, explicit, unsigned):
|
||||
self._update_installed(task)
|
||||
return
|
||||
elif cache_only:
|
||||
raise spack.error.InstallError(
|
||||
"No binary found when cache-only was specified", pkg=pkg
|
||||
)
|
||||
else:
|
||||
tty.msg(f"No binary for {pkg_id} found: installing from source")
|
||||
|
||||
pkg.run_tests = tests if isinstance(tests, bool) else pkg.name in tests
|
||||
|
||||
# hook that allows tests to inspect the Package before installation
|
||||
# see unit_test_check() docs.
|
||||
if not pkg.unit_test_check():
|
||||
return
|
||||
|
||||
try:
|
||||
self._setup_install_dir(pkg)
|
||||
|
||||
# Create stage object now and let it be serialized for the child process. That
|
||||
# way monkeypatch in tests works correctly.
|
||||
pkg.stage
|
||||
|
||||
# Create a child process to do the actual installation.
|
||||
# Preserve verbosity settings across installs.
|
||||
spack.package_base.PackageBase._verbose = spack.build_environment.start_build_process(
|
||||
pkg, build_process, install_args
|
||||
)
|
||||
# Note: PARENT of the build process adds the new package to
|
||||
# the database, so that we don't need to re-read from file.
|
||||
spack.store.STORE.db.add(pkg.spec, explicit=explicit)
|
||||
|
||||
except spack.error.StopPhase as e:
|
||||
# A StopPhase exception means that the installer was asked to stop early from clients,
|
||||
# and is not an error at this point
|
||||
pid = f"{self.pid}: " if tty.show_pid() else ""
|
||||
tty.debug(f"{pid}{str(e)}")
|
||||
tty.debug(f"Package stage directory: {pkg.stage.source_path}")
|
||||
rc = task.execute(install_status)
|
||||
if rc == ExecuteResult.MISSING_BUILD_SPEC:
|
||||
self._requeue_with_build_spec_tasks(task)
|
||||
else: # if rc == ExecuteResult.SUCCESS or rc == ExecuteResult.FAILED
|
||||
self._update_installed(task)
|
||||
|
||||
def _next_is_pri0(self) -> bool:
|
||||
"""
|
||||
Determine if the next build task has priority 0
|
||||
Determine if the next task has priority 0
|
||||
|
||||
Return:
|
||||
True if it does, False otherwise
|
||||
@@ -1615,31 +1774,31 @@ def _next_is_pri0(self) -> bool:
|
||||
task = self.build_pq[0][1]
|
||||
return task.priority == 0
|
||||
|
||||
def _pop_task(self) -> Optional[BuildTask]:
|
||||
def _pop_task(self) -> Optional[Task]:
|
||||
"""
|
||||
Remove and return the lowest priority build task.
|
||||
Remove and return the lowest priority task.
|
||||
|
||||
Source: Variant of function at docs.python.org/2/library/heapq.html
|
||||
"""
|
||||
while self.build_pq:
|
||||
task = heapq.heappop(self.build_pq)[1]
|
||||
if task.status != STATUS_REMOVED:
|
||||
if task.status != BuildStatus.REMOVED:
|
||||
del self.build_tasks[task.pkg_id]
|
||||
task.status = STATUS_DEQUEUED
|
||||
task.status = BuildStatus.DEQUEUED
|
||||
return task
|
||||
return None
|
||||
|
||||
def _push_task(self, task: BuildTask) -> None:
|
||||
def _push_task(self, task: Task) -> None:
|
||||
"""
|
||||
Push (or queue) the specified build task for the package.
|
||||
Push (or queue) the specified task for the package.
|
||||
|
||||
Source: Customization of "add_task" function at
|
||||
docs.python.org/2/library/heapq.html
|
||||
|
||||
Args:
|
||||
task: the installation build task for a package
|
||||
task: the installation task for a package
|
||||
"""
|
||||
msg = "{0} a build task for {1} with status '{2}'"
|
||||
msg = "{0} a task for {1} with status '{2}'"
|
||||
skip = "Skipping requeue of task for {0}: {1}"
|
||||
|
||||
# Ensure do not (re-)queue installed or failed packages whose status
|
||||
@@ -1652,9 +1811,11 @@ def _push_task(self, task: BuildTask) -> None:
|
||||
tty.debug(skip.format(task.pkg_id, "failed"))
|
||||
return
|
||||
|
||||
# Remove any associated build task since its sequence will change
|
||||
# Remove any associated task since its sequence will change
|
||||
self._remove_task(task.pkg_id)
|
||||
desc = "Queueing" if task.attempts == 0 else "Requeueing"
|
||||
desc = (
|
||||
"Queueing" if task.attempts == 1 else f"Requeueing ({ordinal(task.attempts)} attempt)"
|
||||
)
|
||||
tty.debug(msg.format(desc, task.pkg_id, task.status))
|
||||
|
||||
# Now add the new task to the queue with a new sequence number to
|
||||
@@ -1685,9 +1846,9 @@ def _release_lock(self, pkg_id: str) -> None:
|
||||
except Exception as exc:
|
||||
tty.warn(err.format(exc.__class__.__name__, ltype, pkg_id, str(exc)))
|
||||
|
||||
def _remove_task(self, pkg_id: str) -> Optional[BuildTask]:
|
||||
def _remove_task(self, pkg_id: str) -> Optional[Task]:
|
||||
"""
|
||||
Mark the existing package build task as being removed and return it.
|
||||
Mark the existing package task as being removed and return it.
|
||||
Raises KeyError if not found.
|
||||
|
||||
Source: Variant of function at docs.python.org/2/library/heapq.html
|
||||
@@ -1696,71 +1857,39 @@ def _remove_task(self, pkg_id: str) -> Optional[BuildTask]:
|
||||
pkg_id: identifier for the package to be removed
|
||||
"""
|
||||
if pkg_id in self.build_tasks:
|
||||
tty.debug(f"Removing build task for {pkg_id} from list")
|
||||
tty.debug(f"Removing task for {pkg_id} from list")
|
||||
task = self.build_tasks.pop(pkg_id)
|
||||
task.status = STATUS_REMOVED
|
||||
task.status = BuildStatus.REMOVED
|
||||
return task
|
||||
else:
|
||||
return None
|
||||
|
||||
def _requeue_task(self, task: BuildTask, install_status: InstallStatus) -> None:
|
||||
def _requeue_task(self, task: Task, install_status: InstallStatus) -> None:
|
||||
"""
|
||||
Requeues a task that appears to be in progress by another process.
|
||||
|
||||
Args:
|
||||
task (BuildTask): the installation build task for a package
|
||||
task (Task): the installation task for a package
|
||||
"""
|
||||
if task.status not in [STATUS_INSTALLED, STATUS_INSTALLING]:
|
||||
if task.status not in [BuildStatus.INSTALLED, BuildStatus.INSTALLING]:
|
||||
tty.debug(
|
||||
f"{install_msg(task.pkg_id, self.pid, install_status)} "
|
||||
"in progress by another process"
|
||||
)
|
||||
|
||||
new_task = task.next_attempt(self.installed)
|
||||
new_task.status = STATUS_INSTALLING
|
||||
new_task.status = BuildStatus.INSTALLING
|
||||
self._push_task(new_task)
|
||||
|
||||
def _setup_install_dir(self, pkg: "spack.package_base.PackageBase") -> None:
|
||||
"""
|
||||
Create and ensure proper access controls for the install directory.
|
||||
Write a small metadata file with the current spack environment.
|
||||
|
||||
Args:
|
||||
pkg: the package to be built and installed
|
||||
"""
|
||||
if not os.path.exists(pkg.spec.prefix):
|
||||
path = spack.util.path.debug_padded_filter(pkg.spec.prefix)
|
||||
tty.debug(f"Creating the installation directory {path}")
|
||||
spack.store.STORE.layout.create_install_directory(pkg.spec)
|
||||
else:
|
||||
# Set the proper group for the prefix
|
||||
group = prefs.get_package_group(pkg.spec)
|
||||
if group:
|
||||
fs.chgrp(pkg.spec.prefix, group)
|
||||
|
||||
# Set the proper permissions.
|
||||
# This has to be done after group because changing groups blows
|
||||
# away the sticky group bit on the directory
|
||||
mode = os.stat(pkg.spec.prefix).st_mode
|
||||
perms = prefs.get_package_dir_permissions(pkg.spec)
|
||||
if mode != perms:
|
||||
os.chmod(pkg.spec.prefix, perms)
|
||||
|
||||
# Ensure the metadata path exists as well
|
||||
fs.mkdirp(spack.store.STORE.layout.metadata_path(pkg.spec), mode=perms)
|
||||
|
||||
# Always write host environment - we assume this can change
|
||||
spack.store.STORE.layout.write_host_environment(pkg.spec)
|
||||
|
||||
def _update_failed(
|
||||
self, task: BuildTask, mark: bool = False, exc: Optional[BaseException] = None
|
||||
self, task: Task, mark: bool = False, exc: Optional[BaseException] = None
|
||||
) -> None:
|
||||
"""
|
||||
Update the task and transitive dependents as failed; optionally mark
|
||||
externally as failed; and remove associated build tasks.
|
||||
externally as failed; and remove associated tasks.
|
||||
|
||||
Args:
|
||||
task: the build task for the failed package
|
||||
task: the task for the failed package
|
||||
mark: ``True`` if the package and its dependencies are to
|
||||
be marked as "failed", otherwise, ``False``
|
||||
exc: optional exception if associated with the failure
|
||||
@@ -1772,34 +1901,34 @@ def _update_failed(
|
||||
self.failed[pkg_id] = spack.store.STORE.failure_tracker.mark(task.pkg.spec)
|
||||
else:
|
||||
self.failed[pkg_id] = None
|
||||
task.status = STATUS_FAILED
|
||||
task.status = BuildStatus.FAILED
|
||||
|
||||
for dep_id in task.dependents:
|
||||
if dep_id in self.build_tasks:
|
||||
tty.warn(f"Skipping build of {dep_id} since {pkg_id} failed")
|
||||
# Ensure the dependent's uninstalled dependents are
|
||||
# up-to-date and their build tasks removed.
|
||||
# up-to-date and their tasks removed.
|
||||
dep_task = self.build_tasks[dep_id]
|
||||
self._update_failed(dep_task, mark)
|
||||
self._remove_task(dep_id)
|
||||
else:
|
||||
tty.debug(f"No build task for {dep_id} to skip since {pkg_id} failed")
|
||||
tty.debug(f"No task for {dep_id} to skip since {pkg_id} failed")
|
||||
|
||||
def _update_installed(self, task: BuildTask) -> None:
|
||||
def _update_installed(self, task: Task) -> None:
|
||||
"""
|
||||
Mark the task as installed and ensure dependent build tasks are aware.
|
||||
Mark the task as installed and ensure dependent tasks are aware.
|
||||
|
||||
Args:
|
||||
task (BuildTask): the build task for the installed package
|
||||
task: the task for the installed package
|
||||
"""
|
||||
task.status = STATUS_INSTALLED
|
||||
task.status = BuildStatus.INSTALLED
|
||||
self._flag_installed(task.pkg, task.dependents)
|
||||
|
||||
def _flag_installed(
|
||||
self, pkg: "spack.package_base.PackageBase", dependent_ids: Optional[Set[str]] = None
|
||||
) -> None:
|
||||
"""
|
||||
Flag the package as installed and ensure known by all build tasks of
|
||||
Flag the package as installed and ensure known by all tasks of
|
||||
known dependents.
|
||||
|
||||
Args:
|
||||
@@ -1827,7 +1956,7 @@ def _flag_installed(
|
||||
dep_task = self.build_tasks[dep_id]
|
||||
self._push_task(dep_task.next_attempt(self.installed))
|
||||
else:
|
||||
tty.debug(f"{dep_id} has no build task to update for {pkg_id}'s success")
|
||||
tty.debug(f"{dep_id} has no task to update for {pkg_id}'s success")
|
||||
|
||||
def _init_queue(self) -> None:
|
||||
"""Initialize the build queue from the list of build requests."""
|
||||
@@ -1846,8 +1975,9 @@ def _init_queue(self) -> None:
|
||||
task = self.build_tasks[dep_id]
|
||||
for dependent_id in dependents.difference(task.dependents):
|
||||
task.add_dependent(dependent_id)
|
||||
self.all_dependencies = all_dependencies
|
||||
|
||||
def _install_action(self, task: BuildTask) -> int:
|
||||
def _install_action(self, task: Task) -> InstallAction:
|
||||
"""
|
||||
Determine whether the installation should be overwritten (if it already
|
||||
exists) or skipped (if has been handled by another process).
|
||||
@@ -1995,7 +2125,6 @@ def install(self) -> None:
|
||||
self._update_installed(task)
|
||||
path = spack.util.path.debug_padded_filter(pkg.prefix)
|
||||
_print_installed_pkg(path)
|
||||
|
||||
else:
|
||||
# At this point we've failed to get a write or a read
|
||||
# lock, which means another process has taken a write
|
||||
@@ -2035,8 +2164,6 @@ def install(self) -> None:
|
||||
# wrapper -- silence mypy
|
||||
OverwriteInstall(self, spack.store.STORE.db, task, install_status).install() # type: ignore[arg-type] # noqa: E501
|
||||
|
||||
self._update_installed(task)
|
||||
|
||||
# If we installed then we should keep the prefix
|
||||
stop_before_phase = getattr(pkg, "stop_before_phase", None)
|
||||
last_phase = getattr(pkg, "last_phase", None)
|
||||
@@ -2080,13 +2207,15 @@ def install(self) -> None:
|
||||
)
|
||||
# Terminate if requested to do so on the first failure.
|
||||
if self.fail_fast:
|
||||
raise spack.error.InstallError(f"{fail_fast_err}: {str(exc)}", pkg=pkg)
|
||||
raise spack.error.InstallError(
|
||||
f"{fail_fast_err}: {str(exc)}", pkg=pkg
|
||||
) from exc
|
||||
|
||||
# Terminate when a single build request has failed, or summarize errors later.
|
||||
if task.is_build_request:
|
||||
if single_requested_spec:
|
||||
raise
|
||||
failed_build_requests.append((pkg, pkg_id, str(exc)))
|
||||
failed_build_requests.append((pkg, pkg_id, exc))
|
||||
|
||||
finally:
|
||||
# Remove the install prefix if anything went wrong during
|
||||
@@ -2096,7 +2225,8 @@ def install(self) -> None:
|
||||
|
||||
# Perform basic task cleanup for the installed spec to
|
||||
# include downgrading the write to a read lock
|
||||
self._cleanup_task(pkg)
|
||||
if pkg.spec.installed:
|
||||
self._cleanup_task(pkg)
|
||||
|
||||
# Cleanup, which includes releasing all of the read locks
|
||||
self._cleanup_all_tasks()
|
||||
@@ -2112,6 +2242,9 @@ def install(self) -> None:
|
||||
if failed_build_requests or missing:
|
||||
for _, pkg_id, err in failed_build_requests:
|
||||
tty.error(f"{pkg_id}: {err}")
|
||||
if spack.error.SHOW_BACKTRACE:
|
||||
# note: in python 3.10+ this can just be print_exception(err)
|
||||
traceback.print_exception(type(err), err, err.__traceback__)
|
||||
|
||||
for _, pkg_id in missing:
|
||||
tty.error(f"{pkg_id}: Package was not installed")
|
||||
@@ -2365,6 +2498,15 @@ def build_process(pkg: "spack.package_base.PackageBase", install_args: dict) ->
|
||||
|
||||
def deprecate(spec: "spack.spec.Spec", deprecator: "spack.spec.Spec", link_fn) -> None:
|
||||
"""Deprecate this package in favor of deprecator spec"""
|
||||
# Here we assume we don't deprecate across different stores, and that same hash
|
||||
# means same binary artifacts
|
||||
if spec.dag_hash() == deprecator.dag_hash():
|
||||
return
|
||||
|
||||
# We can't really have control over external specs, and cannot link anything in their place
|
||||
if spec.external:
|
||||
return
|
||||
|
||||
# Install deprecator if it isn't installed already
|
||||
if not spack.store.STORE.db.query(deprecator):
|
||||
PackageInstaller([deprecator.package], explicit=True).install()
|
||||
@@ -2395,7 +2537,7 @@ def __init__(
|
||||
self,
|
||||
installer: PackageInstaller,
|
||||
database: spack.database.Database,
|
||||
task: BuildTask,
|
||||
task: Task,
|
||||
install_status: InstallStatus,
|
||||
):
|
||||
self.installer = installer
|
||||
|
||||
@@ -102,9 +102,6 @@
|
||||
|
||||
spack_ld_library_path = os.environ.get("LD_LIBRARY_PATH", "")
|
||||
|
||||
#: Whether to print backtraces on error
|
||||
SHOW_BACKTRACE = False
|
||||
|
||||
|
||||
def add_all_commands(parser):
|
||||
"""Add all spack subcommands to the parser."""
|
||||
@@ -527,8 +524,7 @@ def setup_main_options(args):
|
||||
|
||||
if args.debug or args.backtrace:
|
||||
spack.error.debug = True
|
||||
global SHOW_BACKTRACE
|
||||
SHOW_BACKTRACE = True
|
||||
spack.error.SHOW_BACKTRACE = True
|
||||
|
||||
if args.debug:
|
||||
spack.util.debug.register_interrupt_handler()
|
||||
@@ -1021,19 +1017,19 @@ def main(argv=None):
|
||||
e.die() # gracefully die on any SpackErrors
|
||||
|
||||
except KeyboardInterrupt:
|
||||
if spack.config.get("config:debug") or SHOW_BACKTRACE:
|
||||
if spack.config.get("config:debug") or spack.error.SHOW_BACKTRACE:
|
||||
raise
|
||||
sys.stderr.write("\n")
|
||||
tty.error("Keyboard interrupt.")
|
||||
return signal.SIGINT.value
|
||||
|
||||
except SystemExit as e:
|
||||
if spack.config.get("config:debug") or SHOW_BACKTRACE:
|
||||
if spack.config.get("config:debug") or spack.error.SHOW_BACKTRACE:
|
||||
traceback.print_exc()
|
||||
return e.code
|
||||
|
||||
except Exception as e:
|
||||
if spack.config.get("config:debug") or SHOW_BACKTRACE:
|
||||
if spack.config.get("config:debug") or spack.error.SHOW_BACKTRACE:
|
||||
raise
|
||||
tty.error(e)
|
||||
return 3
|
||||
|
||||
@@ -89,9 +89,8 @@ def from_url(url: str):
|
||||
"""Create an anonymous mirror by URL. This method validates the URL."""
|
||||
if not urllib.parse.urlparse(url).scheme in supported_url_schemes:
|
||||
raise ValueError(
|
||||
'"{}" is not a valid mirror URL. Scheme must be once of {}.'.format(
|
||||
url, ", ".join(supported_url_schemes)
|
||||
)
|
||||
f'"{url}" is not a valid mirror URL. '
|
||||
f"Scheme must be one of {supported_url_schemes}."
|
||||
)
|
||||
return Mirror(url)
|
||||
|
||||
@@ -759,7 +758,7 @@ def require_mirror_name(mirror_name):
|
||||
"""Find a mirror by name and raise if it does not exist"""
|
||||
mirror = spack.mirror.MirrorCollection().get(mirror_name)
|
||||
if not mirror:
|
||||
raise ValueError('no mirror named "{0}"'.format(mirror_name))
|
||||
raise ValueError(f'no mirror named "{mirror_name}"')
|
||||
return mirror
|
||||
|
||||
|
||||
|
||||
@@ -527,7 +527,8 @@ def use_name(self):
|
||||
parts = name.split("/")
|
||||
name = os.path.join(*parts)
|
||||
# Add optional suffixes based on constraints
|
||||
path_elements = [name] + self.conf.suffixes
|
||||
path_elements = [name]
|
||||
path_elements.extend(map(self.spec.format, self.conf.suffixes))
|
||||
return "-".join(path_elements)
|
||||
|
||||
@property
|
||||
|
||||
@@ -1855,13 +1855,22 @@ def _has_make_target(self, target):
|
||||
#
|
||||
# BSD Make:
|
||||
# make: don't know how to make test. Stop
|
||||
#
|
||||
# Note: "Stop." is not printed when running a Make jobserver (spack env depfile) that runs
|
||||
# with `make -k/--keep-going`
|
||||
missing_target_msgs = [
|
||||
"No rule to make target `{0}'. Stop.",
|
||||
"No rule to make target '{0}'. Stop.",
|
||||
"don't know how to make {0}. Stop",
|
||||
"No rule to make target `{0}'.",
|
||||
"No rule to make target '{0}'.",
|
||||
"don't know how to make {0}.",
|
||||
]
|
||||
|
||||
kwargs = {"fail_on_error": False, "output": os.devnull, "error": str}
|
||||
kwargs = {
|
||||
"fail_on_error": False,
|
||||
"output": os.devnull,
|
||||
"error": str,
|
||||
# Remove MAKEFLAGS to avoid inherited flags from Make jobserver (spack env depfile)
|
||||
"extra_env": {"MAKEFLAGS": ""},
|
||||
}
|
||||
|
||||
stderr = make("-n", target, **kwargs)
|
||||
|
||||
|
||||
@@ -205,23 +205,33 @@ def macho_find_paths(orig_rpaths, deps, idpath, old_layout_root, prefix_to_prefi
|
||||
paths_to_paths dictionary which maps all of the old paths to new paths
|
||||
"""
|
||||
paths_to_paths = dict()
|
||||
# Sort from longest path to shortest, to ensure we try /foo/bar/baz before /foo/bar
|
||||
prefix_iteration_order = sorted(prefix_to_prefix, key=len, reverse=True)
|
||||
for orig_rpath in orig_rpaths:
|
||||
if orig_rpath.startswith(old_layout_root):
|
||||
for old_prefix, new_prefix in prefix_to_prefix.items():
|
||||
for old_prefix in prefix_iteration_order:
|
||||
new_prefix = prefix_to_prefix[old_prefix]
|
||||
if orig_rpath.startswith(old_prefix):
|
||||
new_rpath = re.sub(re.escape(old_prefix), new_prefix, orig_rpath)
|
||||
paths_to_paths[orig_rpath] = new_rpath
|
||||
break
|
||||
else:
|
||||
paths_to_paths[orig_rpath] = orig_rpath
|
||||
|
||||
if idpath:
|
||||
for old_prefix, new_prefix in prefix_to_prefix.items():
|
||||
for old_prefix in prefix_iteration_order:
|
||||
new_prefix = prefix_to_prefix[old_prefix]
|
||||
if idpath.startswith(old_prefix):
|
||||
paths_to_paths[idpath] = re.sub(re.escape(old_prefix), new_prefix, idpath)
|
||||
break
|
||||
|
||||
for dep in deps:
|
||||
for old_prefix, new_prefix in prefix_to_prefix.items():
|
||||
for old_prefix in prefix_iteration_order:
|
||||
new_prefix = prefix_to_prefix[old_prefix]
|
||||
if dep.startswith(old_prefix):
|
||||
paths_to_paths[dep] = re.sub(re.escape(old_prefix), new_prefix, dep)
|
||||
break
|
||||
|
||||
if dep.startswith("@"):
|
||||
paths_to_paths[dep] = dep
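A standalone illustration of why the prefixes are tried longest-first: when one old prefix is a parent of another, matching the shorter one first would remap the nested install to the wrong destination. The paths below are made up; only the ordering logic mirrors the code above.

```python
import re

prefix_to_prefix = {
    "/old/foo/bar": "/new/a",
    "/old/foo/bar/baz": "/new/b",
}


def remap(path: str) -> str:
    # Sort from longest to shortest so /old/foo/bar/baz is tried before /old/foo/bar
    for old in sorted(prefix_to_prefix, key=len, reverse=True):
        if path.startswith(old):
            return re.sub(re.escape(old), prefix_to_prefix[old], path)
    return path


print(remap("/old/foo/bar/baz/lib/libz.dylib"))  # /new/b/lib/libz.dylib
print(remap("/old/foo/bar/lib/libfoo.dylib"))    # /new/a/lib/libfoo.dylib
```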
|
||||
|
||||
@@ -270,40 +280,14 @@ def modify_macho_object(cur_path, rpaths, deps, idpath, paths_to_paths):
|
||||
install_name_tool = executable.Executable("install_name_tool")
|
||||
install_name_tool(*args)
|
||||
|
||||
return
|
||||
|
||||
|
||||
def modify_object_macholib(cur_path, paths_to_paths):
|
||||
"""
|
||||
This function is used when installing Mach-O buildcaches on Linux by
|
||||
rewriting mach-o loader commands for dependency library paths of
|
||||
mach-o binaries and the id path for mach-o libraries.
|
||||
Rewriting of rpaths is handled by replace_prefix_bin.
|
||||
Inputs
|
||||
mach-o binary to be modified
|
||||
dictionary mapping paths in old install layout to new install layout
|
||||
"""
|
||||
|
||||
dll = macholib.MachO.MachO(cur_path)
|
||||
dll.rewriteLoadCommands(paths_to_paths.get)
|
||||
|
||||
try:
|
||||
f = open(dll.filename, "rb+")
|
||||
for header in dll.headers:
|
||||
f.seek(0)
|
||||
dll.write(f)
|
||||
f.seek(0, 2)
|
||||
f.flush()
|
||||
f.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return
|
||||
|
||||
|
||||
def macholib_get_paths(cur_path):
|
||||
"""Get rpaths, dependent libraries, and library id of mach-o objects."""
|
||||
headers = macholib.MachO.MachO(cur_path).headers
|
||||
headers = []
|
||||
try:
|
||||
headers = macholib.MachO.MachO(cur_path).headers
|
||||
except ValueError:
|
||||
pass
|
||||
if not headers:
|
||||
tty.warn("Failed to read Mach-O headers: {0}".format(cur_path))
|
||||
commands = []
|
||||
@@ -415,10 +399,7 @@ def relocate_macho_binaries(
|
||||
# normalized paths
|
||||
rel_to_orig = macho_make_paths_normal(orig_path_name, rpaths, deps, idpath)
|
||||
# replace the relativized paths with normalized paths
|
||||
if sys.platform == "darwin":
|
||||
modify_macho_object(path_name, rpaths, deps, idpath, rel_to_orig)
|
||||
else:
|
||||
modify_object_macholib(path_name, rel_to_orig)
|
||||
modify_macho_object(path_name, rpaths, deps, idpath, rel_to_orig)
|
||||
# get the normalized paths in the mach-o binary
|
||||
rpaths, deps, idpath = macholib_get_paths(path_name)
|
||||
# get the mapping of paths in old prefix to path in new prefix
|
||||
@@ -426,10 +407,7 @@ def relocate_macho_binaries(
|
||||
rpaths, deps, idpath, old_layout_root, prefix_to_prefix
|
||||
)
|
||||
# replace the old paths with new paths
|
||||
if sys.platform == "darwin":
|
||||
modify_macho_object(path_name, rpaths, deps, idpath, paths_to_paths)
|
||||
else:
|
||||
modify_object_macholib(path_name, paths_to_paths)
|
||||
modify_macho_object(path_name, rpaths, deps, idpath, paths_to_paths)
|
||||
# get the new normalized path in the mach-o binary
|
||||
rpaths, deps, idpath = macholib_get_paths(path_name)
|
||||
# get the mapping of paths to relative paths in the new prefix
|
||||
@@ -437,10 +415,7 @@ def relocate_macho_binaries(
|
||||
path_name, new_layout_root, rpaths, deps, idpath
|
||||
)
|
||||
# replace the new paths with relativized paths in the new prefix
|
||||
if sys.platform == "darwin":
|
||||
modify_macho_object(path_name, rpaths, deps, idpath, paths_to_paths)
|
||||
else:
|
||||
modify_object_macholib(path_name, paths_to_paths)
|
||||
modify_macho_object(path_name, rpaths, deps, idpath, paths_to_paths)
|
||||
else:
|
||||
# get the paths in the old prefix
|
||||
rpaths, deps, idpath = macholib_get_paths(path_name)
|
||||
@@ -449,10 +424,7 @@ def relocate_macho_binaries(
|
||||
rpaths, deps, idpath, old_layout_root, prefix_to_prefix
|
||||
)
|
||||
# replace the old paths with new paths
|
||||
if sys.platform == "darwin":
|
||||
modify_macho_object(path_name, rpaths, deps, idpath, paths_to_paths)
|
||||
else:
|
||||
modify_object_macholib(path_name, paths_to_paths)
|
||||
modify_macho_object(path_name, rpaths, deps, idpath, paths_to_paths)
|
||||
|
||||
|
||||
def _transform_rpaths(orig_rpaths, orig_root, new_prefixes):
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
from llnl.util.symlink import readlink, symlink
|
||||
|
||||
import spack.binary_distribution as bindist
|
||||
import spack.deptypes as dt
|
||||
import spack.error
|
||||
import spack.hooks
|
||||
import spack.platforms
|
||||
@@ -52,6 +53,7 @@ def rewire_node(spec, explicit):
|
||||
its subgraph. Binaries, text, and links are all changed in accordance with
|
||||
the splice. The resulting package is then 'installed.'"""
|
||||
tempdir = tempfile.mkdtemp()
|
||||
|
||||
# copy anything installed to a temporary directory
|
||||
shutil.copytree(spec.build_spec.prefix, os.path.join(tempdir, spec.dag_hash()))
|
||||
|
||||
@@ -59,8 +61,21 @@ def rewire_node(spec, explicit):
|
||||
# compute prefix-to-prefix for every node from the build spec to the spliced
|
||||
# spec
|
||||
prefix_to_prefix = OrderedDict({spec.build_spec.prefix: spec.prefix})
|
||||
for build_dep in spec.build_spec.traverse(root=False):
|
||||
prefix_to_prefix[build_dep.prefix] = spec[build_dep.name].prefix
|
||||
build_spec_ids = set(id(s) for s in spec.build_spec.traverse(deptype=dt.ALL & ~dt.BUILD))
|
||||
for s in bindist.deps_to_relocate(spec):
|
||||
analog = s
|
||||
if id(s) not in build_spec_ids:
|
||||
analogs = [
|
||||
d
|
||||
for d in spec.build_spec.traverse(deptype=dt.ALL & ~dt.BUILD)
|
||||
if s._splice_match(d, self_root=spec, other_root=spec.build_spec)
|
||||
]
|
||||
if analogs:
|
||||
# Prefer same-name analogs and prefer higher versions
|
||||
# This matches the preferences in Spec.splice, so we will find same node
|
||||
analog = max(analogs, key=lambda a: (a.name == s.name, a.version))
|
||||
|
||||
prefix_to_prefix[analog.prefix] = s.prefix
|
||||
|
||||
manifest = bindist.get_buildfile_manifest(spec.build_spec)
|
||||
platform = spack.platforms.by_name(spec.platform)
|
||||
|
||||
@@ -47,7 +47,7 @@
|
||||
"tags": {"type": "array", "items": {"type": "string"}},
|
||||
"variables": {
|
||||
"type": "object",
|
||||
"patternProperties": {r"[\w\d\-_\.]+": {"type": "string"}},
|
||||
"patternProperties": {r"[\w\d\-_\.]+": {"type": ["string", "number"]}},
|
||||
},
|
||||
"before_script": script_schema,
|
||||
"script": script_schema,
|
||||
@@ -77,58 +77,54 @@
|
||||
},
|
||||
}
|
||||
|
||||
named_attributes_schema = {
|
||||
"oneOf": [
|
||||
{
|
||||
dynamic_mapping_schema = {
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"required": ["dynamic-mapping"],
|
||||
"properties": {
|
||||
"dynamic-mapping": {
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {"noop-job": attributes_schema, "noop-job-remove": attributes_schema},
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {"build-job": attributes_schema, "build-job-remove": attributes_schema},
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {"copy-job": attributes_schema, "copy-job-remove": attributes_schema},
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"required": ["endpoint"],
|
||||
"properties": {
|
||||
"reindex-job": attributes_schema,
|
||||
"reindex-job-remove": attributes_schema,
|
||||
"name": {"type": "string"},
|
||||
# "endpoint" cannot have http patternProperties constaint as it is a required field
|
||||
# Constraint is applied in code
|
||||
"endpoint": {"type": "string"},
|
||||
"timeout": {"type": "integer", "minimum": 0},
|
||||
"verify_ssl": {"type": "boolean", "default": False},
|
||||
"header": {"type": "object", "additionalProperties": False},
|
||||
"allow": {"type": "array", "items": {"type": "string"}},
|
||||
"require": {"type": "array", "items": {"type": "string"}},
|
||||
"ignore": {"type": "array", "items": {"type": "string"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"signing-job": attributes_schema,
|
||||
"signing-job-remove": attributes_schema,
|
||||
},
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"cleanup-job": attributes_schema,
|
||||
"cleanup-job-remove": attributes_schema,
|
||||
},
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {"any-job": attributes_schema, "any-job-remove": attributes_schema},
|
||||
},
|
||||
]
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def job_schema(name: str):
|
||||
return {
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {f"{name}-job": attributes_schema, f"{name}-job-remove": attributes_schema},
|
||||
}
|
||||
|
||||
|
||||
pipeline_gen_schema = {
|
||||
"type": "array",
|
||||
"items": {"oneOf": [submapping_schema, named_attributes_schema]},
|
||||
"items": {
|
||||
"oneOf": [
|
||||
submapping_schema,
|
||||
dynamic_mapping_schema,
|
||||
job_schema("any"),
|
||||
job_schema("build"),
|
||||
job_schema("cleanup"),
|
||||
job_schema("copy"),
|
||||
job_schema("noop"),
|
||||
job_schema("reindex"),
|
||||
job_schema("signing"),
|
||||
]
|
||||
},
|
||||
}
|
||||
|
||||
core_shared_properties = union_dicts(
|
||||
|
||||
@@ -55,6 +55,26 @@
|
||||
"unify": {
|
||||
"oneOf": [{"type": "boolean"}, {"type": "string", "enum": ["when_possible"]}]
|
||||
},
|
||||
"splice": {
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"explicit": {
|
||||
"type": "array",
|
||||
"default": [],
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": ["target", "replacement"],
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"target": {"type": "string"},
|
||||
"replacement": {"type": "string"},
|
||||
"transitive": {"type": "boolean", "default": False},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
"duplicates": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
||||
@@ -523,7 +523,12 @@ def _compute_specs_from_answer_set(self):
|
||||
node = SpecBuilder.make_node(pkg=providers[0])
|
||||
candidate = answer.get(node)
|
||||
|
||||
if candidate and candidate.satisfies(input_spec):
|
||||
if candidate and candidate.build_spec.satisfies(input_spec):
|
||||
if not candidate.satisfies(input_spec):
|
||||
tty.warn(
|
||||
"explicit splice configuration has caused the concretized spec"
|
||||
f" {candidate} not to satisfy the input spec {input_spec}"
|
||||
)
|
||||
self._concrete_specs.append(answer[node])
|
||||
self._concrete_specs_by_input[input_spec] = answer[node]
|
||||
else:
|
||||
@@ -3814,7 +3819,33 @@ def build_specs(self, function_tuples):
|
||||
spack.version.git_ref_lookup.GitRefLookup(spec.fullname)
|
||||
)
|
||||
|
||||
return self._specs
|
||||
specs = self.execute_explicit_splices()
|
||||
|
||||
return specs
|
||||
|
||||
def execute_explicit_splices(self):
|
||||
splice_config = spack.config.CONFIG.get("concretizer:splice:explicit", [])
|
||||
splice_triples = []
|
||||
for splice_set in splice_config:
|
||||
target = splice_set["target"]
|
||||
replacement = spack.spec.Spec(splice_set["replacement"])
|
||||
assert replacement.abstract_hash
|
||||
replacement.replace_hash()
|
||||
transitive = splice_set.get("transitive", False)
|
||||
splice_triples.append((target, replacement, transitive))
|
||||
|
||||
specs = {}
|
||||
for key, spec in self._specs.items():
|
||||
current_spec = spec
|
||||
for target, replacement, transitive in splice_triples:
|
||||
if target in current_spec:
|
||||
# matches root or non-root
|
||||
# e.g. mvapich2%gcc
|
||||
current_spec = current_spec.splice(replacement, transitive)
|
||||
new_key = NodeArgument(id=key.id, pkg=current_spec.name)
|
||||
specs[new_key] = current_spec
|
||||
|
||||
return specs
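A hedged sketch of the shape of the data `execute_explicit_splices` consumes. In Spack the entries come from the `concretizer:splice:explicit` config section (see the schema earlier in this diff) and `replacement` is turned into a `Spec` whose abstract hash is resolved; here the entries are plain placeholder values so the snippet runs on its own.

```python
# Hypothetical config entries mirroring the schema fields target/replacement/transitive
splice_config = [
    {"target": "mpi", "replacement": "mvapich2/abcdef", "transitive": False},
    {"target": "zlib", "replacement": "zlib-ng/123456"},  # transitive defaults to False
]

splice_triples = []
for splice_set in splice_config:
    target = splice_set["target"]
    replacement = splice_set["replacement"]  # in Spack: spack.spec.Spec(...) with its hash resolved
    transitive = splice_set.get("transitive", False)
    splice_triples.append((target, replacement, transitive))

print(splice_triples)
# [('mpi', 'mvapich2/abcdef', False), ('zlib', 'zlib-ng/123456', False)]
```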
|
||||
|
||||
|
||||
def _develop_specs_from_env(spec, env):
|
||||
|
||||
@@ -4183,7 +4183,7 @@ def _virtuals_provided(self, root):
|
||||
"""Return set of virtuals provided by self in the context of root"""
|
||||
if root is self:
|
||||
# Could be using any virtual the package can provide
|
||||
return set(self.package.virtuals_provided)
|
||||
return set(v.name for v in self.package.virtuals_provided)
|
||||
|
||||
hashes = [s.dag_hash() for s in root.traverse()]
|
||||
in_edges = set(
|
||||
@@ -4206,7 +4206,7 @@ def _splice_match(self, other, self_root, other_root):
|
||||
return True
|
||||
|
||||
return bool(
|
||||
self._virtuals_provided(self_root)
|
||||
bool(self._virtuals_provided(self_root))
|
||||
and self._virtuals_provided(self_root) <= other._virtuals_provided(other_root)
|
||||
)
|
||||
|
||||
@@ -4226,29 +4226,24 @@ def _splice_detach_and_add_dependents(self, replacement, context):
# Only set it if it hasn't been spliced before
ancestor._build_spec = ancestor._build_spec or ancestor.copy()
ancestor.clear_cached_hashes(ignore=(ht.package_hash.attr,))
for edge in ancestor.edges_to_dependencies(depflag=dt.BUILD):
if edge.depflag & ~dt.BUILD:
edge.depflag &= ~dt.BUILD
else:
ancestor._dependencies[edge.spec.name].remove(edge)
edge.spec._dependents[ancestor.name].remove(edge)

# For each direct dependent in the link/run graph, replace the dependency on
# node with one on replacement
# For each build dependent, restrict the edge to build-only
for edge in self.edges_from_dependents():
if edge.parent not in ancestors_in_context:
continue
build_dep = edge.depflag & dt.BUILD
other_dep = edge.depflag & ~dt.BUILD
if build_dep:
parent_edge = [e for e in edge.parent._dependencies[self.name] if e.spec is self]
assert len(parent_edge) == 1

edge.depflag = dt.BUILD
parent_edge[0].depflag = dt.BUILD
else:
edge.parent._dependencies.edges[self.name].remove(edge)
self._dependents.edges[edge.parent.name].remove(edge)
edge.parent._dependencies.edges[self.name].remove(edge)
self._dependents.edges[edge.parent.name].remove(edge)
edge.parent._add_dependency(replacement, depflag=edge.depflag, virtuals=edge.virtuals)

if other_dep:
edge.parent._add_dependency(replacement, depflag=other_dep, virtuals=edge.virtuals)

def _splice_helper(self, replacement, self_root, other_root):
def _splice_helper(self, replacement):
"""Main loop of a transitive splice.

The while loop around a traversal of self ensures that changes to self from previous
@@ -4276,8 +4271,7 @@ def _splice_helper(self, replacement, self_root, other_root):
replacements_by_name[node.name].append(node)
virtuals = node._virtuals_provided(root=replacement)
for virtual in virtuals:
# Virtual may be spec or str, get name or return str
replacements_by_name[getattr(virtual, "name", virtual)].append(node)
replacements_by_name[virtual].append(node)

changed = True
while changed:
@@ -4298,8 +4292,8 @@ def _splice_helper(self, replacement, self_root, other_root):
for virtual in node._virtuals_provided(root=self):
analogs += [
r
for r in replacements_by_name[getattr(virtual, "name", virtual)]
if r._splice_match(node, self_root=self_root, other_root=other_root)
for r in replacements_by_name[virtual]
if node._splice_match(r, self_root=self, other_root=replacement)
]

# No match, keep iterating over self
@@ -4313,34 +4307,56 @@ def _splice_helper(self, replacement, self_root, other_root):
# No splice needed here, keep checking
if analog == node:
continue

node._splice_detach_and_add_dependents(analog, context=self)
changed = True
break

def splice(self, other, transitive):
"""Splices dependency "other" into this ("target") Spec, and return the
result as a concrete Spec.
If transitive, then other and its dependencies will be extrapolated to
a list of Specs and spliced in accordingly.
For example, let there exist a dependency graph as follows:
T
| \
Z<-H
In this example, Spec T depends on H and Z, and H also depends on Z.
Suppose, however, that we wish to use a different H, known as H'. This
function will splice in the new H' in one of two ways:
1. transitively, where H' depends on the Z' it was built with, and the
new T* also directly depends on this new Z', or
2. intransitively, where the new T* and H' both depend on the original
Z.
Since the Spec returned by this splicing function is no longer deployed
the same way it was built, any such changes are tracked by setting the
build_spec to point to the corresponding dependency from the original
Spec.
"""
def splice(self, other: "Spec", transitive: bool = True) -> "Spec":
"""Returns a new, spliced concrete Spec with the "other" dependency and,
optionally, its dependencies.

Args:
other: alternate dependency
transitive: include other's dependencies

Returns: a concrete, spliced version of the current Spec

When transitive is "True", use the dependencies from "other" to reconcile
conflicting dependencies. When transitive is "False", use dependencies from self.

For example, suppose we have the following dependency graph:

T
| \
Z<-H

Spec T depends on H and Z, and H also depends on Z. Now we want to use
a different H, called H'. This function can be used to splice in H' to
create a new spec, called T*. If H' was built with Z', then transitive
"True" will ensure H' and T* both depend on Z':

T*
| \
Z'<-H'

If transitive is "False", then H' and T* will both depend on
the original Z, resulting in a new H'*

T*
| \
Z<-H'*

Provenance of the build is tracked through the "build_spec" property
of the spliced spec and any correspondingly modified dependency specs.
The build specs are set to that of the original spec, so the original
spec's provenance is preserved unchanged."""
assert self.concrete
assert other.concrete

if self._splice_match(other, self_root=self, other_root=other):
return other.copy()

if not any(
node._splice_match(other, self_root=self, other_root=other)
for node in self.traverse(root=False, deptype=dt.LINK | dt.RUN)
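A short usage sketch based on the docstring above; "splice-t" and "splice-h" are mock packages used by the test suite later in this changeset, chosen here only for illustration:

import spack.spec

root = spack.spec.Spec("splice-t").concretized()
replacement = spack.spec.Spec("splice-h+foo").concretized()

# transitive=True pulls shared dependencies from the replacement's graph;
# transitive=False rewires the replacement onto root's existing dependencies.
spliced = root.splice(replacement, transitive=True)

assert spliced.concrete  # the result is a new concrete spec
assert spliced.spliced   # and it records that it was spliced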
@@ -4379,12 +4395,12 @@ def mask_build_deps(in_spec):

# Transitively splice any relevant nodes from new into base
# This handles all shared dependencies between self and other
spec._splice_helper(replacement, self_root=self, other_root=other)
spec._splice_helper(replacement)
else:
# Do the same thing as the transitive splice, but reversed
node_pairs = make_node_pairs(other, replacement)
mask_build_deps(replacement)
replacement._splice_helper(spec, self_root=other, other_root=self)
replacement._splice_helper(spec)

# Intransitively splice replacement into spec
# This is very simple now that all shared dependencies have been handled
@@ -4392,13 +4408,14 @@ def mask_build_deps(in_spec):
if node._splice_match(other, self_root=spec, other_root=other):
node._splice_detach_and_add_dependents(replacement, context=spec)

# Set up build dependencies for modified nodes
# Also modify build_spec because the existing ones had build deps removed
# For nodes that were spliced, modify the build spec to ensure build deps are preserved
# For nodes that were not spliced, replace the build deps on the spec itself
for orig, copy in node_pairs:
for edge in orig.edges_to_dependencies(depflag=dt.BUILD):
copy._add_dependency(edge.spec, depflag=dt.BUILD, virtuals=edge.virtuals)
if copy._build_spec:
copy._build_spec = orig.build_spec.copy()
else:
for edge in orig.edges_to_dependencies(depflag=dt.BUILD):
copy._add_dependency(edge.spec, depflag=dt.BUILD, virtuals=edge.virtuals)

return spec

@@ -4797,7 +4814,7 @@ def _load(cls, data):
virtuals=virtuals,
)
if "build_spec" in node.keys():
_, bhash, _ = cls.build_spec_from_node_dict(node, hash_type=hash_type)
_, bhash, _ = cls.extract_build_spec_info_from_node_dict(node, hash_type=hash_type)
node_spec._build_spec = hash_dict[bhash]["node_spec"]

return hash_dict[root_spec_hash]["node_spec"]
@@ -4925,7 +4942,7 @@ def extract_info_from_dep(cls, elt, hash):
return dep_hash, deptypes, hash_type, virtuals

@classmethod
def build_spec_from_node_dict(cls, node, hash_type=ht.dag_hash.name):
def extract_build_spec_info_from_node_dict(cls, node, hash_type=ht.dag_hash.name):
build_spec_dict = node["build_spec"]
return build_spec_dict["name"], build_spec_dict[hash_type], hash_type

@@ -68,22 +68,6 @@ def cache_directory(tmpdir):
|
||||
spack.config.caches = old_cache_path
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def mirror_dir(tmpdir_factory):
|
||||
dir = tmpdir_factory.mktemp("mirror")
|
||||
dir.ensure("build_cache", dir=True)
|
||||
yield str(dir)
|
||||
dir.join("build_cache").remove()
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def test_mirror(mirror_dir):
|
||||
mirror_url = url_util.path_to_file_url(mirror_dir)
|
||||
mirror_cmd("add", "--scope", "site", "test-mirror-func", mirror_url)
|
||||
yield mirror_dir
|
||||
mirror_cmd("rm", "--scope=site", "test-mirror-func")
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def config_directory(tmp_path_factory):
|
||||
# Copy defaults to a temporary "site" scope
|
||||
@@ -222,9 +206,9 @@ def dummy_prefix(tmpdir):
|
||||
@pytest.mark.requires_executables(*args)
|
||||
@pytest.mark.maybeslow
|
||||
@pytest.mark.usefixtures(
|
||||
"default_config", "cache_directory", "install_dir_default_layout", "test_mirror"
|
||||
"default_config", "cache_directory", "install_dir_default_layout", "temporary_mirror"
|
||||
)
|
||||
def test_default_rpaths_create_install_default_layout(mirror_dir):
|
||||
def test_default_rpaths_create_install_default_layout(temporary_mirror_dir):
|
||||
"""
|
||||
Test the creation and installation of buildcaches with default rpaths
|
||||
into the default directory layout scheme.
|
||||
@@ -237,13 +221,12 @@ def test_default_rpaths_create_install_default_layout(mirror_dir):
|
||||
install_cmd("--no-cache", sy_spec.name)
|
||||
|
||||
# Create a buildcache
|
||||
buildcache_cmd("push", "-u", mirror_dir, cspec.name, sy_spec.name)
|
||||
|
||||
buildcache_cmd("push", "-u", temporary_mirror_dir, cspec.name, sy_spec.name)
|
||||
# Test force overwrite create buildcache (-f option)
|
||||
buildcache_cmd("push", "-uf", mirror_dir, cspec.name)
|
||||
buildcache_cmd("push", "-uf", temporary_mirror_dir, cspec.name)
|
||||
|
||||
# Create mirror index
|
||||
buildcache_cmd("update-index", mirror_dir)
|
||||
buildcache_cmd("update-index", temporary_mirror_dir)
|
||||
|
||||
# List the buildcaches in the mirror
|
||||
buildcache_cmd("list", "-alv")
|
||||
@@ -271,9 +254,9 @@ def test_default_rpaths_create_install_default_layout(mirror_dir):
|
||||
@pytest.mark.maybeslow
|
||||
@pytest.mark.nomockstage
|
||||
@pytest.mark.usefixtures(
|
||||
"default_config", "cache_directory", "install_dir_non_default_layout", "test_mirror"
|
||||
"default_config", "cache_directory", "install_dir_non_default_layout", "temporary_mirror"
|
||||
)
|
||||
def test_default_rpaths_install_nondefault_layout(mirror_dir):
|
||||
def test_default_rpaths_install_nondefault_layout(temporary_mirror_dir):
|
||||
"""
|
||||
Test the creation and installation of buildcaches with default rpaths
|
||||
into the non-default directory layout scheme.
|
||||
@@ -294,9 +277,9 @@ def test_default_rpaths_install_nondefault_layout(mirror_dir):
|
||||
@pytest.mark.maybeslow
|
||||
@pytest.mark.nomockstage
|
||||
@pytest.mark.usefixtures(
|
||||
"default_config", "cache_directory", "install_dir_default_layout", "test_mirror"
|
||||
"default_config", "cache_directory", "install_dir_default_layout", "temporary_mirror"
|
||||
)
|
||||
def test_relative_rpaths_install_default_layout(mirror_dir):
|
||||
def test_relative_rpaths_install_default_layout(temporary_mirror_dir):
|
||||
"""
|
||||
Test the creation and installation of buildcaches with relative
|
||||
rpaths into the default directory layout scheme.
|
||||
@@ -323,9 +306,9 @@ def test_relative_rpaths_install_default_layout(mirror_dir):
|
||||
@pytest.mark.maybeslow
|
||||
@pytest.mark.nomockstage
|
||||
@pytest.mark.usefixtures(
|
||||
"default_config", "cache_directory", "install_dir_non_default_layout", "test_mirror"
|
||||
"default_config", "cache_directory", "install_dir_non_default_layout", "temporary_mirror"
|
||||
)
|
||||
def test_relative_rpaths_install_nondefault(mirror_dir):
|
||||
def test_relative_rpaths_install_nondefault(temporary_mirror_dir):
|
||||
"""
|
||||
Test the installation of buildcaches with relativized rpaths
|
||||
into the non-default directory layout scheme.
|
||||
@@ -374,9 +357,9 @@ def test_push_and_fetch_keys(mock_gnupghome, tmp_path):
|
||||
@pytest.mark.maybeslow
|
||||
@pytest.mark.nomockstage
|
||||
@pytest.mark.usefixtures(
|
||||
"default_config", "cache_directory", "install_dir_non_default_layout", "test_mirror"
|
||||
"default_config", "cache_directory", "install_dir_non_default_layout", "temporary_mirror"
|
||||
)
|
||||
def test_built_spec_cache(mirror_dir):
|
||||
def test_built_spec_cache(temporary_mirror_dir):
|
||||
"""Because the buildcache list command fetches the buildcache index
|
||||
and uses it to populate the binary_distribution built spec cache, when
|
||||
this test calls get_mirrors_for_spec, it is testing the population of
|
||||
@@ -397,7 +380,7 @@ def fake_dag_hash(spec, length=None):
|
||||
return "tal4c7h4z0gqmixb1eqa92mjoybxn5l6"[:length]
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("install_mockery", "mock_packages", "mock_fetch", "test_mirror")
|
||||
@pytest.mark.usefixtures("install_mockery", "mock_packages", "mock_fetch", "temporary_mirror")
|
||||
def test_spec_needs_rebuild(monkeypatch, tmpdir):
|
||||
"""Make sure needs_rebuild properly compares remote hash
|
||||
against locally computed one, avoiding unnecessary rebuilds"""
|
||||
@@ -518,7 +501,7 @@ def mock_list_url(url, recursive=False):
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("mock_fetch", "install_mockery")
|
||||
def test_update_sbang(tmpdir, test_mirror):
|
||||
def test_update_sbang(tmpdir, temporary_mirror):
|
||||
"""Test the creation and installation of buildcaches with default rpaths
|
||||
into the non-default directory layout scheme, triggering an update of the
|
||||
sbang.
|
||||
@@ -529,7 +512,7 @@ def test_update_sbang(tmpdir, test_mirror):
|
||||
old_spec_hash_str = "/{0}".format(old_spec.dag_hash())
|
||||
|
||||
# Need a fake mirror with *function* scope.
|
||||
mirror_dir = test_mirror
|
||||
mirror_dir = temporary_mirror
|
||||
|
||||
# Assume all commands will concretize old_spec the same way.
|
||||
install_cmd("--no-cache", old_spec.name)
|
||||
|
||||
@@ -516,6 +516,30 @@ def test_setting_dtags_based_on_config(config_setting, expected_flag, config, mo
|
||||
assert dtags_to_add.value == expected_flag
|
||||
|
||||
|
||||
def test_module_globals_available_at_setup_dependent_time(
|
||||
monkeypatch, mutable_config, mock_packages, working_env
|
||||
):
|
||||
"""Spack built package externaltest depends on an external package
|
||||
externaltool. Externaltool's setup_dependent_package needs to be able to
|
||||
access globals on the dependent"""
|
||||
|
||||
def setup_dependent_package(module, dependent_spec):
|
||||
# Make sure set_package_py_globals was already called on
|
||||
# dependents
|
||||
# ninja is always set by the setup context and is not None
|
||||
dependent_module = dependent_spec.package.module
|
||||
assert hasattr(dependent_module, "ninja")
|
||||
assert dependent_module.ninja is not None
|
||||
dependent_spec.package.test_attr = True
|
||||
|
||||
externaltool = spack.spec.Spec("externaltest").concretized()
|
||||
monkeypatch.setattr(
|
||||
externaltool["externaltool"].package, "setup_dependent_package", setup_dependent_package
|
||||
)
|
||||
spack.build_environment.setup_package(externaltool.package, False)
|
||||
assert externaltool.package.test_attr
|
||||
|
||||
|
||||
def test_build_jobs_sequential_is_sequential():
|
||||
assert (
|
||||
spack.config.determine_number_of_jobs(
|
||||
|
||||
@@ -12,22 +12,39 @@
|
||||
|
||||
|
||||
def test_build_task_errors(install_mockery):
|
||||
with pytest.raises(ValueError, match="must be a package"):
|
||||
inst.BuildTask("abc", None, False, 0, 0, 0, set())
|
||||
|
||||
"""Check expected errors when instantiating a BuildTask."""
|
||||
spec = spack.spec.Spec("trivial-install-test-package")
|
||||
pkg_cls = spack.repo.PATH.get_pkg_class(spec.name)
|
||||
with pytest.raises(ValueError, match="must have a concrete spec"):
|
||||
inst.BuildTask(pkg_cls(spec), None, False, 0, 0, 0, set())
|
||||
|
||||
# The value of the request argument is expected to not be checked.
|
||||
for pkg in [None, "abc"]:
|
||||
with pytest.raises(TypeError, match="must be a package"):
|
||||
inst.BuildTask(pkg, None)
|
||||
|
||||
with pytest.raises(ValueError, match="must have a concrete spec"):
|
||||
inst.BuildTask(pkg_cls(spec), None)
|
||||
|
||||
# Using a concretized package now means the request argument is checked.
|
||||
spec.concretize()
|
||||
assert spec.concrete
|
||||
with pytest.raises(ValueError, match="must have a build request"):
|
||||
inst.BuildTask(spec.package, None, False, 0, 0, 0, set())
|
||||
|
||||
with pytest.raises(TypeError, match="is not a valid build request"):
|
||||
inst.BuildTask(spec.package, None)
|
||||
|
||||
# Using a valid package and spec, the next check is the status argument.
|
||||
request = inst.BuildRequest(spec.package, {})
|
||||
with pytest.raises(spack.error.InstallError, match="Cannot create a build task"):
|
||||
inst.BuildTask(spec.package, request, False, 0, 0, inst.STATUS_REMOVED, set())
|
||||
|
||||
with pytest.raises(TypeError, match="is not a valid build status"):
|
||||
inst.BuildTask(spec.package, request, status="queued")
|
||||
|
||||
# Now we can check that build tasks cannot be created when the status
|
||||
# indicates the task is/should've been removed.
|
||||
with pytest.raises(spack.error.InstallError, match="Cannot create a task"):
|
||||
inst.BuildTask(spec.package, request, status=inst.BuildStatus.REMOVED)
|
||||
|
||||
# Also make sure to not accept an incompatible installed argument value.
|
||||
with pytest.raises(TypeError, match="'installed' be a 'set', not 'str'"):
|
||||
inst.BuildTask(spec.package, request, installed="mpileaks")
|
||||
|
||||
|
||||
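For context, the keyword-based construction these checks exercise looks roughly like the following; it mirrors the calls in the updated tests (which refer to spack.installer as inst) and assumes the same mock package and install_mockery fixtures:

import spack.installer as inst
import spack.spec

spec = spack.spec.Spec("trivial-install-test-package").concretized()
request = inst.BuildRequest(spec.package, {})

# request, status and installed are now validated keyword arguments
task = inst.BuildTask(spec.package, request=request, status=inst.BuildStatus.QUEUED)
assert task.priority == len(task.uninstalled_deps)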
def test_build_task_basics(install_mockery):
|
||||
@@ -37,7 +54,7 @@ def test_build_task_basics(install_mockery):
|
||||
|
||||
# Ensure key properties match expectations
|
||||
request = inst.BuildRequest(spec.package, {})
|
||||
task = inst.BuildTask(spec.package, request, False, 0, 0, inst.STATUS_ADDED, set())
|
||||
task = inst.BuildTask(spec.package, request=request, status=inst.BuildStatus.QUEUED)
|
||||
assert not task.explicit
|
||||
assert task.priority == len(task.uninstalled_deps)
|
||||
assert task.key == (task.priority, task.sequence)
|
||||
@@ -59,16 +76,16 @@ def test_build_task_strings(install_mockery):
|
||||
|
||||
# Ensure key properties match expectations
|
||||
request = inst.BuildRequest(spec.package, {})
|
||||
task = inst.BuildTask(spec.package, request, False, 0, 0, inst.STATUS_ADDED, set())
|
||||
task = inst.BuildTask(spec.package, request=request, status=inst.BuildStatus.QUEUED)
|
||||
|
||||
# Cover __repr__
|
||||
irep = task.__repr__()
|
||||
assert irep.startswith(task.__class__.__name__)
|
||||
assert "status='queued'" in irep # == STATUS_ADDED
|
||||
assert "BuildStatus.QUEUED" in irep
|
||||
assert "sequence=" in irep
|
||||
|
||||
# Cover __str__
|
||||
istr = str(task)
|
||||
assert "status=queued" in istr # == STATUS_ADDED
|
||||
assert "status=queued" in istr # == BuildStatus.QUEUED
|
||||
assert "#dependencies=1" in istr
|
||||
assert "priority=" in istr
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
import os
|
||||
import pathlib
|
||||
import shutil
|
||||
from io import BytesIO
|
||||
from typing import NamedTuple
|
||||
|
||||
import jsonschema
|
||||
@@ -1846,3 +1847,91 @@ def test_ci_generate_mirror_config(
|
||||
pipeline_doc = syaml.load(f)
|
||||
assert fst not in pipeline_doc["rebuild-index"]["script"][0]
|
||||
assert snd in pipeline_doc["rebuild-index"]["script"][0]
|
||||
|
||||
|
||||
def dynamic_mapping_setup(tmpdir):
|
||||
filename = str(tmpdir.join("spack.yaml"))
|
||||
with open(filename, "w") as f:
|
||||
f.write(
|
||||
"""\
|
||||
spack:
|
||||
specs:
|
||||
- pkg-a
|
||||
mirrors:
|
||||
some-mirror: https://my.fake.mirror
|
||||
ci:
|
||||
pipeline-gen:
|
||||
- dynamic-mapping:
|
||||
endpoint: https://fake.spack.io/mapper
|
||||
require: ["variables"]
|
||||
ignore: ["ignored_field"]
|
||||
allow: ["variables", "retry"]
|
||||
"""
|
||||
)
|
||||
|
||||
spec_a = Spec("pkg-a")
|
||||
spec_a.concretize()
|
||||
|
||||
return ci.get_job_name(spec_a)
|
||||
|
||||
|
||||
def test_ci_dynamic_mapping_empty(
|
||||
tmpdir,
|
||||
working_env,
|
||||
mutable_mock_env_path,
|
||||
install_mockery,
|
||||
mock_packages,
|
||||
monkeypatch,
|
||||
ci_base_environment,
|
||||
):
|
||||
# The test will always return an empty dictionary
|
||||
def fake_dyn_mapping_urlopener(*args, **kwargs):
|
||||
return BytesIO("{}".encode())
|
||||
|
||||
monkeypatch.setattr(ci, "_dyn_mapping_urlopener", fake_dyn_mapping_urlopener)
|
||||
|
||||
_ = dynamic_mapping_setup(tmpdir)
|
||||
with tmpdir.as_cwd():
|
||||
env_cmd("create", "test", "./spack.yaml")
|
||||
outputfile = str(tmpdir.join(".gitlab-ci.yml"))
|
||||
|
||||
with ev.read("test"):
|
||||
output = ci_cmd("generate", "--output-file", outputfile)
|
||||
assert "Response missing required keys: ['variables']" in output
|
||||
|
||||
|
||||
def test_ci_dynamic_mapping_full(
|
||||
tmpdir,
|
||||
working_env,
|
||||
mutable_mock_env_path,
|
||||
install_mockery,
|
||||
mock_packages,
|
||||
monkeypatch,
|
||||
ci_base_environment,
|
||||
):
|
||||
# The fake endpoint returns a mapping with one allowed, one ignored, and one unallowed field
|
||||
def fake_dyn_mapping_urlopener(*args, **kwargs):
|
||||
return BytesIO(
|
||||
json.dumps(
|
||||
{"variables": {"MY_VAR": "hello"}, "ignored_field": 0, "unallowed_field": 0}
|
||||
).encode()
|
||||
)
|
||||
|
||||
monkeypatch.setattr(ci, "_dyn_mapping_urlopener", fake_dyn_mapping_urlopener)
|
||||
|
||||
label = dynamic_mapping_setup(tmpdir)
|
||||
with tmpdir.as_cwd():
|
||||
env_cmd("create", "test", "./spack.yaml")
|
||||
outputfile = str(tmpdir.join(".gitlab-ci.yml"))
|
||||
|
||||
with ev.read("test"):
|
||||
ci_cmd("generate", "--output-file", outputfile)
|
||||
|
||||
with open(outputfile) as of:
|
||||
pipeline_doc = syaml.load(of.read())
|
||||
assert label in pipeline_doc
|
||||
job = pipeline_doc[label]
|
||||
|
||||
assert job.get("variables", {}).get("MY_VAR") == "hello"
|
||||
assert "ignored_field" not in job
|
||||
assert "unallowed_field" not in job
|
||||
|
||||
@@ -164,3 +164,30 @@ def test_concretize_deprecated(mock_packages, mock_archive, mock_fetch, install_
|
||||
spec = spack.spec.Spec("libelf@0.8.10")
|
||||
with pytest.raises(spack.spec.SpecDeprecatedError):
|
||||
spec.concretize()
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("mock_packages", "mock_archive", "mock_fetch", "install_mockery")
|
||||
@pytest.mark.regression("46915")
|
||||
def test_deprecate_spec_with_external_dependency(mutable_config, temporary_store, tmp_path):
|
||||
"""Tests that we can deprecate a spec that has an external dependency"""
|
||||
packages_yaml = {
|
||||
"libelf": {
|
||||
"buildable": False,
|
||||
"externals": [{"spec": "libelf@0.8.13", "prefix": str(tmp_path / "libelf")}],
|
||||
}
|
||||
}
|
||||
mutable_config.set("packages", packages_yaml)
|
||||
|
||||
install("--fake", "dyninst ^libdwarf@=20111030")
|
||||
install("--fake", "libdwarf@=20130729")
|
||||
|
||||
# Ensure we are using the external libelf
|
||||
db = temporary_store.db
|
||||
libelf = db.query_one("libelf")
|
||||
assert libelf.external
|
||||
|
||||
deprecated_spec = db.query_one("libdwarf@=20111030")
|
||||
new_libdwarf = db.query_one("libdwarf@=20130729")
|
||||
deprecate("-y", "libdwarf@=20111030", "libdwarf@=20130729")
|
||||
|
||||
assert db.deprecator(deprecated_spec) == new_libdwarf
|
||||
|
||||
@@ -65,6 +65,12 @@ def test_develop_no_clone(self, tmpdir):
|
||||
develop("--no-clone", "-p", str(tmpdir), "mpich@1.0")
|
||||
self.check_develop(e, spack.spec.Spec("mpich@=1.0"), str(tmpdir))
|
||||
|
||||
def test_develop_no_version(self, tmpdir):
|
||||
env("create", "test")
|
||||
with ev.read("test") as e:
|
||||
develop("--no-clone", "-p", str(tmpdir), "mpich")
|
||||
self.check_develop(e, spack.spec.Spec("mpich@=main"), str(tmpdir))
|
||||
|
||||
def test_develop(self):
|
||||
env("create", "test")
|
||||
with ev.read("test") as e:
|
||||
|
||||
@@ -38,6 +38,7 @@
|
||||
import spack.util.spack_json as sjson
|
||||
import spack.util.spack_yaml
|
||||
from spack.cmd.env import _env_create
|
||||
from spack.installer import PackageInstaller
|
||||
from spack.main import SpackCommand, SpackCommandError
|
||||
from spack.spec import Spec
|
||||
from spack.stage import stage_prefix
|
||||
@@ -574,42 +575,76 @@ def test_remove_command():
|
||||
|
||||
with ev.read("test"):
|
||||
add("mpileaks")
|
||||
|
||||
with ev.read("test"):
|
||||
assert "mpileaks" in find()
|
||||
assert "mpileaks@" not in find()
|
||||
assert "mpileaks@" not in find("--show-concretized")
|
||||
|
||||
with ev.read("test"):
|
||||
remove("mpileaks")
|
||||
|
||||
with ev.read("test"):
|
||||
assert "mpileaks" not in find()
|
||||
assert "mpileaks@" not in find()
|
||||
assert "mpileaks@" not in find("--show-concretized")
|
||||
|
||||
with ev.read("test"):
|
||||
add("mpileaks")
|
||||
|
||||
with ev.read("test"):
|
||||
assert "mpileaks" in find()
|
||||
assert "mpileaks@" not in find()
|
||||
assert "mpileaks@" not in find("--show-concretized")
|
||||
|
||||
with ev.read("test"):
|
||||
concretize()
|
||||
|
||||
with ev.read("test"):
|
||||
assert "mpileaks" in find()
|
||||
assert "mpileaks@" not in find()
|
||||
assert "mpileaks@" in find("--show-concretized")
|
||||
|
||||
with ev.read("test"):
|
||||
remove("mpileaks")
|
||||
|
||||
with ev.read("test"):
|
||||
assert "mpileaks" not in find()
|
||||
# removed but still in last concretized specs
|
||||
assert "mpileaks@" in find("--show-concretized")
|
||||
|
||||
with ev.read("test"):
|
||||
concretize()
|
||||
|
||||
with ev.read("test"):
|
||||
assert "mpileaks" not in find()
|
||||
assert "mpileaks@" not in find()
|
||||
# now the lockfile is regenerated and it's gone.
|
||||
assert "mpileaks@" not in find("--show-concretized")
|
||||
|
||||
|
||||
def test_remove_command_all():
|
||||
# Need separate ev.read calls for each command to ensure we test round-trip to disk
|
||||
env("create", "test")
|
||||
test_pkgs = ("mpileaks", "zlib")
|
||||
|
||||
with ev.read("test"):
|
||||
for name in test_pkgs:
|
||||
add(name)
|
||||
|
||||
with ev.read("test"):
|
||||
for name in test_pkgs:
|
||||
assert name in find()
|
||||
assert f"{name}@" not in find()
|
||||
|
||||
with ev.read("test"):
|
||||
remove("-a")
|
||||
|
||||
with ev.read("test"):
|
||||
for name in test_pkgs:
|
||||
assert name not in find()
|
||||
|
||||
|
||||
def test_bad_remove_included_env():
|
||||
env("create", "test")
|
||||
test = ev.read("test")
|
||||
@@ -769,6 +804,39 @@ def test_user_removed_spec(environment_from_manifest):
|
||||
assert not any(x.name == "hypre" for x in env_specs)
|
||||
|
||||
|
||||
def test_lockfile_spliced_specs(environment_from_manifest, install_mockery):
"""Test that an environment can round-trip a spliced spec."""
# Create a local install for zmpi to splice in
# Default concretization is not using zmpi
zmpi = spack.spec.Spec("zmpi").concretized()
PackageInstaller([zmpi.package], fake=True).install()

e1 = environment_from_manifest(
f"""
spack:
specs:
- mpileaks
concretizer:
splice:
explicit:
- target: mpi
replacement: zmpi/{zmpi.dag_hash()}
"""
)
with e1:
e1.concretize()
e1.write()

# By reading into a second environment, we force a round trip to json
e2 = _env_create("test2", init_file=e1.lock_path)

# The one spec is mpileaks
for _, spec in e2.concretized_specs():
assert spec.spliced
assert spec["mpi"].satisfies(f"zmpi@{zmpi.version}")
assert spec["mpi"].build_spec.satisfies(zmpi)

def test_init_from_lockfile(environment_from_manifest):
|
||||
"""Test that an environment can be instantiated from a lockfile."""
|
||||
e1 = environment_from_manifest(
|
||||
@@ -3885,7 +3953,7 @@ def test_environment_depfile_makefile(depfile_flags, expected_installs, tmpdir,
|
||||
)
|
||||
|
||||
# Do make dry run.
|
||||
out = make("-n", "-f", makefile, output=str)
|
||||
out = make("-n", "-f", makefile, "SPACK=spack", output=str)
|
||||
|
||||
specs_that_make_would_install = _parse_dry_run_package_installs(out)
|
||||
|
||||
@@ -3923,7 +3991,7 @@ def test_depfile_works_with_gitversions(tmpdir, mock_packages, monkeypatch):
|
||||
env("depfile", "-o", makefile, "--make-disable-jobserver", "--make-prefix=prefix")
|
||||
|
||||
# Do a dry run on the generated depfile
|
||||
out = make("-n", "-f", makefile, output=str)
|
||||
out = make("-n", "-f", makefile, "SPACK=spack", output=str)
|
||||
|
||||
# Check that all specs are there (without duplicates)
|
||||
specs_that_make_would_install = _parse_dry_run_package_installs(out)
|
||||
@@ -3985,7 +4053,12 @@ def test_depfile_phony_convenience_targets(
|
||||
|
||||
# Phony install/* target should install picked package and all its deps
|
||||
specs_that_make_would_install = _parse_dry_run_package_installs(
|
||||
make("-n", picked_spec.format("install/{name}-{version}-{hash}"), output=str)
|
||||
make(
|
||||
"-n",
|
||||
picked_spec.format("install/{name}-{version}-{hash}"),
|
||||
"SPACK=spack",
|
||||
output=str,
|
||||
)
|
||||
)
|
||||
|
||||
assert set(specs_that_make_would_install) == set(expected_installs)
|
||||
@@ -3993,7 +4066,12 @@ def test_depfile_phony_convenience_targets(
|
||||
|
||||
# Phony install-deps/* target shouldn't install picked package
|
||||
specs_that_make_would_install = _parse_dry_run_package_installs(
|
||||
make("-n", picked_spec.format("install-deps/{name}-{version}-{hash}"), output=str)
|
||||
make(
|
||||
"-n",
|
||||
picked_spec.format("install-deps/{name}-{version}-{hash}"),
|
||||
"SPACK=spack",
|
||||
output=str,
|
||||
)
|
||||
)
|
||||
|
||||
assert set(specs_that_make_would_install) == set(expected_installs) - {picked_package}
|
||||
@@ -4053,7 +4131,7 @@ def test_spack_package_ids_variable(tmpdir, mock_packages):
|
||||
make = Executable("make")
|
||||
|
||||
# Do dry run.
|
||||
out = make("-n", "-C", str(tmpdir), output=str)
|
||||
out = make("-n", "-C", str(tmpdir), "SPACK=spack", output=str)
|
||||
|
||||
# post-install: <hash> should've been executed
|
||||
with ev.read("test") as test:
|
||||
|
||||
@@ -70,10 +70,10 @@ def test_query_arguments():
|
||||
|
||||
q_args = query_arguments(args)
|
||||
assert "installed" in q_args
|
||||
assert "known" in q_args
|
||||
assert "predicate_fn" in q_args
|
||||
assert "explicit" in q_args
|
||||
assert q_args["installed"] == ["installed"]
|
||||
assert q_args["known"] is any
|
||||
assert q_args["predicate_fn"] is None
|
||||
assert q_args["explicit"] is any
|
||||
assert "start_date" in q_args
|
||||
assert "end_date" not in q_args
|
||||
|
||||
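The new predicate_fn query argument surfaced here is exercised directly against the database later in this changeset; a minimal sketch of that usage, with an illustrative store handle:

import spack.store

db = spack.store.STORE.db
# predicate_fn receives a database record and can filter on anything reachable
# from it; it appears to replace the older known=... style of filtering.
mpi_like = db.query(predicate_fn=lambda rec: rec.spec.name.startswith("mpi"))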
@@ -4,9 +4,12 @@
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import os.path
|
||||
import sys
|
||||
|
||||
import pytest
|
||||
|
||||
from llnl.util.symlink import _windows_can_symlink
|
||||
|
||||
import spack.util.spack_yaml as s_yaml
|
||||
from spack.installer import PackageInstaller
|
||||
from spack.main import SpackCommand
|
||||
@@ -16,7 +19,16 @@
|
||||
install = SpackCommand("install")
|
||||
view = SpackCommand("view")
|
||||
|
||||
pytestmark = pytest.mark.not_on_windows("does not run on windows")
|
||||
if sys.platform == "win32":
|
||||
if not _windows_can_symlink():
|
||||
pytest.skip(
|
||||
"Windows must be able to create symlinks to run tests.", allow_module_level=True
|
||||
)
|
||||
# TODO: Skipping hardlink command testing on windows until robust checks can be added.
|
||||
# See https://github.com/spack/spack/pull/46335#discussion_r1757411915
|
||||
commands = ["symlink", "add", "copy", "relocate"]
|
||||
else:
|
||||
commands = ["hardlink", "symlink", "hard", "add", "copy", "relocate"]
|
||||
|
||||
|
||||
def create_projection_file(tmpdir, projection):
|
||||
@@ -28,7 +40,7 @@ def create_projection_file(tmpdir, projection):
|
||||
return projection_file
|
||||
|
||||
|
||||
@pytest.mark.parametrize("cmd", ["hardlink", "symlink", "hard", "add", "copy", "relocate"])
|
||||
@pytest.mark.parametrize("cmd", commands)
|
||||
def test_view_link_type(tmpdir, mock_packages, mock_archive, mock_fetch, install_mockery, cmd):
|
||||
install("libdwarf")
|
||||
viewpath = str(tmpdir.mkdir("view_{0}".format(cmd)))
|
||||
@@ -41,7 +53,7 @@ def test_view_link_type(tmpdir, mock_packages, mock_archive, mock_fetch, install
|
||||
assert os.path.islink(package_prefix) == is_link_cmd
|
||||
|
||||
|
||||
@pytest.mark.parametrize("add_cmd", ["hardlink", "symlink", "hard", "add", "copy", "relocate"])
|
||||
@pytest.mark.parametrize("add_cmd", commands)
|
||||
def test_view_link_type_remove(
|
||||
tmpdir, mock_packages, mock_archive, mock_fetch, install_mockery, add_cmd
|
||||
):
|
||||
@@ -55,7 +67,7 @@ def test_view_link_type_remove(
|
||||
assert not os.path.exists(bindir)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("cmd", ["hardlink", "symlink", "hard", "add", "copy", "relocate"])
|
||||
@pytest.mark.parametrize("cmd", commands)
|
||||
def test_view_projections(tmpdir, mock_packages, mock_archive, mock_fetch, install_mockery, cmd):
|
||||
install("libdwarf@20130207")
|
||||
|
||||
|
||||
@@ -461,9 +461,13 @@ def test_intel_flags():
|
||||
unsupported_flag_test("cxx14_flag", "intel@=14.0")
|
||||
supported_flag_test("cxx14_flag", "-std=c++1y", "intel@=15.0")
|
||||
supported_flag_test("cxx14_flag", "-std=c++14", "intel@=15.0.2")
|
||||
unsupported_flag_test("cxx17_flag", "intel@=18")
|
||||
supported_flag_test("cxx17_flag", "-std=c++17", "intel@=19.0")
|
||||
unsupported_flag_test("c99_flag", "intel@=11.0")
|
||||
supported_flag_test("c99_flag", "-std=c99", "intel@=12.0")
|
||||
unsupported_flag_test("c11_flag", "intel@=15.0")
|
||||
supported_flag_test("c18_flag", "-std=c18", "intel@=21.5.0")
|
||||
unsupported_flag_test("c18_flag", "intel@=21.4.0")
|
||||
supported_flag_test("c11_flag", "-std=c1x", "intel@=16.0")
|
||||
supported_flag_test("cc_pic_flag", "-fPIC", "intel@=1.0")
|
||||
supported_flag_test("cxx_pic_flag", "-fPIC", "intel@=1.0")
|
||||
|
||||
@@ -2281,6 +2281,31 @@ def test_virtuals_are_annotated_on_edges(self, spec_str):
edges = spec.edges_to_dependencies(name="callpath")
assert len(edges) == 1 and edges[0].virtuals == ()

@pytest.mark.parametrize("transitive", [True, False])
def test_explicit_splices(
self, mutable_config, database_mutable_config, mock_packages, transitive, capfd
):
mpich_spec = database_mutable_config.query("mpich")[0]
splice_info = {
"target": "mpi",
"replacement": f"/{mpich_spec.dag_hash()}",
"transitive": transitive,
}
spack.config.CONFIG.set("concretizer", {"splice": {"explicit": [splice_info]}})

spec = spack.spec.Spec("hdf5 ^zmpi").concretized()

assert spec.satisfies(f"^mpich@{mpich_spec.version}")
assert spec.build_spec.dependencies(name="zmpi", deptype="link")
assert spec["mpi"].build_spec.satisfies(mpich_spec)
assert not spec.build_spec.satisfies(f"^mpich/{mpich_spec.dag_hash()}")
assert not spec.dependencies(name="zmpi", deptype="link")

captured = capfd.readouterr()
assert "Warning: explicit splice configuration has caused" in captured.err
assert "hdf5 ^zmpi" in captured.err
assert str(spec) in captured.err

@pytest.mark.db
@pytest.mark.parametrize(
"spec_str,mpi_name",

@@ -62,8 +62,11 @@
|
||||
import spack.version
|
||||
from spack.fetch_strategy import URLFetchStrategy
|
||||
from spack.installer import PackageInstaller
|
||||
from spack.main import SpackCommand
|
||||
from spack.util.pattern import Bunch
|
||||
|
||||
mirror_cmd = SpackCommand("mirror")
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def check_config_fixture(request):
|
||||
@@ -989,6 +992,38 @@ def install_mockery(temporary_store: spack.store.Store, mutable_config, mock_pac
|
||||
temporary_store.failure_tracker.clear_all()
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def temporary_mirror_dir(tmpdir_factory):
|
||||
dir = tmpdir_factory.mktemp("mirror")
|
||||
dir.ensure("build_cache", dir=True)
|
||||
yield str(dir)
|
||||
dir.join("build_cache").remove()
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def temporary_mirror(temporary_mirror_dir):
|
||||
mirror_url = url_util.path_to_file_url(temporary_mirror_dir)
|
||||
mirror_cmd("add", "--scope", "site", "test-mirror-func", mirror_url)
|
||||
yield temporary_mirror_dir
|
||||
mirror_cmd("rm", "--scope=site", "test-mirror-func")
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def mutable_temporary_mirror_dir(tmpdir_factory):
|
||||
dir = tmpdir_factory.mktemp("mirror")
|
||||
dir.ensure("build_cache", dir=True)
|
||||
yield str(dir)
|
||||
dir.join("build_cache").remove()
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def mutable_temporary_mirror(mutable_temporary_mirror_dir):
|
||||
mirror_url = url_util.path_to_file_url(mutable_temporary_mirror_dir)
|
||||
mirror_cmd("add", "--scope", "site", "test-mirror-func", mirror_url)
|
||||
yield mutable_temporary_mirror_dir
|
||||
mirror_cmd("rm", "--scope=site", "test-mirror-func")
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def temporary_store(tmpdir, request):
|
||||
"""Hooks a temporary empty store for the test function."""
|
||||
@@ -1980,6 +2015,11 @@ def pytest_runtest_setup(item):
|
||||
if not_on_windows_marker and sys.platform == "win32":
|
||||
pytest.skip(*not_on_windows_marker.args)
|
||||
|
||||
# Skip items marked "only windows" if they're run anywhere but Windows
|
||||
only_windows_marker = item.get_closest_marker(name="only_windows")
|
||||
if only_windows_marker and sys.platform != "win32":
|
||||
pytest.skip(*only_windows_marker.args)
|
||||
|
||||
|
||||
def _sequential_executor(*args, **kwargs):
|
||||
return spack.util.parallel.SequentialExecutor()
|
||||
|
||||
lib/spack/spack/test/data/modules/tcl/suffix-format.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
enable:
- tcl
tcl:
  all:
    autoload: none
  mpileaks:
    suffixes:
      mpileaks: 'debug={variants.debug.value}'
      '^mpi': 'mpi={^mpi.name}-v{^mpi.version}'
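For illustration, with this configuration an mpileaks module would likely pick up a suffix rendered from its debug variant (e.g. debug=True) and, via the '^mpi' entry, one derived from its MPI provider (e.g. mpi=mpich-v3.0.4 for a spec built against mpich@3.0.4); the exact joining of the suffixes depends on the tcl naming scheme in use.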
@@ -1181,3 +1181,20 @@ def test_reindex_with_upstreams(tmp_path, monkeypatch, mock_packages, config):
|
||||
assert not reindexed_local_store.db.query_local("callpath")
|
||||
assert reindexed_local_store.db.query("callpath") == [callpath]
|
||||
assert reindexed_local_store.db.query_local("mpileaks") == [mpileaks]
|
||||
|
||||
|
||||
@pytest.mark.regression("47101")
|
||||
def test_query_with_predicate_fn(database):
|
||||
all_specs = database.query()
|
||||
|
||||
# Name starts with a string
|
||||
specs = database.query(predicate_fn=lambda x: x.spec.name.startswith("mpil"))
|
||||
assert specs and all(x.name.startswith("mpil") for x in specs)
|
||||
assert len(specs) < len(all_specs)
|
||||
|
||||
# Recipe is currently known/unknown
|
||||
specs = database.query(predicate_fn=lambda x: spack.repo.PATH.exists(x.spec.name))
|
||||
assert specs == all_specs
|
||||
|
||||
specs = database.query(predicate_fn=lambda x: not spack.repo.PATH.exists(x.spec.name))
|
||||
assert not specs
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
import spack.config
|
||||
import spack.detection
|
||||
import spack.detection.common
|
||||
import spack.detection.path
|
||||
import spack.spec
|
||||
|
||||
|
||||
@@ -26,3 +27,28 @@ def test_detection_update_config(mutable_config):
|
||||
external_gcc = externals[0]
|
||||
assert external_gcc["spec"] == "cmake@3.27.5"
|
||||
assert external_gcc["prefix"] == "/usr/bin"
|
||||
|
||||
|
||||
def test_dedupe_paths(tmp_path):
|
||||
"""Test that ``dedupe_paths`` deals with symlinked directories, retaining the target"""
|
||||
x = tmp_path / "x"
|
||||
y = tmp_path / "y"
|
||||
z = tmp_path / "z"
|
||||
|
||||
x.mkdir()
|
||||
y.mkdir()
|
||||
z.symlink_to("x", target_is_directory=True)
|
||||
|
||||
# dedupe repeated dirs, should preserve order
|
||||
assert spack.detection.path.dedupe_paths([str(x), str(y), str(x)]) == [str(x), str(y)]
|
||||
assert spack.detection.path.dedupe_paths([str(y), str(x), str(y)]) == [str(y), str(x)]
|
||||
|
||||
# dedupe repeated symlinks
|
||||
assert spack.detection.path.dedupe_paths([str(z), str(y), str(z)]) == [str(z), str(y)]
|
||||
assert spack.detection.path.dedupe_paths([str(y), str(z), str(y)]) == [str(y), str(z)]
|
||||
|
||||
# when both symlink and target are present, only target is retained, and it comes at the
|
||||
# priority of the first occurrence.
|
||||
assert spack.detection.path.dedupe_paths([str(x), str(y), str(z)]) == [str(x), str(y)]
|
||||
assert spack.detection.path.dedupe_paths([str(z), str(y), str(x)]) == [str(x), str(y)]
|
||||
assert spack.detection.path.dedupe_paths([str(y), str(z), str(x)]) == [str(y), str(x)]
|
||||
|
||||
@@ -353,21 +353,21 @@ def test_install_prefix_collision_fails(config, mock_fetch, mock_packages, tmpdi
|
||||
Test that different specs with coinciding install prefixes will fail
|
||||
to install.
|
||||
"""
|
||||
projections = {"projections": {"all": "all-specs-project-to-this-prefix"}}
|
||||
projections = {"projections": {"all": "one-prefix-per-package-{name}"}}
|
||||
with spack.store.use_store(str(tmpdir), extra_data=projections):
|
||||
with spack.config.override("config:checksum", False):
|
||||
pkg_a = Spec("libelf@0.8.13").concretized().package
|
||||
pkg_b = Spec("libelf@0.8.12").concretized().package
|
||||
PackageInstaller([pkg_a], explicit=True).install()
|
||||
PackageInstaller([pkg_a], explicit=True, fake=True).install()
|
||||
|
||||
with pytest.raises(InstallError, match="Install prefix collision"):
|
||||
PackageInstaller([pkg_b], explicit=True).install()
|
||||
PackageInstaller([pkg_b], explicit=True, fake=True).install()
|
||||
|
||||
|
||||
def test_store(install_mockery, mock_fetch):
|
||||
spec = Spec("cmake-client").concretized()
|
||||
pkg = spec.package
|
||||
PackageInstaller([pkg], explicit=True).install()
|
||||
PackageInstaller([pkg], fake=True, explicit=True).install()
|
||||
|
||||
|
||||
@pytest.mark.disable_clean_stage_check
|
||||
|
||||
@@ -29,6 +29,7 @@
|
||||
import spack.store
|
||||
import spack.util.lock as lk
|
||||
from spack.installer import PackageInstaller
|
||||
from spack.main import SpackCommand
|
||||
|
||||
|
||||
def _mock_repo(root, namespace):
|
||||
@@ -73,7 +74,7 @@ def create_build_task(
|
||||
pkg: spack.package_base.PackageBase, install_args: Optional[dict] = None
|
||||
) -> inst.BuildTask:
|
||||
request = inst.BuildRequest(pkg, {} if install_args is None else install_args)
|
||||
return inst.BuildTask(pkg, request, False, 0, 0, inst.STATUS_ADDED, set())
|
||||
return inst.BuildTask(pkg, request=request, status=inst.BuildStatus.QUEUED)
|
||||
|
||||
|
||||
def create_installer(
|
||||
@@ -640,6 +641,88 @@ def test_prepare_for_install_on_installed(install_mockery, monkeypatch):
|
||||
installer._prepare_for_install(task)
|
||||
|
||||
|
||||
def test_installer_init_requests(install_mockery):
|
||||
"""Test of installer initial requests."""
|
||||
spec_name = "dependent-install"
|
||||
with spack.config.override("config:install_missing_compilers", True):
|
||||
installer = create_installer([spec_name], {})
|
||||
|
||||
# There is only one explicit request in this case
|
||||
assert len(installer.build_requests) == 1
|
||||
request = installer.build_requests[0]
|
||||
assert request.pkg.name == spec_name
|
||||
|
||||
|
||||
@pytest.mark.parametrize("transitive", [True, False])
|
||||
def test_install_spliced(install_mockery, mock_fetch, monkeypatch, capsys, transitive):
|
||||
"""Test installing a spliced spec"""
|
||||
spec = spack.spec.Spec("splice-t").concretized()
|
||||
dep = spack.spec.Spec("splice-h+foo").concretized()
|
||||
|
||||
# Do the splice.
|
||||
out = spec.splice(dep, transitive)
|
||||
installer = create_installer([out], {"verbose": True, "fail_fast": True})
|
||||
installer.install()
|
||||
for node in out.traverse():
|
||||
assert node.installed
|
||||
assert node.build_spec.installed
|
||||
|
||||
|
||||
@pytest.mark.parametrize("transitive", [True, False])
|
||||
def test_install_spliced_build_spec_installed(install_mockery, capfd, mock_fetch, transitive):
|
||||
"""Test installing a spliced spec with the build spec already installed"""
|
||||
spec = spack.spec.Spec("splice-t").concretized()
|
||||
dep = spack.spec.Spec("splice-h+foo").concretized()
|
||||
|
||||
# Do the splice.
|
||||
out = spec.splice(dep, transitive)
|
||||
PackageInstaller([out.build_spec.package]).install()
|
||||
|
||||
installer = create_installer([out], {"verbose": True, "fail_fast": True})
|
||||
installer._init_queue()
|
||||
for _, task in installer.build_pq:
|
||||
assert isinstance(task, inst.RewireTask if task.pkg.spec.spliced else inst.BuildTask)
|
||||
installer.install()
|
||||
for node in out.traverse():
|
||||
assert node.installed
|
||||
assert node.build_spec.installed
|
||||
|
||||
|
||||
@pytest.mark.not_on_windows("lacking windows support for binary installs")
|
||||
@pytest.mark.parametrize("transitive", [True, False])
|
||||
@pytest.mark.parametrize(
|
||||
"root_str", ["splice-t^splice-h~foo", "splice-h~foo", "splice-vt^splice-a"]
|
||||
)
|
||||
def test_install_splice_root_from_binary(
|
||||
install_mockery, mock_fetch, mutable_temporary_mirror, transitive, root_str
|
||||
):
|
||||
"""Test installing a spliced spec with the root available in binary cache"""
|
||||
# Test splicing and rewiring a spec with the same name, different hash.
|
||||
original_spec = spack.spec.Spec(root_str).concretized()
|
||||
spec_to_splice = spack.spec.Spec("splice-h+foo").concretized()
|
||||
|
||||
PackageInstaller([original_spec.package, spec_to_splice.package]).install()
|
||||
|
||||
out = original_spec.splice(spec_to_splice, transitive)
|
||||
|
||||
buildcache = SpackCommand("buildcache")
|
||||
buildcache(
|
||||
"push",
|
||||
"--unsigned",
|
||||
"--update-index",
|
||||
mutable_temporary_mirror,
|
||||
str(original_spec),
|
||||
str(spec_to_splice),
|
||||
)
|
||||
|
||||
uninstall = SpackCommand("uninstall")
|
||||
uninstall("-ay")
|
||||
|
||||
PackageInstaller([out.package], unsigned=True).install()
|
||||
|
||||
assert len(spack.store.STORE.db.query()) == len(list(out.traverse()))
|
||||
|
||||
|
||||
def test_install_task_use_cache(install_mockery, monkeypatch):
|
||||
installer = create_installer(["trivial-install-test-package"], {})
|
||||
request = installer.build_requests[0]
|
||||
@@ -650,6 +733,33 @@ def test_install_task_use_cache(install_mockery, monkeypatch):
|
||||
assert request.pkg_id in installer.installed
|
||||
|
||||
|
||||
def test_install_task_requeue_build_specs(install_mockery, monkeypatch, capfd):
|
||||
"""Check that a missing build_spec spec is added by _install_task."""
|
||||
|
||||
# This test also ensures coverage of most of the new
|
||||
# _requeue_with_build_spec_tasks method.
|
||||
def _missing(*args, **kwargs):
|
||||
return inst.ExecuteResult.MISSING_BUILD_SPEC
|
||||
|
||||
# Set the configuration to ensure _requeue_with_build_spec_tasks actually
|
||||
# does something.
|
||||
with spack.config.override("config:install_missing_compilers", True):
|
||||
installer = create_installer(["depb"], {})
|
||||
installer._init_queue()
|
||||
request = installer.build_requests[0]
|
||||
task = create_build_task(request.pkg)
|
||||
|
||||
# Drop one of the specs so its task is missing before _install_task
|
||||
popped_task = installer._pop_task()
|
||||
assert inst.package_id(popped_task.pkg.spec) not in installer.build_tasks
|
||||
|
||||
monkeypatch.setattr(task, "execute", _missing)
|
||||
installer._install_task(task, None)
|
||||
|
||||
# Ensure the dropped task/spec was added back by _install_task
|
||||
assert inst.package_id(popped_task.pkg.spec) in installer.build_tasks
|
||||
|
||||
|
||||
def test_release_lock_write_n_exception(install_mockery, tmpdir, capsys):
|
||||
"""Test _release_lock for supposed write lock with exception."""
|
||||
installer = create_installer(["trivial-install-test-package"], {})
|
||||
@@ -698,7 +808,7 @@ def test_requeue_task(install_mockery, capfd):
|
||||
ids = list(installer.build_tasks)
|
||||
assert len(ids) == 1
|
||||
qtask = installer.build_tasks[ids[0]]
|
||||
assert qtask.status == inst.STATUS_INSTALLING
|
||||
assert qtask.status == inst.BuildStatus.INSTALLING
|
||||
assert qtask.sequence > task.sequence
|
||||
assert qtask.attempts == task.attempts + 1
|
||||
|
||||
@@ -745,8 +855,10 @@ def _chgrp(path, group, follow_symlinks=True):
|
||||
monkeypatch.setattr(prefs, "get_package_group", _get_group)
|
||||
monkeypatch.setattr(fs, "chgrp", _chgrp)
|
||||
|
||||
installer = create_installer(["trivial-install-test-package"], {})
|
||||
spec = installer.build_requests[0].pkg.spec
|
||||
build_task = create_build_task(
|
||||
spack.spec.Spec("trivial-install-test-package").concretized().package
|
||||
)
|
||||
spec = build_task.request.pkg.spec
|
||||
|
||||
fs.touchp(spec.prefix)
|
||||
metadatadir = spack.store.STORE.layout.metadata_path(spec)
|
||||
@@ -756,7 +868,7 @@ def _chgrp(path, group, follow_symlinks=True):
|
||||
metadatadir = None
|
||||
# Should fail with a "not a directory" error
|
||||
with pytest.raises(OSError, match=metadatadir):
|
||||
installer._setup_install_dir(spec.package)
|
||||
build_task._setup_install_dir(spec.package)
|
||||
|
||||
out = str(capfd.readouterr()[0])
|
||||
|
||||
@@ -843,79 +955,74 @@ def test_install_failed_not_fast(install_mockery, monkeypatch, capsys):
|
||||
assert "Skipping build of pkg-a" in out
|
||||
|
||||
|
||||
def test_install_fail_on_interrupt(install_mockery, monkeypatch):
|
||||
def _interrupt(installer, task, install_status, **kwargs):
|
||||
if task.pkg.name == "pkg-a":
|
||||
raise KeyboardInterrupt("mock keyboard interrupt for pkg-a")
|
||||
else:
|
||||
return installer._real_install_task(task, None)
|
||||
# installer.installed.add(task.pkg.name)
|
||||
|
||||
|
||||
def test_install_fail_on_interrupt(install_mockery, mock_fetch, monkeypatch):
|
||||
"""Test ctrl-c interrupted install."""
|
||||
spec_name = "pkg-a"
|
||||
err_msg = "mock keyboard interrupt for {0}".format(spec_name)
|
||||
|
||||
def _interrupt(installer, task, install_status, **kwargs):
|
||||
if task.pkg.name == spec_name:
|
||||
raise KeyboardInterrupt(err_msg)
|
||||
else:
|
||||
installer.installed.add(task.pkg.name)
|
||||
|
||||
installer = create_installer([spec_name], {})
|
||||
|
||||
installer = create_installer([spec_name], {"fake": True})
|
||||
setattr(inst.PackageInstaller, "_real_install_task", inst.PackageInstaller._install_task)
|
||||
# Raise a KeyboardInterrupt error to trigger early termination
|
||||
monkeypatch.setattr(inst.PackageInstaller, "_install_task", _interrupt)
|
||||
|
||||
with pytest.raises(KeyboardInterrupt, match=err_msg):
|
||||
installer.install()
|
||||
|
||||
assert "pkg-b" in installer.installed # ensure dependency of pkg-a is 'installed'
|
||||
assert spec_name not in installer.installed
|
||||
assert not any(i.startswith("pkg-a-") for i in installer.installed)
|
||||
assert any(
|
||||
i.startswith("pkg-b-") for i in installer.installed
|
||||
) # ensure dependency of a is 'installed'
|
||||
|
||||
|
||||
def test_install_fail_single(install_mockery, monkeypatch):
|
||||
class MyBuildException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def _install_fail_my_build_exception(installer, task, install_status, **kwargs):
|
||||
print(task, task.pkg.name)
|
||||
if task.pkg.name == "pkg-a":
|
||||
raise MyBuildException("mock internal package build error for pkg-a")
|
||||
else:
|
||||
# No need for more complex logic here because no splices
|
||||
task.execute(install_status)
|
||||
installer._update_installed(task)
|
||||
|
||||
|
||||
def test_install_fail_single(install_mockery, mock_fetch, monkeypatch):
|
||||
"""Test expected results for failure of single package."""
|
||||
spec_name = "pkg-a"
|
||||
err_msg = "mock internal package build error for {0}".format(spec_name)
|
||||
|
||||
class MyBuildException(Exception):
|
||||
pass
|
||||
|
||||
def _install(installer, task, install_status, **kwargs):
|
||||
if task.pkg.name == spec_name:
|
||||
raise MyBuildException(err_msg)
|
||||
else:
|
||||
installer.installed.add(task.pkg.name)
|
||||
|
||||
installer = create_installer([spec_name], {})
|
||||
installer = create_installer(["pkg-a"], {"fake": True})
|
||||
|
||||
# Raise a KeyboardInterrupt error to trigger early termination
|
||||
monkeypatch.setattr(inst.PackageInstaller, "_install_task", _install)
|
||||
monkeypatch.setattr(inst.PackageInstaller, "_install_task", _install_fail_my_build_exception)
|
||||
|
||||
with pytest.raises(MyBuildException, match=err_msg):
|
||||
with pytest.raises(MyBuildException, match="mock internal package build error for pkg-a"):
|
||||
installer.install()
|
||||
|
||||
assert "pkg-b" in installer.installed # ensure dependency of a is 'installed'
|
||||
assert spec_name not in installer.installed
|
||||
# ensure dependency of a is 'installed' and a is not
|
||||
assert any(pkg_id.startswith("pkg-b-") for pkg_id in installer.installed)
|
||||
assert not any(pkg_id.startswith("pkg-a-") for pkg_id in installer.installed)
|
||||
|
||||
|
||||
def test_install_fail_multi(install_mockery, monkeypatch):
|
||||
def test_install_fail_multi(install_mockery, mock_fetch, monkeypatch):
|
||||
"""Test expected results for failure of multiple packages."""
|
||||
spec_name = "pkg-c"
|
||||
err_msg = "mock internal package build error"
|
||||
|
||||
class MyBuildException(Exception):
|
||||
pass
|
||||
|
||||
def _install(installer, task, install_status, **kwargs):
|
||||
if task.pkg.name == spec_name:
|
||||
raise MyBuildException(err_msg)
|
||||
else:
|
||||
installer.installed.add(task.pkg.name)
|
||||
|
||||
installer = create_installer([spec_name, "pkg-a"], {})
|
||||
installer = create_installer(["pkg-a", "pkg-c"], {"fake": True})
|
||||
|
||||
# Raise a KeyboardInterrupt error to trigger early termination
|
||||
monkeypatch.setattr(inst.PackageInstaller, "_install_task", _install)
|
||||
monkeypatch.setattr(inst.PackageInstaller, "_install_task", _install_fail_my_build_exception)
|
||||
|
||||
with pytest.raises(spack.error.InstallError, match="Installation request failed"):
|
||||
installer.install()
|
||||
|
||||
assert "pkg-a" in installer.installed # ensure the the second spec installed
|
||||
assert spec_name not in installer.installed
|
||||
# ensure the second spec installed but not the first
|
||||
assert any(pkg_id.startswith("pkg-c-") for pkg_id in installer.installed)
|
||||
assert not any(pkg_id.startswith("pkg-a-") for pkg_id in installer.installed)
|
||||
|
||||
|
||||
def test_install_fail_fast_on_detect(install_mockery, monkeypatch, capsys):
|
||||
|
||||
@@ -1000,7 +1000,7 @@ def setup_test_dirs():
|
||||
shutil.rmtree(tmpdir.join("f"))
|
||||
|
||||
|
||||
@pytest.mark.skipif(sys.platform != "win32", reason="No-op on non Windows")
|
||||
@pytest.mark.only_windows("Test is for Windows specific behavior")
|
||||
def test_windows_sfn(tmpdir):
|
||||
# first check some standard Windows locations
|
||||
# we know require sfn names
|
||||
|
||||
@@ -5,7 +5,6 @@

"""Tests for ``llnl/util/symlink.py``"""
import os
import sys
import tempfile

import pytest
@@ -37,7 +36,7 @@ def test_symlink_dir(tmpdir):
assert symlink.islink(link_dir)

@pytest.mark.skipif(sys.platform != "win32", reason="Test is only for Windows")
@pytest.mark.only_windows("Test is for Windows specific behavior")
def test_symlink_source_not_exists(tmpdir):
"""Test the symlink.symlink method for the case where a source path does not exist"""
with tmpdir.as_cwd():
@@ -71,7 +70,7 @@ def test_symlink_src_relative_to_link(tmpdir):
assert os.path.lexists(link_dir)

@pytest.mark.skipif(sys.platform != "win32", reason="Test is only for Windows")
@pytest.mark.only_windows("Test is for Windows specific behavior")
def test_symlink_src_not_relative_to_link(tmpdir):
"""Test the symlink.symlink functionality where the source value does not exist relative to
the link and not relative to the cwd. NOTE that this symlink api call is EXPECTED to raise
@@ -98,7 +97,7 @@ def test_symlink_src_not_relative_to_link(tmpdir):
assert not os.path.lexists(link_dir)

@pytest.mark.skipif(sys.platform != "win32", reason="Test is only for Windows")
@pytest.mark.only_windows("Test is for Windows specific behavior")
def test_symlink_link_already_exists(tmpdir):
"""Test the symlink.symlink method for the case where a link already exists"""
with tmpdir.as_cwd():
@@ -113,7 +112,7 @@ def test_symlink_link_already_exists(tmpdir):

@pytest.mark.skipif(not symlink._windows_can_symlink(), reason="Test requires elevated privileges")
@pytest.mark.skipif(sys.platform != "win32", reason="Test is only for Windows")
@pytest.mark.only_windows("Test is for Windows specific behavior")
def test_symlink_win_file(tmpdir):
"""Check that symlink.symlink makes a symlink file when run with elevated permissions"""
with tmpdir.as_cwd():
@@ -130,7 +129,7 @@ def test_symlink_win_file(tmpdir):

@pytest.mark.skipif(not symlink._windows_can_symlink(), reason="Test requires elevated privileges")
@pytest.mark.skipif(sys.platform != "win32", reason="Test is only for Windows")
@pytest.mark.only_windows("Test is for Windows specific behavior")
def test_symlink_win_dir(tmpdir):
"""Check that symlink.symlink makes a symlink dir when run with elevated permissions"""
with tmpdir.as_cwd():
@@ -147,7 +146,7 @@ def test_symlink_win_dir(tmpdir):
assert not symlink._windows_is_junction(link_dir)

@pytest.mark.skipif(sys.platform != "win32", reason="Test is only for Windows")
@pytest.mark.only_windows("Test is for Windows specific behavior")
def test_windows_create_junction(tmpdir):
"""Test the symlink._windows_create_junction method"""
with tmpdir.as_cwd():
@@ -163,7 +162,7 @@ def test_windows_create_junction(tmpdir):
assert not os.path.islink(junction_link_dir)

@pytest.mark.skipif(sys.platform != "win32", reason="Test is only for Windows")
@pytest.mark.only_windows("Test is for Windows specific behavior")
def test_windows_create_hard_link(tmpdir):
"""Test the symlink._windows_create_hard_link method"""
with tmpdir.as_cwd():
@@ -179,7 +178,7 @@ def test_windows_create_hard_link(tmpdir):
assert not os.path.islink(link_file)

@pytest.mark.skipif(sys.platform != "win32", reason="Test is only for Windows")
@pytest.mark.only_windows("Test is for Windows specific behavior")
def test_windows_create_link_dir(tmpdir):
"""Test the functionality of the windows_create_link method with a directory
which should result in making a junction.
@@ -198,7 +197,7 @@ def test_windows_create_link_dir(tmpdir):
assert not os.path.islink(link_dir)

@pytest.mark.skipif(sys.platform != "win32", reason="Test is only for Windows")
@pytest.mark.only_windows("Test is for Windows specific behavior")
def test_windows_create_link_file(tmpdir):
"""Test the functionality of the windows_create_link method with a file
which should result in the creation of a hard link. It also tests the
@@ -215,7 +214,7 @@ def test_windows_create_link_file(tmpdir):
assert not symlink._windows_is_junction(link_file)

@pytest.mark.skipif(sys.platform != "win32", reason="Test is only for Windows")
@pytest.mark.only_windows("Test is for Windows specific behavior")
def test_windows_read_link(tmpdir):
"""Makes sure symlink.readlink can read the link source for hard links and
junctions on windows."""
@@ -8,6 +8,7 @@

import pytest

from llnl.util.filesystem import working_dir
from llnl.util.symlink import resolve_link_target_relative_to_the_link

import spack.caches
@@ -19,6 +20,7 @@
import spack.util.executable
import spack.util.spack_json as sjson
import spack.util.url as url_util
from spack.cmd.common.arguments import mirror_name_or_url
from spack.spec import Spec
from spack.util.executable import which
from spack.util.spack_yaml import SpackYAMLError
@@ -357,3 +359,12 @@ def test_update_connection_params(direction):
assert m.get_access_token(direction) == "token"
assert m.get_profile(direction) == "profile"
assert m.get_endpoint_url(direction) == "https://example.com"

def test_mirror_name_or_url_dir_parsing(tmp_path):
curdir = tmp_path / "mirror"
curdir.mkdir()

with working_dir(curdir):
assert mirror_name_or_url(".").fetch_url == curdir.as_uri()
assert mirror_name_or_url("..").fetch_url == tmp_path.as_uri()
@@ -377,6 +377,14 @@ def test_suffixes(self, module_configuration, factory):
writer, spec = factory("mpileaks~debug+opt target=x86_64")
assert "baz-foo-bar" in writer.layout.use_name

def test_suffixes_format(self, module_configuration, factory):
"""Tests adding suffixes as spec format string to module file name."""
module_configuration("suffix-format")

writer, spec = factory("mpileaks +debug target=x86_64 ^mpich@3.0.4")
assert "debug=True" in writer.layout.use_name
assert "mpi=mpich-v3.0.4" in writer.layout.use_name

def test_setup_environment(self, modulefile_content, module_configuration):
"""Tests the internal set-up of run-time environment."""
@@ -69,8 +69,8 @@ def test_buildcache_tag(install_mockery, mock_fetch, mutable_mock_env_path):
"""Tests whether we can create an OCI image from a full environment with multiple roots."""
env("create", "test")
with ev.read("test"):
install("--add", "libelf")
install("--add", "trivial-install-test-package")
install("--fake", "--add", "libelf")
install("--fake", "--add", "trivial-install-test-package")

registry = InMemoryOCIRegistry("example.com")

@@ -83,7 +83,7 @@ def test_buildcache_tag(install_mockery, mock_fetch, mutable_mock_env_path):
name = ImageReference.from_string("example.com/image:full_env")

with ev.read("test") as e:
specs = e.all_specs()
specs = [x for x in e.all_specs() if not x.external]

manifest, config = get_manifest_and_config(name)

@@ -100,7 +100,7 @@ def test_buildcache_tag(install_mockery, mock_fetch, mutable_mock_env_path):

name = ImageReference.from_string("example.com/image:single_spec")
manifest, config = get_manifest_and_config(name)
assert len(manifest["layers"]) == 1
assert len(manifest["layers"]) == len([x for x in libelf.traverse() if not x.external])

def test_buildcache_push_with_base_image_command(mutable_database, tmpdir):
@@ -347,6 +347,10 @@ def put_manifest(base_images, checksums, image_ref, tmpdir, extra_config, annota
for s in mpileaks.traverse():
if s.name in without_manifest:
continue

if s.external:
continue

# This should not raise a 404.
manifest, _ = get_manifest_and_config(image.with_tag(default_tag(s)))

@@ -358,6 +362,10 @@ def put_manifest(base_images, checksums, image_ref, tmpdir, extra_config, annota
for s in mpileaks.traverse():
if s.name in without_manifest:
continue

if s.external:
continue

expected_digests = {
pkg_to_own_digest[t.name]
for t in s.traverse(deptype=("link", "run"), root=True)
@@ -7,7 +7,7 @@

import pytest

import spack.build_environment
import spack.build_systems.cmake as cmake
import spack.directives
import spack.error
import spack.fetch_strategy
@@ -140,7 +140,7 @@ def test_url_for_version_with_no_urls(mock_packages, config):
def test_custom_cmake_prefix_path(mock_packages, config):
spec = Spec("depends-on-define-cmake-prefix-paths").concretized()

assert spack.build_environment.get_cmake_prefix_path(spec.package) == [
assert cmake.get_cmake_prefix_path(spec.package) == [
spec["define-cmake-prefix-paths"].prefix.test
]

@@ -549,3 +549,35 @@ def test_fetch_external_package_is_noop(default_mock_concretization, fetching_no
spec.external_path = "/some/where"
assert spec.external
spec.package.do_fetch()

@pytest.mark.parametrize(
"relocation_dict",
[
{"/foo/bar/baz": "/a/b/c", "/foo/bar": "/a/b"},
# Ensure correctness does not depend on the ordering of the dict
{"/foo/bar": "/a/b", "/foo/bar/baz": "/a/b/c"},
],
)
def test_macho_relocation_with_changing_projection(relocation_dict):
"""Tests that prefix relocation is computed correctly when the prefixes to be relocated
contain a directory and its subdirectories.

This happens when relocating to a new place AND changing the store projection. In that case we
might have a relocation dict like:

/foo/bar/baz/ -> /a/b/c
/foo/bar -> /a/b

What we need to check is that we don't end up in situations where we relocate to a mixture of
the two schemes, like /a/b/baz.
"""
original_rpath = "/foo/bar/baz/abcdef"
result = macho_find_paths(
[original_rpath],
deps=[],
idpath=None,
old_layout_root="/foo",
prefix_to_prefix=relocation_dict,
)
assert result[original_rpath] == "/a/b/c/abcdef"
@@ -9,6 +9,7 @@

import pytest

import spack.deptypes as dt
import spack.rewiring
import spack.store
from spack.installer import PackageInstaller
@@ -22,6 +23,18 @@
args.extend(["g++", "patchelf"])

def check_spliced_spec_prefixes(spliced_spec):
"""check the file in the prefix has the correct paths"""
for node in spliced_spec.traverse(root=True):
text_file_path = os.path.join(node.prefix, node.name)
with open(text_file_path, "r") as f:
text = f.read()
print(text)
for modded_spec in node.traverse(root=True, deptype=dt.ALL & ~dt.BUILD):
print(modded_spec)
assert modded_spec.prefix in text

@pytest.mark.requires_executables(*args)
@pytest.mark.parametrize("transitive", [True, False])
def test_rewire_db(mock_fetch, install_mockery, transitive):
@@ -42,13 +55,8 @@ def test_rewire_db(mock_fetch, install_mockery, transitive):
installed_in_db = rec.installed if rec else False
assert installed_in_db

# check the file in the prefix has the correct paths
for node in spliced_spec.traverse(root=True):
text_file_path = os.path.join(node.prefix, node.name)
with open(text_file_path, "r") as f:
text = f.read()
for modded_spec in node.traverse(root=True, deptype=("link", "run")):
assert modded_spec.prefix in text
# check for correct prefix paths
check_spliced_spec_prefixes(spliced_spec)

@pytest.mark.requires_executables(*args)
@@ -150,3 +158,26 @@ def test_rewire_not_installed_fails(mock_fetch, install_mockery):
match="failed due to missing install of build spec",
):
spack.rewiring.rewire(spliced_spec)

def test_rewire_virtual(mock_fetch, install_mockery):
"""Check installed package can successfully splice an alternate virtual implementation"""
dep = "splice-a"
alt_dep = "splice-h"

spec = Spec(f"splice-vt^{dep}").concretized()
alt_spec = Spec(alt_dep).concretized()

PackageInstaller([spec.package, alt_spec.package]).install()

spliced_spec = spec.splice(alt_spec, True)
spack.rewiring.rewire(spliced_spec)

# Confirm the original spec still has the original virtual implementation.
assert spec.satisfies(f"^{dep}")

# Confirm the spliced spec uses the new virtual implementation.
assert spliced_spec.satisfies(f"^{alt_dep}")

# check for correct prefix paths
check_spliced_spec_prefixes(spliced_spec)
@@ -1056,12 +1056,11 @@ def test_splice_intransitive_complex(self, setup_complex_splice):
spliced = a_red.splice(c_blue, transitive=False)
assert spliced.satisfies(
"pkg-a color=red ^pkg-b color=red ^pkg-c color=blue "
"^pkg-d color=red ^pkg-e color=red ^pkg-f color=blue ^pkg-g@3 color=blue"
)
assert set(spliced.dependencies(deptype=dt.BUILD)) == set(
a_red.dependencies(deptype=dt.BUILD)
"^pkg-d color=red ^pkg-e color=red ^pkg-f color=blue ^pkg-g@2 color=red"
)
assert set(spliced.dependencies(deptype=dt.BUILD)) == set()
assert spliced.build_spec == a_red

# We cannot check spliced["b"].build_spec is spliced["b"] because Spec.__getitem__ creates
# a new wrapper object on each invocation. So we select once and check on that object
# For the rest of the unchanged specs we will just check the s._build_spec is None.
@@ -1072,11 +1071,9 @@ def test_splice_intransitive_complex(self, setup_complex_splice):

assert spliced["pkg-c"].satisfies(
"pkg-c color=blue ^pkg-d color=red ^pkg-e color=red "
"^pkg-f color=blue ^pkg-g@3 color=blue"
)
assert set(spliced["pkg-c"].dependencies(deptype=dt.BUILD)) == set(
c_blue.dependencies(deptype=dt.BUILD)
"^pkg-f color=blue ^pkg-g@2 color=red"
)
assert set(spliced["pkg-c"].dependencies(deptype=dt.BUILD)) == set()
assert spliced["pkg-c"].build_spec == c_blue
assert set(spliced["pkg-c"].dependents()) == {spliced}
@@ -1101,14 +1098,12 @@ def test_splice_intransitive_complex(self, setup_complex_splice):
# Build dependent edge to f because f originally depended on the e this was copied from
assert set(spliced["pkg-e"].dependents(deptype=dt.BUILD)) == {spliced["pkg-b"]}

assert spliced["pkg-f"].satisfies("pkg-f color=blue ^pkg-e color=red ^pkg-g@3 color=blue")
assert set(spliced["pkg-f"].dependencies(deptype=dt.BUILD)) == set(
c_blue["pkg-f"].dependencies(deptype=dt.BUILD)
)
assert spliced["pkg-f"].satisfies("pkg-f color=blue ^pkg-e color=red ^pkg-g@2 color=red")
assert set(spliced["pkg-f"].dependencies(deptype=dt.BUILD)) == set()
assert spliced["pkg-f"].build_spec == c_blue["pkg-f"]
assert set(spliced["pkg-f"].dependents()) == {spliced["pkg-c"]}

# spliced["g"] is g3, but spliced["b"]["g"] is g1
# spliced["pkg-g"] is g2, but spliced["pkg-b"]["pkg-g"] is g1
assert spliced["pkg-g"] == a_red["pkg-g"]
assert spliced["pkg-g"]._build_spec is None
assert set(spliced["pkg-g"].dependents(deptype=dt.LINK)) == {
@@ -1117,7 +1112,6 @@ def test_splice_intransitive_complex(self, setup_complex_splice):
spliced["pkg-f"],
a_red["pkg-c"],
}
assert set(spliced["pkg-g"].dependents(deptype=dt.BUILD)) == {spliced, a_red["pkg-c"]}

assert spliced["pkg-b"]["pkg-g"] == a_red["pkg-b"]["pkg-g"]
assert spliced["pkg-b"]["pkg-g"]._build_spec is None
@@ -1131,14 +1125,7 @@ def test_splice_intransitive_complex(self, setup_complex_splice):
# traverse_edges creates a synthetic edge with no deptypes to the root
if edge.depflag:
depflag = dt.LINK
if (edge.parent.name, edge.spec.name) not in [
("pkg-a", "pkg-c"), # These are the spliced edges
("pkg-c", "pkg-d"),
("pkg-f", "pkg-e"),
("pkg-c", "pkg-g"),
("pkg-f", "pkg-g"),
("pkg-c", "pkg-f"), # ancestor to spliced edge
]:
if not edge.parent.spliced:
depflag |= dt.BUILD
assert edge.depflag == depflag
@@ -1150,21 +1137,17 @@ def test_splice_transitive_complex(self, setup_complex_splice):
"pkg-a color=red ^pkg-b color=red ^pkg-c color=blue ^pkg-d color=blue "
"^pkg-e color=blue ^pkg-f color=blue ^pkg-g@3 color=blue"
)
assert set(spliced.dependencies(deptype=dt.BUILD)) == set(
a_red.dependencies(deptype=dt.BUILD)
)
assert set(spliced.dependencies(deptype=dt.BUILD)) == set()
assert spliced.build_spec == a_red

assert spliced["pkg-b"].satisfies(
"pkg-b color=red ^pkg-d color=blue ^pkg-e color=blue ^pkg-g@2 color=blue"
)
assert set(spliced["pkg-b"].dependencies(deptype=dt.BUILD)) == set(
a_red["pkg-b"].dependencies(deptype=dt.BUILD)
)
assert set(spliced["pkg-b"].dependencies(deptype=dt.BUILD)) == set()
assert spliced["pkg-b"].build_spec == a_red["pkg-b"]
assert set(spliced["pkg-b"].dependents()) == {spliced}

# We cannot check spliced["b"].build_spec is spliced["b"] because Spec.__getitem__ creates
# We cannot check spliced["c"].build_spec is spliced["c"] because Spec.__getitem__ creates
# a new wrapper object on each invocation. So we select once and check on that object
# For the rest of the unchanged specs we will just check the s._build_spec is None.
c = spliced["pkg-c"]
@@ -1211,17 +1194,7 @@ def test_splice_transitive_complex(self, setup_complex_splice):
# traverse_edges creates a synthetic edge with no deptypes to the root
if edge.depflag:
depflag = dt.LINK
if (edge.parent.name, edge.spec.name) not in [
("pkg-a", "pkg-c"), # These are the spliced edges
("pkg-a", "pkg-g"),
("pkg-b", "pkg-d"),
("pkg-b", "pkg-e"),
("pkg-b", "pkg-g"),
(
"pkg-a",
"pkg-b",
), # This edge not spliced, but b was spliced invalidating edge
]:
if not edge.parent.spliced:
depflag |= dt.BUILD
assert edge.depflag == depflag

@@ -1365,10 +1338,10 @@ def test_splice_swap_names(self, default_mock_concretization, transitive):

@pytest.mark.parametrize("transitive", [True, False])
def test_splice_swap_names_mismatch_virtuals(self, default_mock_concretization, transitive):
spec = default_mock_concretization("splice-t")
dep = default_mock_concretization("splice-vh+foo")
vt = default_mock_concretization("splice-vt")
vh = default_mock_concretization("splice-vh+foo")
with pytest.raises(spack.spec.SpliceError, match="virtual"):
spec.splice(dep, transitive)
vt.splice(vh, transitive)

def test_spec_override(self):
init_spec = Spec("pkg-a foo=baz foobar=baz cflags=-O3 cxxflags=-O1")
@@ -15,7 +15,7 @@
import subprocess
import sys
from functools import wraps
from typing import Any, Callable, Dict, List, MutableMapping, Optional, Tuple, Union
from typing import Any, Callable, Dict, Iterable, List, MutableMapping, Optional, Tuple, Union

from llnl.path import path_to_os_path, system_path_filter
from llnl.util import tty
@@ -90,7 +90,7 @@ def is_system_path(path: Path) -> bool:
return bool(path) and (os.path.normpath(path) in SYSTEM_DIRS)

def filter_system_paths(paths: List[Path]) -> List[Path]:
def filter_system_paths(paths: Iterable[Path]) -> List[Path]:
"""Returns a copy of the input where system paths are filtered out."""
return [p for p in paths if not is_system_path(p)]
@@ -15,3 +15,4 @@ markers =
enable_compiler_execution: enable compiler execution to detect link paths and libc
disable_clean_stage_check: avoid failing tests if there are leftover files in the stage area
not_on_windows: mark tests that are skipped on Windows
only_windows: mark tests that are skipped everywhere but Windows
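The `only_windows` entry added above only registers the marker with pytest; the actual skipping has to be applied by a collection hook in a conftest, which is not part of the hunks shown here. The snippet below is a minimal sketch, assuming a hypothetical `pytest_collection_modifyitems` hook that turns the marker into a skip on non-Windows platforms.

# Hypothetical conftest.py sketch (not shown in this diff): skip tests
# marked `only_windows` everywhere except Windows.
import sys

import pytest


def pytest_collection_modifyitems(config, items):
    if sys.platform == "win32":
        return  # on Windows the marked tests run normally
    for item in items:
        marker = item.get_closest_marker("only_windows")
        if marker is not None:
            reason = marker.args[0] if marker.args else "Windows-only test"
            item.add_marker(pytest.mark.skip(reason=reason))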
@@ -308,7 +308,7 @@ default:

e4s-generate:
extends: [ ".e4s", ".generate-x86_64"]
image: ecpe4s/ubuntu22.04-runner-amd64-gcc-11.4:2024.03.01
image: ghcr.io/spack/spack/ubuntu22.04-runner-amd64-gcc-11.4:2024.03.01

e4s-build:
extends: [ ".e4s", ".build" ]
@@ -331,7 +331,7 @@ e4s-build:

e4s-neoverse-v2-generate:
extends: [ ".e4s-neoverse-v2", ".generate-neoverse-v2" ]
image: ecpe4s/ubuntu22.04-runner-arm64-gcc-11.4:2024.03.01
image: ghcr.io/spack/spack/ubuntu22.04-runner-arm64-gcc-11.4:2024.03.01

e4s-neoverse-v2-build:
extends: [ ".e4s-neoverse-v2", ".build" ]
@@ -354,7 +354,7 @@ e4s-neoverse-v2-build:

e4s-neoverse_v1-generate:
extends: [ ".e4s-neoverse_v1", ".generate-neoverse_v1" ]
image: ecpe4s/ubuntu22.04-runner-arm64-gcc-11.4:2024.03.01
image: ghcr.io/spack/spack/ubuntu22.04-runner-arm64-gcc-11.4:2024.03.01

e4s-neoverse_v1-build:
extends: [ ".e4s-neoverse_v1", ".build" ]
@@ -377,7 +377,7 @@ e4s-neoverse_v1-build:

e4s-rocm-external-generate:
extends: [ ".e4s-rocm-external", ".generate-x86_64"]
image: ecpe4s/ubuntu22.04-runner-amd64-gcc-11.4-rocm6.2.0:2024.09.11
image: ghcr.io/spack/spack/ubuntu22.04-runner-amd64-gcc-11.4-rocm6.2.1:2024.10.08

e4s-rocm-external-build:
extends: [ ".e4s-rocm-external", ".build" ]
@@ -423,7 +423,7 @@ e4s-rocm-external-build:

e4s-oneapi-generate:
extends: [ ".e4s-oneapi", ".generate-x86_64"]
image: ecpe4s/ubuntu22.04-runner-amd64-oneapi-2024.2:2024.09.06
image: ghcr.io/spack/spack/ubuntu22.04-runner-amd64-oneapi-2024.2:2024.09.06

e4s-oneapi-build:
extends: [ ".e4s-oneapi", ".build" ]
@@ -495,7 +495,7 @@ build_systems-build:

developer-tools-manylinux2014-generate:
extends: [ ".developer-tools-manylinux2014", ".generate-x86_64"]
image: ecpe4s/manylinux2014:2024.03.28
image: ghcr.io/spack/spack/manylinux2014:2024.03.28

developer-tools-manylinux2014-build:
extends: [ ".developer-tools-manylinux2014", ".build" ]
@@ -508,6 +508,30 @@ developer-tools-manylinux2014-build:
- artifacts: True
job: developer-tools-manylinux2014-generate

###########################################
# Build tests for different developer tools
# darwin
###########################################
.developer-tools-darwin:
extends: [ ".darwin_aarch64" ]
variables:
SPACK_CI_STACK_NAME: developer-tools-darwin

developer-tools-darwin-generate:
tags: [ "macos-ventura", "apple-clang-15", "aarch64-macos" ]
extends: [ ".developer-tools-darwin", ".generate-base"]

developer-tools-darwin-build:
extends: [ ".developer-tools-darwin", ".build" ]
trigger:
include:
- artifact: jobs_scratch_dir/cloud-ci-pipeline.yml
job: developer-tools-darwin-generate
strategy: depend
needs:
- artifacts: True
job: developer-tools-darwin-generate

#########################################
# RADIUSS
#########################################
@@ -799,7 +823,7 @@ deprecated-ci-build:
########################################

.aws-pcluster-generate:
image: { "name": "ghcr.io/spack/pcluster-amazonlinux-2:v2024-01-29", "entrypoint": [""] }
image: { "name": "ghcr.io/spack/pcluster-amazonlinux-2:v2024-10-07", "entrypoint": [""] }
before_script:
# Use gcc from pre-installed spack store
- - . "./share/spack/setup-env.sh"
@@ -108,10 +108,11 @@ ci:
tags: ["service"]
image: busybox:latest
variables:
CI_OIDC_REQUIRED: 0
GIT_STRATEGY: "none"
CI_JOB_SIZE: "small"
KUBERNETES_CPU_REQUEST: "500m"
KUBERNETES_MEMORY_REQUEST: "500M"
KUBERNETES_CPU_REQUEST: "100m"
KUBERNETES_MEMORY_REQUEST: "5M"
before_script:: []
after_script:: []
@@ -6,29 +6,19 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
set -e

# Intel compiler needs to be installed from a specific spack git commit.
# The best solution would be to have the compilers hash (or packages contents) be part of the
# individual packages hashes. I don't see this at the moment.
# Set to the latest tag including a recent oneapi compiler.
# NOTE: If we update this spack version in the future make sure the compiler version also updates.
spack_intel_compiler_commit="develop-2023-08-06"

set_pcluster_defaults() {
# Set versions of pre-installed software in packages.yaml
[ -z "${SLURM_VERSION}" ] && SLURM_VERSION=$(strings /opt/slurm/lib/libslurm.so | grep -e '^VERSION' | awk '{print $2}' | sed -e 's?"??g')
[ -z "${SLURM_ROOT}" ] && ls /etc/systemd/system/slurm* &>/dev/null && \
SLURM_ROOT=$(dirname $(dirname "$(awk '/ExecStart=/ {print $1}' /etc/systemd/system/slurm* | sed -e 's?^.*=??1' | head -n1)"))
# Fallback to default location if SLURM not in systemd
[ -z "${SLURM_ROOT}" ] && [ -d "/opt/slurm" ] && SLURM_ROOT=/opt/slurm
[ -z "${SLURM_VERSION}" ] && SLURM_VERSION=$(strings "${SLURM_ROOT}"/lib/libslurm.so | grep -e '^VERSION' | awk '{print $2}' | sed -e 's?"??g')
[ -z "${LIBFABRIC_VERSION}" ] && LIBFABRIC_VERSION=$(awk '/Version:/{print $2}' "$(find /opt/amazon/efa/ -name libfabric.pc | head -n1)" | sed -e 's?~??g' -e 's?amzn.*??g')
export SLURM_VERSION LIBFABRIC_VERSION
export SLURM_ROOT SLURM_VERSION LIBFABRIC_VERSION

envsubst < "${SPACK_ROOT}/share/spack/gitlab/cloud_pipelines/stacks/${SPACK_CI_STACK_NAME}/packages.yaml" > "${SPACK_ROOT}"/etc/spack/packages.yaml
}

setup_spack() {
spack compiler add --scope site
# Do not add autotools/buildtools packages. These versions need to be managed by spack or it will
# eventually end up in a version mismatch (e.g. when compiling gmp).
spack external find --scope site --tag core-packages
}

patch_compilers_yaml() {
# Graceful exit if package not found by spack
set -o pipefail
@@ -76,55 +66,47 @@ EOF
}
install_compilers() {
# We need to treat compilers as essentially external, i.e. their installation location
# (including hash) must not change when any changes are pushed to spack. The reason is that
# changes in the compilers are not reflected in the package hashes built in the CI. Hence, those
# packages will reference a wrong compiler path once the path changes.

# `gcc@12.3.0%gcc@7.3.1` is created as part of building the pipeline containers.
# `ghcr.io/spack/pcluster-amazonlinux-2:v2024-01-29` produced the following hashes.
if [ "x86_64" == "$(arch)" ]; then
gcc_hash="vxlibl3ubl5ptwzb3zydgksfa5osdea6"
else
gcc_hash="bikooik6f3fyrkroarulsadbii43ggz5"
fi

spack install /${gcc_hash}
(
spack load gcc
spack compiler add --scope site
)

# Install Intel compilers through a static spack version such that the compiler's hash does not change.
# The compilers needs to be in the same install tree as the rest of the software such that the path
# relocation works correctly. This holds the danger that this part will fail when the current spack gets
# incompatible with the one in $spack_intel_compiler_commit. Therefore, we make intel installations optional
# in package.yaml files and add a fallback `%gcc` version for each application.
if [ "x86_64" == "$(arch)" ]; then
# in packages.yaml files and add a fallback `%gcc` version for each application.
if [ -f "/bootstrap-compilers/spack/etc/spack/compilers.yaml" ]; then
# Running inside a gitlab CI container
# Intel and gcc@12 compiler are pre-installed and their location is known in
cp /bootstrap-compilers/spack/etc/spack/compilers.yaml "${SPACK_ROOT}"/etc/spack/
else
spack compiler add --scope site
# We need to treat compilers as essentially external, i.e. their installation location
# (including hash) must not change when any changes are pushed to spack. The reason is that
# changes in the compilers are not reflected in the package hashes built in the CI. Hence, those
# packages will reference a wrong compiler path once the path changes.

# `gcc@12.4.0%gcc@7.3.1` is created as part of building the pipeline containers.
# `ghcr.io/spack/pcluster-amazonlinux-2:v2024-10-07` produced the following hashes.
if [ "x86_64" == "$(arch)" ]; then
gcc_hash="pttzchh7o54nhmycj4wgzw5mic6rk2nb"
else
gcc_hash="v6wxye6ijzrxnzxftcwnpu3psohsjl2b"
fi

spack install /${gcc_hash}
(
CURRENT_SPACK_ROOT=${SPACK_ROOT}
DIR="$(mktemp -d)"
cd "${DIR}"
# This needs to include commit 361a185ddb such that `ifx` picks up the correct toolchain. Otherwise
# this leads to libstdc++.so errors during linking (e.g. slepc).
git clone --depth=1 -b ${spack_intel_compiler_commit} https://github.com/spack/spack.git \
&& cd spack \
&& curl -sL https://github.com/spack/spack/pull/40557.patch | patch -p1 \
&& curl -sL https://github.com/spack/spack/pull/40561.patch | patch -p1 \
&& cp "${CURRENT_SPACK_ROOT}/etc/spack/config.yaml" etc/spack/ \
&& cp "${CURRENT_SPACK_ROOT}/etc/spack/compilers.yaml" etc/spack/ \
&& cp "${CURRENT_SPACK_ROOT}/etc/spack/packages.yaml" etc/spack/ \
&& . share/spack/setup-env.sh \
&& spack install intel-oneapi-compilers-classic
rm -rf "${DIR}"
spack load gcc
spack compiler add --scope site
)
bash -c ". \"$(spack location -i intel-oneapi-compilers)\"/setvars.sh; spack compiler add --scope site" \
|| true
spack clean -m

if [ "x86_64" == "$(arch)" ]; then
# 2024.1.0 is the last oneapi compiler that works on AL2 and is the one used to compile packages in the build cache.
spack install intel-oneapi-compilers@2024.1.0
(
. "$(spack location -i intel-oneapi-compilers)"/setvars.sh; spack compiler add --scope site \
|| true
)
fi
fi
}

set_pcluster_defaults
setup_spack
install_compilers
patch_compilers_yaml
@@ -47,7 +47,7 @@ packages:
slurm:
buildable: false
externals:
- prefix: /opt/slurm/
- prefix: ${SLURM_ROOT}
spec: slurm@${SLURM_VERSION} +pmix
wrf:
require:
@@ -7,8 +7,7 @@ spack:
- mpas-model
- mpich
- openfoam
# - quantum-espresso : %gcc@12.3.0 on neoverse_v1 fails.
# Root cause: internal compiler error: in compute_live_loop_exits, at tree-ssa-loop-manip.cc:247
- quantum-espresso
- wrf

- targets:
@@ -22,7 +21,7 @@ spack:
ci:
pipeline-gen:
- build-job:
image: { "name": "ghcr.io/spack/pcluster-amazonlinux-2:v2024-01-29", "entrypoint": [""] }
image: { "name": "ghcr.io/spack/pcluster-amazonlinux-2:v2024-10-07", "entrypoint": [""] }
tags: ["aarch64"]
before_script:
- - . "./share/spack/setup-env.sh"
@@ -19,6 +19,10 @@ packages:
- "+intel_provided_gcc ^intel-oneapi-mkl target=x86_64_v4"
- "+intel_provided_gcc ^intel-oneapi-mkl target=x86_64_v3"
when: "%intel"
- one_of:
- "+intel_provided_gcc target=x86_64_v4 ^intel-oneapi-mkl"
- "+intel_provided_gcc target=x86_64_v3 ^intel-oneapi-mkl"
when: "%oneapi"
intel-oneapi-compilers:
require: "intel-oneapi-compilers %gcc target=x86_64_v3"
intel-oneapi-mpi:
@@ -29,6 +33,10 @@ packages:
- "lammps_sizes=bigbig +molecule +kspace +rigid +asphere +opt +openmp +openmp-package +intel fft=mkl ^intel-oneapi-mkl target=x86_64_v4"
- "lammps_sizes=bigbig +molecule +kspace +rigid +asphere +opt +openmp +openmp-package fft=mkl ^intel-oneapi-mkl target=x86_64_v3"
when: "%intel"
- one_of:
- "lammps_sizes=bigbig +molecule +kspace +rigid +asphere +opt +openmp +openmp-package +intel fft=mkl ^intel-oneapi-mkl target=x86_64_v4"
- "lammps_sizes=bigbig +molecule +kspace +rigid +asphere +opt +openmp +openmp-package fft=mkl ^intel-oneapi-mkl target=x86_64_v3"
when: "%oneapi"
libidn2:
require:
- one_of:
@@ -53,6 +61,10 @@ packages:
- "precision=single ^parallelio+pnetcdf target=x86_64_v4"
- "precision=single ^parallelio+pnetcdf target=x86_64_v3"
when: "%intel"
- one_of:
- "precision=single ^parallelio+pnetcdf target=x86_64_v4"
- "precision=single ^parallelio+pnetcdf target=x86_64_v3"
when: "%oneapi"
mpich:
require:
- one_of:
@@ -89,10 +101,14 @@ packages:
- "quantum-espresso@6.6 ^intel-oneapi-mkl+cluster target=x86_64_v4"
- "quantum-espresso@6.6 ^intel-oneapi-mkl+cluster target=x86_64_v3"
when: "%intel"
- one_of:
- "quantum-espresso@6.6 ^intel-oneapi-mkl+cluster target=x86_64_v4"
- "quantum-espresso@6.6 ^intel-oneapi-mkl+cluster target=x86_64_v3"
when: "%oneapi"
slurm:
buildable: false
externals:
- prefix: /opt/slurm/
- prefix: ${SLURM_ROOT}
spec: slurm@${SLURM_VERSION} +pmix
wrf:
require:
@@ -101,9 +117,14 @@ packages:
- "wrf@4 build_type=dm+sm target=x86_64_v3"
- "wrf@4.2.2 +netcdf_classic fflags=\"-fp-model fast=2 -no-heap-arrays -no-prec-div -no-prec-sqrt -fno-common\" build_type=dm+sm target=x86_64_v3"
when: "%intel"
- one_of:
- "wrf@4 build_type=dm+sm target=x86_64_v4"
- "wrf@4 build_type=dm+sm target=x86_64_v3"
- "wrf@4.2.2 +netcdf_classic fflags=\"-fp-model fast=2 -no-heap-arrays -no-prec-div -no-prec-sqrt -fno-common\" build_type=dm+sm target=x86_64_v3"
when: "%oneapi"

all:
compiler: [intel, oneapi, gcc]
compiler: [oneapi, gcc]
permissions:
read: world
write: user
@@ -3,14 +3,16 @@ spack:

definitions:
- apps:
- gromacs %intel
- lammps %intel
- mpas-model %intel
- gromacs %oneapi
- lammps %oneapi
# earliest oneapi version with fix does not run on AmazonLinux2, see https://github.com/spack/spack/pull/46457
# - mpas-model %oneapi
- openfoam %gcc
- palace %oneapi ^superlu-dist%oneapi # hack: force fortran-rt provider through superlu-dist
- quantum-espresso %intel
# - wrf : While building hdf5 cmake errors out with Detecting Fortran/C Interface: Failed to compile
# Root cause: ifort cannot deal with arbitrarily long file names.
# TODO: Find out how to make +ipo cmake flag work.
# - quantum-espresso %oneapi
- openmpi %oneapi
- wrf %oneapi

- targets:
- 'target=x86_64_v4'
@@ -23,7 +25,7 @@ spack:
ci:
pipeline-gen:
- build-job:
image: { "name": "ghcr.io/spack/pcluster-amazonlinux-2:v2024-01-29", "entrypoint": [""] }
image: { "name": "ghcr.io/spack/pcluster-amazonlinux-2:v2024-10-07", "entrypoint": [""] }
before_script:
- - . "./share/spack/setup-env.sh"
- . /etc/profile.d/modules.sh
@@ -0,0 +1,75 @@
spack:
view: false
packages:
all:
require:
- target=aarch64
concretizer:
unify: true
reuse: false
specs:
# editors
- neovim~no_luajit
- py-pynvim
- emacs+json~native+treesitter # TODO native not supported until gcc builds on darwin
# - tree-sitter is a dep, should also have cli but no package
- nano # just in case
# tags and scope search helpers
- universal-ctags # only maintained ctags, works better with c++
- direnv
# runtimes and compilers
- python
- llvm+link_llvm_dylib+lld~lldb~polly+python build_type=MinSizeRel # for clangd, clang-format
- node-js # for editor plugins etc., pyright language server
- npm
- cmake
- libtool
- go # to build fzf, gh, hub
- rust+dev # fd, ripgrep, hyperfine, exa, rust-analyzer
# styling and lints
- astyle
- cppcheck
- uncrustify
- py-fprettify
- py-fortran-language-server
- py-python-lsp-server
# cli dev tools
- ripgrep
- gh
- fd
# - bfs # liburing: /usr/include/linux/ipv6.h:19:8: error: redefinition of 'struct in6_pktinfo'
- fzf
- tree
- jq
- py-yq
- hub
- ncdu
- eza
- lsd
- hyperfine
- htop
- tmux
- ccache
# ensure we can use a jobserver build and do this fast
- gmake
- ninja # should be @kitware, can't be because of meson requirement
- libtree
- sed
- which
- flex
- graphviz
- doxygen
- meson
- lima

ci:
pipeline-gen:
- build-job-remove:
tags: [ spack, public ]
- build-job:
variables:
CI_GPG_KEY_ROOT: /etc/protected-runner
tags: [ "macos-ventura", "apple-clang-15", "aarch64-macos" ]

cdash:
build-group: Developer Tools Darwin
@@ -5,6 +5,7 @@ spack:
require: target=x86_64_v3
concretizer:
unify: true
reuse: false
definitions:
- default_specs:
# editors
@@ -93,7 +94,7 @@ spack:
ci:
pipeline-gen:
- build-job:
image: ecpe4s/manylinux2014:2024.03.28
image: ghcr.io/spack/spack/manylinux2014:2024.03.28

cdash:
build-group: Developer Tools Manylinux2014
@@ -74,6 +74,7 @@ spack:
- dyninst
- ecp-data-vis-sdk ~cuda ~rocm +adios2 +ascent +cinema +darshan +faodel +hdf5 ~paraview +pnetcdf +sz +unifyfs +veloc ~visit +vtkm +zfp # +visit: ?
- exaworks
- fftx
- flecsi
# - flit
# - flux-core
@@ -113,6 +114,7 @@ spack:
- netlib-scalapack
- nrm
# - nvhpc
- nwchem
- omega-h
- openfoam
# - openmpi
@@ -197,6 +199,7 @@ spack:
- cabana +cuda cuda_arch=90 ^kokkos +wrapper +cuda_lambda +cuda cuda_arch=90
- caliper +cuda cuda_arch=90
- chai +cuda cuda_arch=90 ^umpire ~shared
- fftx +cuda cuda_arch=90
- flecsi +cuda cuda_arch=90
- ginkgo +cuda cuda_arch=90
- gromacs +cuda cuda_arch=90
@@ -241,7 +244,7 @@ spack:
ci:
pipeline-gen:
- build-job:
image: ecpe4s/ubuntu22.04-runner-arm64-gcc-11.4:2024.03.01
image: ghcr.io/spack/spack/ubuntu22.04-runner-arm64-gcc-11.4:2024.03.01

cdash:
build-group: E4S ARM Neoverse V2
@@ -74,6 +74,7 @@ spack:
- dyninst
- ecp-data-vis-sdk ~cuda ~rocm +adios2 +ascent +cinema +darshan +faodel +hdf5 +paraview +pnetcdf +sz +unifyfs +veloc ~visit +vtkm +zfp # +visit: ?
- exaworks
- fftx
- flecsi
- flit
- flux-core
@@ -114,6 +115,7 @@ spack:
- netlib-scalapack
- nrm
- nvhpc
- nwchem
- omega-h
- openfoam
- openmpi
@@ -226,6 +228,7 @@ spack:
- cusz +cuda cuda_arch=75
- dealii +cuda cuda_arch=75
- ecp-data-vis-sdk +adios2 +hdf5 +vtkm +zfp ~paraview +cuda cuda_arch=75 # # +paraview: job killed oom?
- fftx +cuda cuda_arch=75
- flecsi +cuda cuda_arch=75
- ginkgo +cuda cuda_arch=75
- gromacs +cuda cuda_arch=75
@@ -274,6 +277,7 @@ spack:
- cusz +cuda cuda_arch=80
- dealii +cuda cuda_arch=80
- ecp-data-vis-sdk +adios2 +hdf5 +vtkm +zfp ~paraview +cuda cuda_arch=80 # +paraview: job killed oom?
- fftx +cuda cuda_arch=80
- flecsi +cuda cuda_arch=80
- ginkgo +cuda cuda_arch=80
- gromacs +cuda cuda_arch=80
@@ -320,6 +324,7 @@ spack:
- chai +cuda cuda_arch=90 ^umpire ~shared
- chapel +cuda cuda_arch=90
- ecp-data-vis-sdk +adios2 +hdf5 +vtkm +zfp ~paraview +cuda cuda_arch=90 # +paraview: vtkm/exec/cuda/internal/ThrustPatches.h(213): error: this declaration has no storage class or type specifier
- fftx +cuda cuda_arch=90
- flecsi +cuda cuda_arch=90
- ginkgo +cuda cuda_arch=90
- gromacs +cuda cuda_arch=90
@@ -362,7 +367,7 @@ spack:
ci:
pipeline-gen:
- build-job:
image: ecpe4s/ubuntu22.04-runner-arm64-gcc-11.4:2024.03.01
image: ghcr.io/spack/spack/ubuntu22.04-runner-arm64-gcc-11.4:2024.03.01

cdash:
build-group: E4S ARM Neoverse V1
@@ -128,6 +128,7 @@ spack:
- nco
- netlib-scalapack
- nrm
- nwchem
- omega-h
- openfoam
- openmpi
@@ -192,6 +193,7 @@ spack:
# --
# - chapel ~cuda ~rocm # llvm: closures.c:(.text+0x305e): undefined reference to `_intel_fast_memset'
# - cp2k +mpi # dbcsr: dbcsr_api.F(973): #error: incomplete macro call DBCSR_ABORT.
# - fftx # fftx: https://github.com/spack/spack/issues/47048
# - geopm-runtime # libelf: configure: error: installation or configuration problem: C compiler cannot create executables.
# - hpctoolkit # dyninst@13.0.0%gcc: libiberty/./d-demangle.c:142: undefined reference to `_intel_fast_memcpy'
# - lbann # 2024.2 internal compiler error
@@ -242,7 +244,7 @@ spack:
ci:
pipeline-gen:
- build-job:
image: ecpe4s/ubuntu22.04-runner-amd64-oneapi-2024.2:2024.09.06
image: ghcr.io/spack/spack/ubuntu22.04-runner-amd64-oneapi-2024.2:2024.09.06

cdash:
build-group: E4S OneAPI
Some files were not shown because too many files have changed in this diff.