Compare commits
200 commits: docs/no-se...develop-20

Commit SHAs (author and date columns omitted):
5ab10d57be, 96061d2c00, e78d20dc84, 6d2341c109, 968ad02473, b93882804f, f58ebd4fbb, 6f7f9528e5, 59c7ff8683, 4495e0341d,
ba39924046, 751c3fef86, 102811adb9, 8f56eb620f, ec517b40e9, 22cb3815fe, f549354f78, dc212d0e59, 8f14acb139, c38ef72b06,
7d67d9ece4, 2c30962c74, cc28334049, dbdf5bacc4, 531f01f0b9, 794593b478, afcf0d2e39, 29ee861366, b1a984ef02, cc545d8c9a,
49ff816fb0, 8c33841567, 21b50fbbe3, 2a8e503a04, 4b695d4722, 2fa816184e, 25f622e809, 7506acabe7, 3a828358cb, 94a1d1414a,
0f080b38f4, f1ec4859c8, 63baba0308, aeec861544, e54d4678f9, 187b8adb4f, d6fd96f024, e3b6d2c3c7, 1e9c46296c, 48183b37be,
9a3d248348, 03e22adb5b, 5f5fc78236, e12a8a69c7, 001af62585, f5e89df6f2, ce75adada6, 24d37df1a2, a9d294c532, 9dcaa56db4,
98162aa2e1, 3934df622c, dbf5d79557, 97e29e501d, 258c651a8f, 43ca6da346, 9786bd932b, c72619d4db, 8ecae17c46, 1e47ccb83a,
d6421a69eb, 000dff2fd4, 1e413477dd, 8955e63a68, bf14b424bb, 14209a86a6, b7d9900764, bc155e7b90, 65f9ba345f, ca49bc5652,
b84b85a7e0, 016cdba16f, 4806e6549f, c14b277150, 919025d9f3, 52f57c90eb, ee1fa3e50c, 772928241b, 7440bb4c36, c464866deb,
799a8a5090, c218ee50e9, 8ff7a20320, e3fe6bc0f7, c6fcb1068f, 54ac3e72ed, 274fbebc4c, d40eb19918, 31de670bd2, 6c0961549b,
c090bc5ebe, bca4d37d76, 9b484d2eea, a57b0e1e2d, e3cb3b29d9, ac48ecd375, 0bb20d34db, 971fda5c33, dcc4423a9d, 82c380b563,
8bcf6a31ae, ddd88e266a, 08c597d83e, bf5340755d, f8e70a0c96, 7e468aefd5, e685d04f84, 9d962f55b0, 00d3066b97, 5ca0dcecb2,
fa8fb7903b, f35ff441f2, 9ea9ee05c8, 24ddc49c1b, 2f4266161c, 7bcb0fff7d, cd332c6370, 6daf9677f3, cb6450977d, bf62ac0769,
0223fe746b, 12fba13441, 0c44f5a140, f4853790c5, 9ed2e396f4, 3ee6fc937e, c9b6cc9a58, 58b394bcec, 4d89eeca9b, bfc71e9dae,
f061dcda74, cc460894fd, 5e09660e87, 5a8efb3b14, 99002027c4, a247879be3, 7b46993fed, dd59f4ba34, 18ab14e659, 28eb5e1bf6,
c658ddbfa3, 12963c894f, 61fa12508f, daf6acef6e, d30621e787, dd4b365608, 157d47fc5a, 13daa1b692, f923e650f9, 1a1bbb8af2,
594fcc3c80, 76ec19b26e, 00baaf868e, 3b06347f65, 5b9e207db2, d6fd9017c4, 913d79238e, 250038fa9b, 26c553fce7, e24c242fb7,
ca14ce2629, 44f443946c, 6e6bc89bda, 8714ea6652, df92f0a7d4, d24b91157c, 1a0f77388c, 34571d4ad6, a574f40732, d4ffe244af,
e08e66ad89, 0543710258, 5d994e48d5, d1fa23e9c6, f1db8b7871, c5cca54c27, a9c1648db8, 3bd911377e, fcb2f7d3aa, a8a9e0160a,
9ca6aaeafd, aed5c39312, eb36bb2a8f, 8dcf860888, a5b7cb6e6f, 34c0bfefa6, ea5db048f3, e68a17f2c6, 4af9ec3d8a, eb90e2c894
4 changes: .github/workflows/audit.yaml (vendored)

@@ -22,7 +22,7 @@ jobs:
 matrix:
 operating_system: ["ubuntu-latest", "macos-latest"]
 steps:
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
 with:
 python-version: ${{inputs.python_version}}
@@ -43,7 +43,7 @@ jobs:
 . share/spack/setup-env.sh
 $(which spack) audit packages
 $(which spack) audit externals
-- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1 # @v2.1.0
+- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab # @v2.1.0
 if: ${{ inputs.with_coverage == 'true' }}
 with:
 flags: unittests,audits
22 changes: .github/workflows/bootstrap.yml (vendored)

@@ -24,7 +24,7 @@ jobs:
 make patch unzip which xz python3 python3-devel tree \
 cmake bison bison-devel libstdc++-static
 - name: Checkout
-  uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+  uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 with:
 fetch-depth: 0
 - name: Setup non-root user
@@ -62,7 +62,7 @@ jobs:
 make patch unzip xz-utils python3 python3-dev tree \
 cmake bison
 - name: Checkout
-  uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+  uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 with:
 fetch-depth: 0
 - name: Setup non-root user
@@ -99,7 +99,7 @@ jobs:
 bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
 make patch unzip xz-utils python3 python3-dev tree
 - name: Checkout
-  uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+  uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 with:
 fetch-depth: 0
 - name: Setup non-root user
@@ -133,7 +133,7 @@ jobs:
 make patch unzip which xz python3 python3-devel tree \
 cmake bison
 - name: Checkout
-  uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+  uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 with:
 fetch-depth: 0
 - name: Setup repo
@@ -158,7 +158,7 @@ jobs:
 run: |
 brew install cmake bison@2.7 tree
 - name: Checkout
-  uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+  uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
 with:
 python-version: "3.12"
@@ -182,7 +182,7 @@ jobs:
 run: |
 brew install tree
 - name: Checkout
-  uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+  uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 - name: Bootstrap clingo
 run: |
 set -ex
@@ -207,7 +207,7 @@ jobs:
 runs-on: ubuntu-20.04
 steps:
 - name: Checkout
-  uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+  uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 with:
 fetch-depth: 0
 - name: Setup repo
@@ -250,7 +250,7 @@ jobs:
 bzip2 curl file g++ gcc patchelf gfortran git gzip \
 make patch unzip xz-utils python3 python3-dev tree
 - name: Checkout
-  uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+  uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 with:
 fetch-depth: 0
 - name: Setup non-root user
@@ -287,7 +287,7 @@ jobs:
 make patch unzip xz-utils python3 python3-dev tree \
 gawk
 - name: Checkout
-  uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+  uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 with:
 fetch-depth: 0
 - name: Setup non-root user
@@ -320,7 +320,7 @@ jobs:
 # Remove GnuPG since we want to bootstrap it
 sudo rm -rf /usr/local/bin/gpg
 - name: Checkout
-  uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+  uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 - name: Bootstrap GnuPG
 run: |
 source share/spack/setup-env.sh
@@ -338,7 +338,7 @@ jobs:
 # Remove GnuPG since we want to bootstrap it
 sudo rm -rf /usr/local/bin/gpg
 - name: Checkout
-  uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+  uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 - name: Bootstrap GnuPG
 run: |
 source share/spack/setup-env.sh
8 changes: .github/workflows/build-containers.yml (vendored)

@@ -55,7 +55,7 @@ jobs:
 if: github.repository == 'spack/spack'
 steps:
 - name: Checkout
-  uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
+  uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2

 - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
 id: docker_meta
@@ -99,7 +99,7 @@ jobs:
 uses: docker/setup-buildx-action@0d103c3126aa41d772a8362f6aa67afac040f80c

 - name: Log in to GitHub Container Registry
-  uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
+  uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
 with:
 registry: ghcr.io
 username: ${{ github.actor }}
@@ -107,13 +107,13 @@ jobs:

 - name: Log in to DockerHub
 if: github.event_name != 'pull_request'
-  uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
+  uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
 with:
 username: ${{ secrets.DOCKERHUB_USERNAME }}
 password: ${{ secrets.DOCKERHUB_TOKEN }}

 - name: Build & Deploy ${{ matrix.dockerfile[0] }}
-  uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56
+  uses: docker/build-push-action@af5a7ed5ba88268d5278f7203fb52cd833f66d6e
 with:
 context: dockerfiles/${{ matrix.dockerfile[0] }}
 platforms: ${{ matrix.dockerfile[1] }}
4 changes: .github/workflows/ci.yaml (vendored)

@@ -35,12 +35,12 @@ jobs:
 core: ${{ steps.filter.outputs.core }}
 packages: ${{ steps.filter.outputs.packages }}
 steps:
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
 if: ${{ github.event_name == 'push' }}
 with:
 fetch-depth: 0
 # For pull requests it's not necessary to checkout the code
-- uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a
+- uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36
 id: filter
 with:
 # See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
2 changes: .github/workflows/nightly-win-builds.yml (vendored)

@@ -14,7 +14,7 @@ jobs:
 build-paraview-deps:
 runs-on: windows-latest
 steps:
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 with:
 fetch-depth: 0
 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
2 changes: .github/workflows/style/requirements.txt (vendored)

@@ -1,4 +1,4 @@
-black==24.2.0
+black==24.3.0
 clingo==5.7.1
 flake8==7.0.0
 isort==5.13.2
18 changes: .github/workflows/unit_tests.yaml (vendored)

@@ -51,7 +51,7 @@ jobs:
 on_develop: false

 steps:
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
 with:
 fetch-depth: 0
 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
@@ -91,14 +91,14 @@ jobs:
 UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
 run: |
 share/spack/qa/run-unit-tests
-- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
+- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
 with:
 flags: unittests,linux,${{ matrix.concretizer }}
 # Test shell integration
 shell:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
 with:
 fetch-depth: 0
 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
@@ -122,7 +122,7 @@ jobs:
 COVERAGE: true
 run: |
 share/spack/qa/run-shell-tests
-- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
+- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
 with:
 flags: shelltests,linux

@@ -137,7 +137,7 @@ jobs:
 dnf install -y \
 bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
 make patch tcl unzip which xz
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
 - name: Setup repo and non-root user
 run: |
 git --version
@@ -156,7 +156,7 @@ jobs:
 clingo-cffi:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
 with:
 fetch-depth: 0
 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
@@ -181,7 +181,7 @@ jobs:
 SPACK_TEST_SOLVER: clingo
 run: |
 share/spack/qa/run-unit-tests
-- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1 # @v2.1.0
+- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab # @v2.1.0
 with:
 flags: unittests,linux,clingo
 # Run unit tests on MacOS
@@ -191,7 +191,7 @@ jobs:
 matrix:
 python-version: ["3.11"]
 steps:
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
 with:
 fetch-depth: 0
 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
@@ -216,6 +216,6 @@ jobs:
 $(which spack) solve zlib
 common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
 $(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
-- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
+- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
 with:
 flags: unittests,macos
6 changes: .github/workflows/valid-style.yml (vendored)

@@ -18,7 +18,7 @@ jobs:
 validate:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
 with:
 python-version: '3.11'
@@ -35,7 +35,7 @@ jobs:
 style:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 with:
 fetch-depth: 0
 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
@@ -69,7 +69,7 @@ jobs:
 dnf install -y \
 bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
 make patch tcl unzip which xz
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # @v2
 - name: Setup repo and non-root user
 run: |
 git --version
10 changes: .github/workflows/windows_python.yml (vendored)

@@ -15,7 +15,7 @@ jobs:
 unit-tests:
 runs-on: windows-latest
 steps:
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 with:
 fetch-depth: 0
 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
@@ -33,13 +33,13 @@ jobs:
 ./share/spack/qa/validate_last_exit.ps1
 coverage combine -a
 coverage xml
-- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
+- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
 with:
 flags: unittests,windows
 unit-tests-cmd:
 runs-on: windows-latest
 steps:
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 with:
 fetch-depth: 0
 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
@@ -57,13 +57,13 @@ jobs:
 ./share/spack/qa/validate_last_exit.ps1
 coverage combine -a
 coverage xml
-- uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1
+- uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
 with:
 flags: unittests,windows
 build-abseil:
 runs-on: windows-latest
 steps:
-- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
+- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
 with:
 fetch-depth: 0
 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
@@ -87,7 +87,7 @@ You can check what is installed in the bootstrapping store at any time using:

 .. code-block:: console

-   % spack find -b
+   % spack -b find
    ==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
    ==> 11 installed packages
    -- darwin-catalina-x86_64 / apple-clang@12.0.0 ------------------
@@ -101,7 +101,7 @@ In case it is needed you can remove all the software in the current bootstrapping

    % spack clean -b
    ==> Removing bootstrapped software and configuration in "/Users/spack/.spack/bootstrap"

-   % spack find -b
+   % spack -b find
    ==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
    ==> 0 installed packages

@@ -175,4 +175,4 @@ bootstrapping.

 This command needs to be run on a machine with internet access and the resulting folder
 has to be moved over to the air-gapped system. Once the local sources are added using the
-commands suggested at the prompt, they can be used to bootstrap Spack.
+commands suggested at the prompt, they can be used to bootstrap Spack.
@@ -173,6 +173,72 @@ arguments to ``Makefile.PL`` or ``Build.PL`` by overriding

    ]

^^^^^^^
Testing
^^^^^^^

``PerlPackage`` provides a simple stand-alone test of the successfully
installed package to confirm that installed perl module(s) can be used.
These tests can be performed any time after the installation using
``spack -v test run``. (For more information on the command, see
:ref:`cmd-spack-test-run`.)

The base class automatically detects perl modules based on the presence
of ``*.pm`` files under the package's library directory. For example,
the files under ``perl-bignum``'s perl library are:

.. code-block:: console

   $ find . -name "*.pm"
   ./bigfloat.pm
   ./bigrat.pm
   ./Math/BigFloat/Trace.pm
   ./Math/BigInt/Trace.pm
   ./Math/BigRat/Trace.pm
   ./bigint.pm
   ./bignum.pm

which results in the package having the ``use_modules`` property containing:

.. code-block:: python

   use_modules = [
       "bigfloat",
       "bigrat",
       "Math::BigFloat::Trace",
       "Math::BigInt::Trace",
       "Math::BigRat::Trace",
       "bigint",
       "bignum",
   ]
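The mapping from ``*.pm`` paths to module names is mechanical: drop the
``.pm`` suffix and join the remaining path components with ``::``. A minimal
sketch of that rule in Python (an illustration, not Spack's actual
implementation):

.. code-block:: python

   import pathlib

   def module_names(pm_paths):
       """Map *.pm paths to Perl module names,
       e.g. ./Math/BigFloat/Trace.pm -> Math::BigFloat::Trace."""
       return [
           "::".join(pathlib.PurePosixPath(path).with_suffix("").parts)
           for path in pm_paths
       ]

   print(module_names(["./bigfloat.pm", "./Math/BigInt/Trace.pm"]))
   # ['bigfloat', 'Math::BigInt::Trace']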
.. note::

   This list can often be used to catch missing dependencies.

If the list is somehow wrong, you can provide the names of the modules
yourself by overriding ``use_modules`` like so:

.. code-block:: python

   use_modules = ["bigfloat", "bigrat", "bigint", "bignum"]

If you only want a subset of the automatically detected modules to be
tested, you could instead define the ``skip_modules`` property on the
package. So, instead of overriding ``use_modules`` as shown above, you
could define the following:

.. code-block:: python

   skip_modules = [
       "Math::BigFloat::Trace",
       "Math::BigInt::Trace",
       "Math::BigRat::Trace",
   ]

for the same use tests.

^^^^^^^^^^^^^^^^^^^^^
Alternatives to Spack
^^^^^^^^^^^^^^^^^^^^^
@@ -73,9 +73,12 @@ are six configuration scopes. From lowest to highest:

    Spack instance per project) or for site-wide settings on a multi-user
    machine (e.g., for a common Spack instance).

#. **plugin**: Read from a Python project's entry points. Settings here affect
   all instances of Spack running with the same Python installation. This scope
   takes higher precedence than site, system, and default scopes.

#. **user**: Stored in the home directory: ``~/.spack/``. These settings
   affect all instances of Spack and take higher precedence than site,
-  system, or defaults scopes.
+  system, plugin, or defaults scopes.

#. **custom**: Stored in a custom directory specified by ``--config-scope``.
   If multiple scopes are listed on the command line, they are ordered

@@ -196,6 +199,45 @@ with MPICH. You can create different configuration scopes for use with

    mpi: [mpich]

.. _plugin-scopes:

^^^^^^^^^^^^^
Plugin scopes
^^^^^^^^^^^^^

.. note::
   Python version >= 3.8 is required to enable plugin configuration.

Spack can be made aware of configuration scopes that are installed as part of a Python package. To do so, register a function that returns the scope's path to the ``"spack.config"`` entry point. Consider the Python package ``my_package`` that includes Spack configurations:

.. code-block:: console

   my-package/
   ├── src
   │   ├── my_package
   │   │   ├── __init__.py
   │   │   └── spack/
   │   │       └── config.yaml
   └── pyproject.toml

Adding the following to ``my_package``'s ``pyproject.toml`` will make ``my_package``'s ``spack/`` configurations visible to Spack when ``my_package`` is installed:

.. code-block:: toml

   [project.entry_points."spack.config"]
   my_package = "my_package:get_config_path"

The function ``my_package.get_config_path`` in ``my_package/__init__.py`` might look like

.. code-block:: python

   import importlib.resources

   def get_config_path():
       dirname = importlib.resources.files("my_package").joinpath("spack")
       if dirname.exists():
           return str(dirname)
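Once such a package is installed, the registration can be discovered with the
standard ``importlib.metadata`` machinery. A minimal sketch of that discovery,
illustrative rather than Spack's internal code (the ``group=`` keyword needs
Python 3.10+; on 3.8/3.9, index the result of ``entry_points()`` by group
name):

.. code-block:: python

   import importlib.metadata

   def registered_config_paths():
       """Resolve each "spack.config" entry point to a configuration path."""
       paths = []
       for entry_point in importlib.metadata.entry_points(group="spack.config"):
           get_path = entry_point.load()  # e.g. my_package.get_config_path
           path = get_path()
           if path is not None:
               paths.append(path)
       return paths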
.. _platform-scopes:

------------------------
@@ -952,6 +952,17 @@ function, as shown in the example below:

    ^mpi: "{name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}"
    all: "{name}-{version}/{compiler.name}-{compiler.version}"

Projections also permit environment and spack configuration variable
expansions as shown below:

.. code-block:: yaml

   projections:
     all: "{name}-{version}/{compiler.name}-{compiler.version}/$date/$SYSTEM_ENV_VARIABLE"

where ``$date`` is the spack configuration variable that will expand with the ``YYYY-MM-DD``
format and ``$SYSTEM_ENV_VARIABLE`` is an environment variable defined in the shell.

The entries in the projections configuration file must all be either
specs or the keyword ``all``. For each spec, the projection used will
be the first non-``all`` entry that the spec satisfies, or ``all`` if
@@ -111,3 +111,39 @@ The corresponding unit tests can be run giving the appropriate options to ``spack

    (5 durations < 0.005s hidden. Use -vv to show these durations.)
    =========================================== 5 passed in 5.06s ============================================

---------------------------------------
Registering Extensions via Entry Points
---------------------------------------

.. note::
   Python version >= 3.8 is required to register extensions via entry points.

Spack can be made aware of extensions that are installed as part of a Python package. To do so, register a function that returns the extension path, or paths, to the ``"spack.extensions"`` entry point. Consider the Python package ``my_package`` that includes a Spack extension:

.. code-block:: console

   my-package/
   ├── src
   │   ├── my_package
   │   │   └── __init__.py
   │   └── spack-scripting/  # the spack extensions
   └── pyproject.toml

Adding the following to ``my_package``'s ``pyproject.toml`` will make the ``spack-scripting`` extension visible to Spack when ``my_package`` is installed:

.. code-block:: toml

   [project.entry_points."spack.extensions"]
   my_package = "my_package:get_extension_path"

The function ``my_package.get_extension_path`` in ``my_package/__init__.py`` might look like

.. code-block:: python

   import importlib.resources

   def get_extension_path():
       dirname = importlib.resources.files("my_package").joinpath("spack-scripting")
       if dirname.exists():
           return str(dirname)
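A quick, illustrative way to confirm the registration after installing
``my_package`` is to resolve the entry point by hand (again, the ``group=``
keyword needs Python 3.10+):

.. code-block:: python

   from importlib.metadata import entry_points

   # List every registered Spack extension and the path it resolves to.
   for entry_point in entry_points(group="spack.extensions"):
       print(entry_point.name, "->", entry_point.load()())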
@@ -250,9 +250,10 @@ Compiler configuration

 Spack has the ability to build packages with multiple compilers and
 compiler versions. Compilers can be made available to Spack by
-specifying them manually in ``compilers.yaml``, or automatically by
-running ``spack compiler find``, but for convenience Spack will
-automatically detect compilers the first time it needs them.
+specifying them manually in ``compilers.yaml`` or ``packages.yaml``,
+or automatically by running ``spack compiler find``, but for
+convenience Spack will automatically detect compilers the first time
+it needs them.

.. _cmd-spack-compilers:

@@ -457,6 +458,48 @@ specification. The operations available to modify the environment are ``set``, `

    prepend_path: # Similar for append|remove_path
      LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh

.. note::

   Spack is in the process of moving compilers from a separate
   attribute to be handled like all other packages. As part of this
   process, the ``compilers.yaml`` section will eventually be replaced
   by configuration in the ``packages.yaml`` section. This new
   configuration is now available, although it is not yet the default
   behavior.

Compilers can also be configured as external packages in the
``packages.yaml`` config file. Any external package for a compiler
(e.g. ``gcc`` or ``llvm``) will be treated as a configured compiler
assuming the paths to the compiler executables are determinable from
the prefix.

If the paths to the compiler executable are not determinable from the
prefix, you can add them to the ``extra_attributes`` field. Similarly,
all other fields from the compilers config can be added to the
``extra_attributes`` field for an external representing a compiler.

.. code-block:: yaml

   packages:
     gcc:
       external:
       - spec: gcc@12.2.0 arch=linux-rhel8-skylake
         prefix: /usr
         extra_attributes:
           environment:
             set:
               GCC_ROOT: /usr
     llvm:
       external:
       - spec: llvm+clang@15.0.0 arch=linux-rhel8-skylake
         prefix: /usr
         extra_attributes:
           paths:
             cc: /usr/bin/clang-with-suffix
             cxx: /usr/bin/clang++-with-extra-info
             fc: /usr/bin/gfortran
             f77: /usr/bin/gfortran
           extra_rpaths:
           - /usr/lib/llvm/

^^^^^^^^^^^^^^^^^^^^^^^
Build Your Own Compiler
@@ -273,9 +273,21 @@ builtin support through the ``depends_on`` function, the latter simply uses a ``
 statement. Both module systems (at least in newer versions) do reference counting, so that if a
 module is loaded by two different modules, it will only be unloaded after the others are.

-The ``autoload`` key accepts the values ``none``, ``direct``, and ``all``. To disable it, use
-``none``, and to enable, it's best to stick to ``direct``, which only autoloads the direct link and
-run type dependencies, relying on recursive autoloading to load the rest.
+The ``autoload`` key accepts the values:
+
+* ``none``: no autoloading
+* ``run``: autoload direct *run* type dependencies
+* ``direct``: autoload direct *link and run* type dependencies
+* ``all``: autoload all dependencies
+
+In case of ``run`` and ``direct``, a ``module load`` triggers a recursive load.
+
+The ``direct`` option is most correct: there are cases where pure link dependencies need to set
+variables for themselves, or need to have variables of their own dependencies set.
+
+In practice however, ``run`` is often sufficient, and may make ``module load`` snappier.
+
+The ``all`` option is discouraged and seldom used.

 A common complaint about autoloading is the large number of modules that are visible to the user.
 Spack has a solution for this as well: ``hide_implicits: true``. This ensures that only those
@@ -297,11 +309,11 @@ Environment Modules requires version 4.7 or higher.

    tcl:
      hide_implicits: true
      all:
-       autoload: direct
+       autoload: direct # or `run`
    lmod:
      hide_implicits: true
      all:
-       autoload: direct
+       autoload: direct # or `run`

.. _anonymous_specs:
@@ -6,8 +6,8 @@ python-levenshtein==0.25.0
 docutils==0.20.1
 pygments==2.17.2
 urllib3==2.2.1
-pytest==8.0.2
+pytest==8.1.1
 isort==5.13.2
-black==24.2.0
+black==24.3.0
 flake8==7.0.0
-mypy==1.8.0
+mypy==1.9.0
2 changes: lib/spack/external/__init__.py (vendored)

@@ -18,7 +18,7 @@

 * Homepage: https://pypi.python.org/pypi/archspec
 * Usage: Labeling, comparison and detection of microarchitectures
-* Version: 0.2.2 (commit 1dc58a5776dd77e6fc6e4ba5626af5b1fb24996e)
+* Version: 0.2.3 (commit 7b8fe60b69e2861e7dac104bc1c183decfcd3daf)

 astunparse
 ----------------
3 changes: lib/spack/external/archspec/__init__.py (vendored)

@@ -1,2 +1,3 @@
 """Init file to avoid namespace packages"""
-__version__ = "0.2.2"
+
+__version__ = "0.2.3"
1 change: lib/spack/external/archspec/__main__.py (vendored)

@@ -3,6 +3,7 @@
"""

import sys

from .cli import main

sys.exit(main())
6 changes: lib/spack/external/archspec/cli.py (vendored)

@@ -46,7 +46,11 @@ def _make_parser() -> argparse.ArgumentParser:

 def cpu() -> int:
     """Run the `archspec cpu` subcommand."""
-    print(archspec.cpu.host())
+    try:
+        print(archspec.cpu.host())
+    except FileNotFoundError as exc:
+        print(exc)
+        return 1
     return 0
10 changes: lib/spack/external/archspec/cpu/__init__.py (vendored)

@@ -5,10 +5,14 @@
 """The "cpu" package permits to query and compare different
 CPU microarchitectures.
 """
-from .microarchitecture import Microarchitecture, UnsupportedMicroarchitecture
-from .microarchitecture import TARGETS, generic_microarchitecture
-from .microarchitecture import version_components
 from .detect import host
+from .microarchitecture import (
+    TARGETS,
+    Microarchitecture,
+    UnsupportedMicroarchitecture,
+    generic_microarchitecture,
+    version_components,
+)

 __all__ = [
     "Microarchitecture",
372 changes: lib/spack/external/archspec/cpu/detect.py (vendored)

@@ -4,15 +4,17 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Detection of CPU microarchitectures"""
import collections
import functools
import os
import platform
import re
import struct
import subprocess
import warnings
from typing import Dict, List, Optional, Set, Tuple, Union

from .microarchitecture import generic_microarchitecture, TARGETS
from .schema import TARGETS_JSON
from ..vendor.cpuid.cpuid import CPUID
from .microarchitecture import TARGETS, Microarchitecture, generic_microarchitecture
from .schema import CPUID_JSON, TARGETS_JSON

#: Mapping from operating systems to chain of commands
#: to obtain a dictionary of raw info on the current cpu

@@ -22,43 +24,46 @@
#: functions checking the compatibility of the host with a given target
COMPATIBILITY_CHECKS = {}

# Constants for commonly used architectures
X86_64 = "x86_64"
AARCH64 = "aarch64"
PPC64LE = "ppc64le"
PPC64 = "ppc64"
RISCV64 = "riscv64"


def info_dict(operating_system):
    """Decorator to mark functions that are meant to return raw info on
    the current cpu.

def detection(operating_system: str):
    """Decorator to mark functions that are meant to return partial information on the current cpu.

    Args:
        operating_system (str or tuple): operating system for which the marked
            function is a viable factory of raw info dictionaries.
        operating_system: operating system where this function can be used.
    """

    def decorator(factory):
        INFO_FACTORY[operating_system].append(factory)

        @functools.wraps(factory)
        def _impl():
            info = factory()

            # Check that info contains a few mandatory fields
            msg = 'field "{0}" is missing from raw info dictionary'
            assert "vendor_id" in info, msg.format("vendor_id")
            assert "flags" in info, msg.format("flags")
            assert "model" in info, msg.format("model")
            assert "model_name" in info, msg.format("model_name")

            return info

        return _impl
        return factory

    return decorator


@info_dict(operating_system="Linux")
def proc_cpuinfo():
    """Returns a raw info dictionary by parsing the first entry of
    ``/proc/cpuinfo``
    """
    info = {}
def partial_uarch(
    name: str = "", vendor: str = "", features: Optional[Set[str]] = None, generation: int = 0
) -> Microarchitecture:
    """Construct a partial microarchitecture, from information gathered during system scan."""
    return Microarchitecture(
        name=name,
        parents=[],
        vendor=vendor,
        features=features or set(),
        compilers={},
        generation=generation,
    )


@detection(operating_system="Linux")
def proc_cpuinfo() -> Microarchitecture:
    """Returns a partial Microarchitecture, obtained from scanning ``/proc/cpuinfo``"""
    data = {}
    with open("/proc/cpuinfo") as file:  # pylint: disable=unspecified-encoding
        for line in file:
            key, separator, value = line.partition(":")

@@ -70,11 +75,96 @@ def proc_cpuinfo():
            #
            # we are on a blank line separating two cpus. Exit early as
            # we want to read just the first entry in /proc/cpuinfo
            if separator != ":" and info:
            if separator != ":" and data:
                break

            info[key.strip()] = value.strip()
    return info
            data[key.strip()] = value.strip()

    architecture = _machine()
    if architecture == X86_64:
        return partial_uarch(
            vendor=data.get("vendor_id", "generic"), features=_feature_set(data, key="flags")
        )

    if architecture == AARCH64:
        return partial_uarch(
            vendor=_canonicalize_aarch64_vendor(data),
            features=_feature_set(data, key="Features"),
        )

    if architecture in (PPC64LE, PPC64):
        generation_match = re.search(r"POWER(\d+)", data.get("cpu", ""))
        try:
            generation = int(generation_match.group(1))
        except AttributeError:
            # There might be no match under emulated environments. For instance
            # emulating a ppc64le with QEMU and Docker still reports the host
            # /proc/cpuinfo and not a Power
            generation = 0
        return partial_uarch(generation=generation)

    if architecture == RISCV64:
        if data.get("uarch") == "sifive,u74-mc":
            data["uarch"] = "u74mc"
        return partial_uarch(name=data.get("uarch", RISCV64))

    return generic_microarchitecture(architecture)


class CpuidInfoCollector:
    """Collects the information we need on the host CPU from cpuid"""

    # pylint: disable=too-few-public-methods
    def __init__(self):
        self.cpuid = CPUID()

        registers = self.cpuid.registers_for(**CPUID_JSON["vendor"]["input"])
        self.highest_basic_support = registers.eax
        self.vendor = struct.pack("III", registers.ebx, registers.edx, registers.ecx).decode(
            "utf-8"
        )

        registers = self.cpuid.registers_for(**CPUID_JSON["highest_extension_support"]["input"])
        self.highest_extension_support = registers.eax

        self.features = self._features()

    def _features(self):
        result = set()

        def check_features(data):
            registers = self.cpuid.registers_for(**data["input"])
            for feature_check in data["bits"]:
                current = getattr(registers, feature_check["register"])
                if self._is_bit_set(current, feature_check["bit"]):
                    result.add(feature_check["name"])

        for call_data in CPUID_JSON["flags"]:
            if call_data["input"]["eax"] > self.highest_basic_support:
                continue
            check_features(call_data)

        for call_data in CPUID_JSON["extension-flags"]:
            if call_data["input"]["eax"] > self.highest_extension_support:
                continue
            check_features(call_data)

        return result

    def _is_bit_set(self, register: int, bit: int) -> bool:
        mask = 1 << bit
        return register & mask > 0


@detection(operating_system="Windows")
def cpuid_info():
    """Returns a partial Microarchitecture, obtained from running the cpuid instruction"""
    architecture = _machine()
    if architecture == X86_64:
        data = CpuidInfoCollector()
        return partial_uarch(vendor=data.vendor, features=data.features)

    return generic_microarchitecture(architecture)


def _check_output(args, env):

@@ -83,14 +173,25 @@ def _check_output(args, env):
    return str(output.decode("utf-8"))


WINDOWS_MAPPING = {
    "AMD64": "x86_64",
    "ARM64": "aarch64",
}


def _machine():
    """ "Return the machine architecture we are on"""
    """Return the machine architecture we are on"""
    operating_system = platform.system()

    # If we are not on Darwin, trust what Python tells us
    if operating_system != "Darwin":
    # If we are not on Darwin or Windows, trust what Python tells us
    if operating_system not in ("Darwin", "Windows"):
        return platform.machine()

    # Normalize windows specific names
    if operating_system == "Windows":
        platform_machine = platform.machine()
        return WINDOWS_MAPPING.get(platform_machine, platform_machine)

    # On Darwin it might happen that we are on M1, but using an interpreter
    # built for x86_64. In that case "platform.machine() == 'x86_64'", so we
    # need to fix that.

@@ -103,54 +204,47 @@ def _machine():
    if "Apple" in output:
        # Note that a native Python interpreter on Apple M1 would return
        # "arm64" instead of "aarch64". Here we normalize to the latter.
        return "aarch64"
        return AARCH64

    return "x86_64"
    return X86_64


@info_dict(operating_system="Darwin")
def sysctl_info_dict():
@detection(operating_system="Darwin")
def sysctl_info() -> Microarchitecture:
    """Returns a raw info dictionary parsing the output of sysctl."""
    child_environment = _ensure_bin_usrbin_in_path()

    def sysctl(*args):
    def sysctl(*args: str) -> str:
        return _check_output(["sysctl"] + list(args), env=child_environment).strip()

    if _machine() == "x86_64":
        flags = (
            sysctl("-n", "machdep.cpu.features").lower()
            + " "
            + sysctl("-n", "machdep.cpu.leaf7_features").lower()
    if _machine() == X86_64:
        features = (
            f'{sysctl("-n", "machdep.cpu.features").lower()} '
            f'{sysctl("-n", "machdep.cpu.leaf7_features").lower()}'
        )
        info = {
            "vendor_id": sysctl("-n", "machdep.cpu.vendor"),
            "flags": flags,
            "model": sysctl("-n", "machdep.cpu.model"),
            "model name": sysctl("-n", "machdep.cpu.brand_string"),
        }
    else:
        model = "unknown"
        model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
        if "m2" in model_str:
            model = "m2"
        elif "m1" in model_str:
            model = "m1"
        elif "apple" in model_str:
            model = "m1"
        features = set(features.split())

        info = {
            "vendor_id": "Apple",
            "flags": [],
            "model": model,
            "CPU implementer": "Apple",
            "model name": sysctl("-n", "machdep.cpu.brand_string"),
        }
    return info
        # Flags detected on Darwin turned to their linux counterpart
        for darwin_flag, linux_flag in TARGETS_JSON["conversions"]["darwin_flags"].items():
            if darwin_flag in features:
                features.update(linux_flag.split())

        return partial_uarch(vendor=sysctl("-n", "machdep.cpu.vendor"), features=features)

    model = "unknown"
    model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
    if "m2" in model_str:
        model = "m2"
    elif "m1" in model_str:
        model = "m1"
    elif "apple" in model_str:
        model = "m1"

    return partial_uarch(name=model, vendor="Apple")


def _ensure_bin_usrbin_in_path():
    # Make sure that /sbin and /usr/sbin are in PATH as sysctl is
    # usually found there
    # Make sure that /sbin and /usr/sbin are in PATH as sysctl is usually found there
    child_environment = dict(os.environ.items())
    search_paths = child_environment.get("PATH", "").split(os.pathsep)
    for additional_path in ("/sbin", "/usr/sbin"):

@@ -160,22 +254,10 @@ def _ensure_bin_usrbin_in_path():
    return child_environment


def adjust_raw_flags(info):
    """Adjust the flags detected on the system to homogenize
    slightly different representations.
    """
    # Flags detected on Darwin turned to their linux counterpart
    flags = info.get("flags", [])
    d2l = TARGETS_JSON["conversions"]["darwin_flags"]
    for darwin_flag, linux_flag in d2l.items():
        if darwin_flag in flags:
            info["flags"] += " " + linux_flag


def adjust_raw_vendor(info):
    """Adjust the vendor field to make it human readable"""
    if "CPU implementer" not in info:
        return
def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str:
    """Adjust the vendor field to make it human-readable"""
    if "CPU implementer" not in data:
        return "generic"

    # Mapping numeric codes to vendor (ARM). This list is a merge from
    # different sources:

@@ -185,43 +267,37 @@ def adjust_raw_vendor(info):
    # https://github.com/gcc-mirror/gcc/blob/master/gcc/config/aarch64/aarch64-cores.def
    # https://patchwork.kernel.org/patch/10524949/
    arm_vendors = TARGETS_JSON["conversions"]["arm_vendors"]
    arm_code = info["CPU implementer"]
    if arm_code in arm_vendors:
        info["CPU implementer"] = arm_vendors[arm_code]
    arm_code = data["CPU implementer"]
    return arm_vendors.get(arm_code, arm_code)


def raw_info_dictionary():
    """Returns a dictionary with information on the cpu of the current host.
def _feature_set(data: Dict[str, str], key: str) -> Set[str]:
    return set(data.get(key, "").split())

    This function calls all the viable factories one after the other until
    there's one that is able to produce the requested information.

def detected_info() -> Microarchitecture:
    """Returns a partial Microarchitecture with information on the CPU of the current host.

    This function calls all the viable factories one after the other until there's one that is
    able to produce the requested information. Falls-back to a generic microarchitecture, if none
    of the calls succeed.
    """
    # pylint: disable=broad-except
    info = {}
    for factory in INFO_FACTORY[platform.system()]:
        try:
            info = factory()
            return factory()
        except Exception as exc:
            warnings.warn(str(exc))

        if info:
            adjust_raw_flags(info)
            adjust_raw_vendor(info)
            break

    return info
    return generic_microarchitecture(_machine())


def compatible_microarchitectures(info):
    """Returns an unordered list of known micro-architectures that are
    compatible with the info dictionary passed as argument.

    Args:
        info (dict): dictionary containing information on the host cpu
def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitecture]:
    """Returns an unordered list of known micro-architectures that are compatible with the
    partial Microarchitecture passed as input.
    """
    architecture_family = _machine()
    # If a tester is not registered, be conservative and assume no known
    # target is compatible with the host
    # If a tester is not registered, assume no known target is compatible with the host
    tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
    return [x for x in TARGETS.values() if tester(info, x)] or [
        generic_microarchitecture(architecture_family)

@@ -230,8 +306,8 @@ def compatible_microarchitectures(info):

def host():
    """Detects the host micro-architecture and returns it."""
    # Retrieve a dictionary with raw information on the host's cpu
    info = raw_info_dictionary()
    # Retrieve information on the host's cpu
    info = detected_info()

    # Get a list of possible candidates for this micro-architecture
    candidates = compatible_microarchitectures(info)

@@ -258,16 +334,15 @@ def sorting_fn(item):
    return max(candidates, key=sorting_fn)


def compatibility_check(architecture_family):
def compatibility_check(architecture_family: Union[str, Tuple[str, ...]]):
    """Decorator to register a function as a proper compatibility check.

    A compatibility check function takes the raw info dictionary as a first
    argument and an arbitrary target as the second argument. It returns True
    if the target is compatible with the info dictionary, False otherwise.
    A compatibility check function takes a partial Microarchitecture object as a first argument,
    and an arbitrary target Microarchitecture as the second argument. It returns True if the
    target is compatible with first argument, False otherwise.

    Args:
        architecture_family (str or tuple): architecture family for which
            this test can be used, e.g. x86_64 or ppc64le etc.
        architecture_family: architecture family for which this test can be used
    """
    # Turn the argument into something iterable
    if isinstance(architecture_family, str):

@@ -280,86 +355,57 @@ def decorator(func):
    return decorator


@compatibility_check(architecture_family=("ppc64le", "ppc64"))
@compatibility_check(architecture_family=(PPC64LE, PPC64))
def compatibility_check_for_power(info, target):
    """Compatibility check for PPC64 and PPC64LE architectures."""
    basename = platform.machine()
    generation_match = re.search(r"POWER(\d+)", info.get("cpu", ""))
    try:
        generation = int(generation_match.group(1))
    except AttributeError:
        # There might be no match under emulated environments. For instance
        # emulating a ppc64le with QEMU and Docker still reports the host
        # /proc/cpuinfo and not a Power
        generation = 0

    # We can use a target if it descends from our machine type and our
    # generation (9 for POWER9, etc) is at least its generation.
    arch_root = TARGETS[basename]
    arch_root = TARGETS[_machine()]
    return (
        target == arch_root or arch_root in target.ancestors
    ) and target.generation <= generation
    ) and target.generation <= info.generation


@compatibility_check(architecture_family="x86_64")
@compatibility_check(architecture_family=X86_64)
def compatibility_check_for_x86_64(info, target):
    """Compatibility check for x86_64 architectures."""
    basename = "x86_64"
    vendor = info.get("vendor_id", "generic")
    features = set(info.get("flags", "").split())

    # We can use a target if it descends from our machine type, is from our
    # vendor, and we have all of its features
    arch_root = TARGETS[basename]
    arch_root = TARGETS[X86_64]
    return (
        (target == arch_root or arch_root in target.ancestors)
        and target.vendor in (vendor, "generic")
        and target.features.issubset(features)
        and target.vendor in (info.vendor, "generic")
        and target.features.issubset(info.features)
    )


@compatibility_check(architecture_family="aarch64")
@compatibility_check(architecture_family=AARCH64)
def compatibility_check_for_aarch64(info, target):
    """Compatibility check for AARCH64 architectures."""
    basename = "aarch64"
    features = set(info.get("Features", "").split())
    vendor = info.get("CPU implementer", "generic")

    # At the moment it's not clear how to detect compatibility with
    # At the moment, it's not clear how to detect compatibility with
    # a specific version of the architecture
    if target.vendor == "generic" and target.name != "aarch64":
    if target.vendor == "generic" and target.name != AARCH64:
        return False

    arch_root = TARGETS[basename]
    arch_root = TARGETS[AARCH64]
    arch_root_and_vendor = arch_root == target.family and target.vendor in (
        vendor,
        info.vendor,
        "generic",
    )

    # On macOS it seems impossible to get all the CPU features
    # with syctl info, but for ARM we can get the exact model
    if platform.system() == "Darwin":
        model_key = info.get("model", basename)
        model = TARGETS[model_key]
        model = TARGETS[info.name]
        return arch_root_and_vendor and (target == model or target in model.ancestors)

    return arch_root_and_vendor and target.features.issubset(features)
    return arch_root_and_vendor and target.features.issubset(info.features)


@compatibility_check(architecture_family="riscv64")
@compatibility_check(architecture_family=RISCV64)
def compatibility_check_for_riscv64(info, target):
    """Compatibility check for riscv64 architectures."""
    basename = "riscv64"
    uarch = info.get("uarch")

    # sifive unmatched board
    if uarch == "sifive,u74-mc":
        uarch = "u74mc"
    # catch-all for unknown uarchs
    else:
        uarch = "riscv64"

    arch_root = TARGETS[basename]
    arch_root = TARGETS[RISCV64]
    return (target == arch_root or arch_root in target.ancestors) and (
        target == uarch or target.vendor == "generic"
        target.name == info.name or target.vendor == "generic"
    )
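The vendored library's public entry point is unchanged by this refactoring:
``archspec.cpu.host()`` still returns a ``Microarchitecture``. A small usage
sketch (printed values are machine dependent; the names shown are examples):

.. code-block:: python

   import archspec.cpu

   host = archspec.cpu.host()
   print(host.name)           # e.g. "skylake"
   print(host.vendor)         # e.g. "GenuineIntel"
   print(len(host.features))  # number of detected CPU flags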
@@ -13,6 +13,7 @@
import archspec
import archspec.cpu.alias
import archspec.cpu.schema

from .alias import FEATURE_ALIASES
from .schema import LazyDictionary

@@ -47,7 +48,7 @@ class Microarchitecture:
        which has "broadwell" as a parent, supports running binaries
        optimized for "broadwell".
        vendor (str): vendor of the micro-architecture
        features (list of str): supported CPU flags. Note that the semantic
        features (set of str): supported CPU flags. Note that the semantic
        of the flags in this field might vary among architectures, if
        at all present. For instance x86_64 processors will list all
        the flags supported by a given CPU while Arm processors will

@@ -180,24 +181,28 @@ def generic(self):
        generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
        return max(generics, key=lambda x: len(x.ancestors))

    def to_dict(self, return_list_of_items=False):
        """Returns a dictionary representation of this object.
    def to_dict(self):
        """Returns a dictionary representation of this object."""
        return {
            "name": str(self.name),
            "vendor": str(self.vendor),
            "features": sorted(str(x) for x in self.features),
            "generation": self.generation,
            "parents": [str(x) for x in self.parents],
            "compilers": self.compilers,
        }

        Args:
            return_list_of_items (bool): if True returns an ordered list of
                items instead of the dictionary
        """
        list_of_items = [
            ("name", str(self.name)),
            ("vendor", str(self.vendor)),
            ("features", sorted(str(x) for x in self.features)),
            ("generation", self.generation),
            ("parents", [str(x) for x in self.parents]),
        ]
        if return_list_of_items:
            return list_of_items

        return dict(list_of_items)
    @staticmethod
    def from_dict(data) -> "Microarchitecture":
        """Construct a microarchitecture from a dictionary representation."""
        return Microarchitecture(
            name=data["name"],
            parents=[TARGETS[x] for x in data["parents"]],
            vendor=data["vendor"],
            features=set(data["features"]),
            compilers=data.get("compilers", {}),
            generation=data.get("generation", 0),
        )

    def optimization_flags(self, compiler, version):
        """Returns a string containing the optimization flags that needs

@@ -271,9 +276,7 @@ def tuplify(ver):
        flags = flags_fmt.format(**compiler_entry)
        return flags

    msg = (
        "cannot produce optimized binary for micro-architecture '{0}' with {1}@{2}"
    )
    msg = "cannot produce optimized binary for micro-architecture '{0}' with {1}@{2}"
    if compiler_info:
        versions = [x["versions"] for x in compiler_info]
        msg += f' [supported compiler versions are {", ".join(versions)}]'

@@ -289,9 +292,7 @@ def generic_microarchitecture(name):
    Args:
        name (str): name of the micro-architecture
    """
    return Microarchitecture(
        name, parents=[], vendor="generic", features=[], compilers={}
    )
    return Microarchitecture(name, parents=[], vendor="generic", features=[], compilers={})


def version_components(version):

@@ -345,9 +346,7 @@ def fill_target_from_dict(name, data, targets):
    compilers = values.get("compilers", {})
    generation = values.get("generation", 0)

    targets[name] = Microarchitecture(
        name, parents, vendor, features, compilers, generation
    )
    targets[name] = Microarchitecture(name, parents, vendor, features, compilers, generation)

known_targets = {}
data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"]
|
||||
|
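The new `to_dict`/`from_dict` pair is symmetric, so a round trip should be an identity on any known target. A minimal sketch, assuming the vendored `archspec` is importable (e.g. from `lib/spack/external`):

```python
from archspec.cpu import TARGETS
from archspec.cpu.microarchitecture import Microarchitecture

data = TARGETS["broadwell"].to_dict()        # plain, JSON-serializable dict
clone = Microarchitecture.from_dict(data)    # parents resolved through TARGETS
assert clone == TARGETS["broadwell"]
```

Note that `from_dict` resolves parents through the global `TARGETS` dictionary, so it only round-trips microarchitectures whose ancestors are already known.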
lib/spack/external/archspec/cpu/schema.py (vendored, 68 lines changed)
@@ -7,7 +7,9 @@
 """
 import collections.abc
 import json
-import os.path
+import os
+import pathlib
+from typing import Tuple


 class LazyDictionary(collections.abc.MutableMapping):

@@ -46,21 +48,65 @@ def __len__(self):
         return len(self.data)


-def _load_json_file(json_file):
-    json_dir = os.path.join(os.path.dirname(__file__), "..", "json", "cpu")
-    json_dir = os.path.abspath(json_dir)
-
-    def _factory():
-        filename = os.path.join(json_dir, json_file)
-        with open(filename, "r", encoding="utf-8") as file:
-            return json.load(file)
-
-    return _factory
+#: Environment variable that might point to a directory with a user defined JSON file
+DIR_FROM_ENVIRONMENT = "ARCHSPEC_CPU_DIR"
+
+#: Environment variable that might point to a directory with extensions to JSON files
+EXTENSION_DIR_FROM_ENVIRONMENT = "ARCHSPEC_EXTENSION_CPU_DIR"
+
+
+def _json_file(filename: str, allow_custom: bool = False) -> Tuple[pathlib.Path, pathlib.Path]:
+    """Given a filename, returns the absolute path for the main JSON file, and an
+    optional absolute path for an extension JSON file.
+
+    Args:
+        filename: filename for the JSON file
+        allow_custom: if True, allows overriding the location where the file resides
+    """
+    json_dir = pathlib.Path(__file__).parent / ".." / "json" / "cpu"
+    if allow_custom and DIR_FROM_ENVIRONMENT in os.environ:
+        json_dir = pathlib.Path(os.environ[DIR_FROM_ENVIRONMENT])
+    json_dir = json_dir.absolute()
+    json_file = json_dir / filename
+
+    extension_file = None
+    if allow_custom and EXTENSION_DIR_FROM_ENVIRONMENT in os.environ:
+        extension_dir = pathlib.Path(os.environ[EXTENSION_DIR_FROM_ENVIRONMENT])
+        extension_dir.absolute()
+        extension_file = extension_dir / filename
+
+    return json_file, extension_file
+
+
+def _load(json_file: pathlib.Path, extension_file: pathlib.Path):
+    with open(json_file, "r", encoding="utf-8") as file:
+        data = json.load(file)
+
+    if not extension_file or not extension_file.exists():
+        return data
+
+    with open(extension_file, "r", encoding="utf-8") as file:
+        extension_data = json.load(file)
+
+    top_level_sections = list(data.keys())
+    for key in top_level_sections:
+        if key not in extension_data:
+            continue
+
+        data[key].update(extension_data[key])
+
+    return data


 #: In memory representation of the data in microarchitectures.json,
 #: loaded on first access
-TARGETS_JSON = LazyDictionary(_load_json_file("microarchitectures.json"))
+TARGETS_JSON = LazyDictionary(_load, *_json_file("microarchitectures.json", allow_custom=True))

 #: JSON schema for microarchitectures.json, loaded on first access
-SCHEMA = LazyDictionary(_load_json_file("microarchitectures_schema.json"))
+TARGETS_JSON_SCHEMA = LazyDictionary(_load, *_json_file("microarchitectures_schema.json"))
+
+#: Information on how to call 'cpuid' to get information on the HOST CPU
+CPUID_JSON = LazyDictionary(_load, *_json_file("cpuid.json", allow_custom=True))
+
+#: JSON schema for cpuid.json, loaded on first access
+CPUID_JSON_SCHEMA = LazyDictionary(_load, *_json_file("cpuid_schema.json"))
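The `_json_file`/`_load` pair makes the JSON data user-extensible: top-level sections of an extension file are shallow-merged over the stock data with `dict.update()`. Because `LazyDictionary` defers loading until first access, setting the environment variable before first use is enough. A hedged sketch (the paths and the `mychip` entry are hypothetical):

```python
import json
import os
import pathlib

# A directory whose microarchitectures.json only contains the entries to add;
# stock entries survive the merge performed by _load() above.
extra = pathlib.Path("/tmp/archspec-extra")
extra.mkdir(exist_ok=True)
extension = {"microarchitectures": {"mychip": {
    "from": ["aarch64"], "vendor": "generic", "features": [], "compilers": {}
}}}
(extra / "microarchitectures.json").write_text(json.dumps(extension))
os.environ["ARCHSPEC_EXTENSION_CPU_DIR"] = str(extra)  # before first TARGETS access
```

One observation on the code above: `extension_dir.absolute()` discards its result, which looks like an upstream oversight (it presumably meant `extension_dir = extension_dir.absolute()`).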
lib/spack/external/archspec/json/README.md (vendored, 10 lines changed)
@@ -9,11 +9,11 @@ language specific APIs.

 Currently the repository contains the following JSON files:
 ```console
 .
 ├── COPYRIGHT
-└── cpu
-    ├── microarchitectures.json         # Contains information on CPU microarchitectures
-    └── microarchitectures_schema.json  # Schema for the file above
+└── cpu/
+    ├── cpuid.json                      # Contains information on CPUID calls to retrieve vendor and features on x86_64
+    ├── cpuid_schema.json               # Schema for the file above
+    ├── microarchitectures.json         # Contains information on CPU microarchitectures
+    └── microarchitectures_schema.json  # Schema for the file above
 ```
lib/spack/external/archspec/json/cpu/cpuid.json (vendored, new file, 1050 lines)
File diff suppressed because it is too large.
lib/spack/external/archspec/json/cpu/cpuid_schema.json (vendored, new file, 134 lines)
@@ -0,0 +1,134 @@
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "Schema for microarchitecture definitions and feature aliases",
  "type": "object",
  "additionalProperties": false,
  "properties": {
    "vendor": {
      "type": "object",
      "additionalProperties": false,
      "properties": {
        "description": {"type": "string"},
        "input": {
          "type": "object",
          "additionalProperties": false,
          "properties": {
            "eax": {"type": "integer"},
            "ecx": {"type": "integer"}
          }
        }
      }
    },
    "highest_extension_support": {
      "type": "object",
      "additionalProperties": false,
      "properties": {
        "description": {"type": "string"},
        "input": {
          "type": "object",
          "additionalProperties": false,
          "properties": {
            "eax": {"type": "integer"},
            "ecx": {"type": "integer"}
          }
        }
      }
    },
    "flags": {
      "type": "array",
      "items": {
        "type": "object",
        "additionalProperties": false,
        "properties": {
          "description": {"type": "string"},
          "input": {
            "type": "object",
            "additionalProperties": false,
            "properties": {
              "eax": {"type": "integer"},
              "ecx": {"type": "integer"}
            }
          },
          "bits": {
            "type": "array",
            "items": {
              "type": "object",
              "additionalProperties": false,
              "properties": {
                "name": {"type": "string"},
                "register": {"type": "string"},
                "bit": {"type": "integer"}
              }
            }
          }
        }
      }
    },
    "extension-flags": {
      "type": "array",
      "items": {
        "type": "object",
        "additionalProperties": false,
        "properties": {
          "description": {"type": "string"},
          "input": {
            "type": "object",
            "additionalProperties": false,
            "properties": {
              "eax": {"type": "integer"},
              "ecx": {"type": "integer"}
            }
          },
          "bits": {
            "type": "array",
            "items": {
              "type": "object",
              "additionalProperties": false,
              "properties": {
                "name": {"type": "string"},
                "register": {"type": "string"},
                "bit": {"type": "integer"}
              }
            }
          }
        }
      }
    }
  }
}
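For orientation, a hedged sketch of a document this schema accepts, written as a Python literal. The `sse2` bit (leaf 1, edx, bit 26) matches the vendored example.py further down; everything else is illustrative:

```python
# Minimal cpuid.json-shaped document: which CPUID leaf to query, and which
# bit of which output register encodes a given feature flag.
fragment = {
    "vendor": {"description": "CPU vendor string", "input": {"eax": 0, "ecx": 0}},
    "highest_extension_support": {
        "description": "highest supported extension leaf",
        "input": {"eax": 0x80000000, "ecx": 0},
    },
    "flags": [
        {
            "description": "leaf 1 feature bits",
            "input": {"eax": 1, "ecx": 0},
            "bits": [{"name": "sse2", "register": "edx", "bit": 26}],
        }
    ],
    "extension-flags": [],
}
```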
lib/spack/external/archspec/vendor/cpuid/LICENSE (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Anders Høst
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
lib/spack/external/archspec/vendor/cpuid/README.md (vendored, new file, 76 lines)
@@ -0,0 +1,76 @@
|
||||
cpuid.py
|
||||
========
|
||||
|
||||
Now, this is silly!
|
||||
|
||||
Pure Python library for accessing information about x86 processors
|
||||
by querying the [CPUID](http://en.wikipedia.org/wiki/CPUID)
|
||||
instruction. Well, not exactly pure Python...
|
||||
|
||||
It works by allocating a small piece of virtual memory, copying
|
||||
a raw x86 function to that memory, giving the memory execute
|
||||
permissions and then calling the memory as a function. The injected
|
||||
function executes the CPUID instruction and copies the result back
|
||||
to a ctypes.Structure where is can be read by Python.
|
||||
|
||||
It should work fine on both 32 and 64 bit versions of Windows and Linux
|
||||
running x86 processors. Apple OS X and other BSD systems should also work,
|
||||
not tested though...
|
||||
|
||||
|
||||
Why?
|
||||
----
|
||||
For poops and giggles. Plus, having access to a low-level feature
|
||||
without having to compile a C wrapper is pretty neat.
|
||||
|
||||
|
||||
Examples
|
||||
--------
|
||||
Getting info with eax=0:
|
||||
|
||||
import cpuid
|
||||
|
||||
q = cpuid.CPUID()
|
||||
eax, ebx, ecx, edx = q(0)
|
||||
|
||||
Running the files:
|
||||
|
||||
$ python example.py
|
||||
Vendor ID : GenuineIntel
|
||||
CPU name : Intel(R) Xeon(R) CPU W3550 @ 3.07GHz
|
||||
|
||||
Vector instructions supported:
|
||||
SSE : Yes
|
||||
SSE2 : Yes
|
||||
SSE3 : Yes
|
||||
SSSE3 : Yes
|
||||
SSE4.1 : Yes
|
||||
SSE4.2 : Yes
|
||||
SSE4a : --
|
||||
AVX : --
|
||||
AVX2 : --
|
||||
|
||||
$ python cpuid.py
|
||||
CPUID A B C D
|
||||
00000000 0000000b 756e6547 6c65746e 49656e69
|
||||
00000001 000106a5 00100800 009ce3bd bfebfbff
|
||||
00000002 55035a01 00f0b2e4 00000000 09ca212c
|
||||
00000003 00000000 00000000 00000000 00000000
|
||||
00000004 00000000 00000000 00000000 00000000
|
||||
00000005 00000040 00000040 00000003 00001120
|
||||
00000006 00000003 00000002 00000001 00000000
|
||||
00000007 00000000 00000000 00000000 00000000
|
||||
00000008 00000000 00000000 00000000 00000000
|
||||
00000009 00000000 00000000 00000000 00000000
|
||||
0000000a 07300403 00000044 00000000 00000603
|
||||
0000000b 00000000 00000000 00000095 00000000
|
||||
80000000 80000008 00000000 00000000 00000000
|
||||
80000001 00000000 00000000 00000001 28100800
|
||||
80000002 65746e49 2952286c 6f655820 2952286e
|
||||
80000003 55504320 20202020 20202020 57202020
|
||||
80000004 30353533 20402020 37302e33 007a4847
|
||||
80000005 00000000 00000000 00000000 00000000
|
||||
80000006 00000000 00000000 01006040 00000000
|
||||
80000007 00000000 00000000 00000000 00000100
|
||||
80000008 00003024 00000000 00000000 00000000
|
||||
|
lib/spack/external/archspec/vendor/cpuid/cpuid.py (vendored, new file, 172 lines)
@@ -0,0 +1,172 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2024 Anders Høst
|
||||
#
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import platform
|
||||
import os
|
||||
import ctypes
|
||||
from ctypes import c_uint32, c_long, c_ulong, c_size_t, c_void_p, POINTER, CFUNCTYPE
|
||||
|
||||
# Posix x86_64:
|
||||
# Three first call registers : RDI, RSI, RDX
|
||||
# Volatile registers : RAX, RCX, RDX, RSI, RDI, R8-11
|
||||
|
||||
# Windows x86_64:
|
||||
# Three first call registers : RCX, RDX, R8
|
||||
# Volatile registers : RAX, RCX, RDX, R8-11
|
||||
|
||||
# cdecl 32 bit:
|
||||
# Three first call registers : Stack (%esp)
|
||||
# Volatile registers : EAX, ECX, EDX
|
||||
|
||||
_POSIX_64_OPC = [
|
||||
0x53, # push %rbx
|
||||
0x89, 0xf0, # mov %esi,%eax
|
||||
0x89, 0xd1, # mov %edx,%ecx
|
||||
0x0f, 0xa2, # cpuid
|
||||
0x89, 0x07, # mov %eax,(%rdi)
|
||||
0x89, 0x5f, 0x04, # mov %ebx,0x4(%rdi)
|
||||
0x89, 0x4f, 0x08, # mov %ecx,0x8(%rdi)
|
||||
0x89, 0x57, 0x0c, # mov %edx,0xc(%rdi)
|
||||
0x5b, # pop %rbx
|
||||
0xc3 # retq
|
||||
]
|
||||
|
||||
_WINDOWS_64_OPC = [
|
||||
0x53, # push %rbx
|
||||
0x89, 0xd0, # mov %edx,%eax
|
||||
0x49, 0x89, 0xc9, # mov %rcx,%r9
|
||||
0x44, 0x89, 0xc1, # mov %r8d,%ecx
|
||||
0x0f, 0xa2, # cpuid
|
||||
0x41, 0x89, 0x01, # mov %eax,(%r9)
|
||||
0x41, 0x89, 0x59, 0x04, # mov %ebx,0x4(%r9)
|
||||
0x41, 0x89, 0x49, 0x08, # mov %ecx,0x8(%r9)
|
||||
0x41, 0x89, 0x51, 0x0c, # mov %edx,0xc(%r9)
|
||||
0x5b, # pop %rbx
|
||||
0xc3 # retq
|
||||
]
|
||||
|
||||
_CDECL_32_OPC = [
|
||||
0x53, # push %ebx
|
||||
0x57, # push %edi
|
||||
0x8b, 0x7c, 0x24, 0x0c, # mov 0xc(%esp),%edi
|
||||
0x8b, 0x44, 0x24, 0x10, # mov 0x10(%esp),%eax
|
||||
0x8b, 0x4c, 0x24, 0x14, # mov 0x14(%esp),%ecx
|
||||
0x0f, 0xa2, # cpuid
|
||||
0x89, 0x07, # mov %eax,(%edi)
|
||||
0x89, 0x5f, 0x04, # mov %ebx,0x4(%edi)
|
||||
0x89, 0x4f, 0x08, # mov %ecx,0x8(%edi)
|
||||
0x89, 0x57, 0x0c, # mov %edx,0xc(%edi)
|
||||
0x5f, # pop %edi
|
||||
0x5b, # pop %ebx
|
||||
0xc3 # ret
|
||||
]
|
||||
|
||||
is_windows = os.name == "nt"
|
||||
is_64bit = ctypes.sizeof(ctypes.c_voidp) == 8
|
||||
|
||||
|
||||
class CPUID_struct(ctypes.Structure):
|
||||
_register_names = ("eax", "ebx", "ecx", "edx")
|
||||
_fields_ = [(r, c_uint32) for r in _register_names]
|
||||
|
||||
def __getitem__(self, item):
|
||||
if item not in self._register_names:
|
||||
raise KeyError(item)
|
||||
return getattr(self, item)
|
||||
|
||||
def __repr__(self):
|
||||
return "eax=0x{:x}, ebx=0x{:x}, ecx=0x{:x}, edx=0x{:x}".format(self.eax, self.ebx, self.ecx, self.edx)
|
||||
|
||||
|
||||
class CPUID(object):
|
||||
def __init__(self):
|
||||
if platform.machine() not in ("AMD64", "x86_64", "x86", "i686"):
|
||||
raise SystemError("Only available for x86")
|
||||
|
||||
if is_windows:
|
||||
if is_64bit:
|
||||
# VirtualAlloc seems to fail under some weird
|
||||
# circumstances when ctypes.windll.kernel32 is
|
||||
# used under 64 bit Python. CDLL fixes this.
|
||||
self.win = ctypes.CDLL("kernel32.dll")
|
||||
opc = _WINDOWS_64_OPC
|
||||
else:
|
||||
# Here ctypes.windll.kernel32 is needed to get the
|
||||
# right DLL. Otherwise it will fail when running
|
||||
# 32 bit Python on 64 bit Windows.
|
||||
self.win = ctypes.windll.kernel32
|
||||
opc = _CDECL_32_OPC
|
||||
else:
|
||||
opc = _POSIX_64_OPC if is_64bit else _CDECL_32_OPC
|
||||
|
||||
size = len(opc)
|
||||
code = (ctypes.c_ubyte * size)(*opc)
|
||||
|
||||
if is_windows:
|
||||
self.win.VirtualAlloc.restype = c_void_p
|
||||
self.win.VirtualAlloc.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_ulong, ctypes.c_ulong]
|
||||
self.addr = self.win.VirtualAlloc(None, size, 0x1000, 0x40)
|
||||
if not self.addr:
|
||||
raise MemoryError("Could not allocate RWX memory")
|
||||
ctypes.memmove(self.addr, code, size)
|
||||
else:
|
||||
from mmap import (
|
||||
mmap,
|
||||
MAP_PRIVATE,
|
||||
MAP_ANONYMOUS,
|
||||
PROT_WRITE,
|
||||
PROT_READ,
|
||||
PROT_EXEC,
|
||||
)
|
||||
self.mm = mmap(
|
||||
-1,
|
||||
size,
|
||||
flags=MAP_PRIVATE | MAP_ANONYMOUS,
|
||||
prot=PROT_WRITE | PROT_READ | PROT_EXEC,
|
||||
)
|
||||
self.mm.write(code)
|
||||
self.addr = ctypes.addressof(ctypes.c_int.from_buffer(self.mm))
|
||||
|
||||
func_type = CFUNCTYPE(None, POINTER(CPUID_struct), c_uint32, c_uint32)
|
||||
self.func_ptr = func_type(self.addr)
|
||||
|
||||
def __call__(self, eax, ecx=0):
|
||||
struct = self.registers_for(eax=eax, ecx=ecx)
|
||||
return struct.eax, struct.ebx, struct.ecx, struct.edx
|
||||
|
||||
def registers_for(self, eax, ecx=0):
|
||||
"""Calls cpuid with eax and ecx set as the input arguments, and returns a structure
|
||||
containing eax, ebx, ecx, and edx.
|
||||
"""
|
||||
struct = CPUID_struct()
|
||||
self.func_ptr(struct, eax, ecx)
|
||||
return struct
|
||||
|
||||
def __del__(self):
|
||||
if is_windows:
|
||||
self.win.VirtualFree.restype = c_long
|
||||
self.win.VirtualFree.argtypes = [c_void_p, c_size_t, c_ulong]
|
||||
self.win.VirtualFree(self.addr, 0, 0x8000)
|
||||
else:
|
||||
self.mm.close()
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
def valid_inputs():
|
||||
cpuid = CPUID()
|
||||
for eax in (0x0, 0x80000000):
|
||||
highest, _, _, _ = cpuid(eax)
|
||||
while eax <= highest:
|
||||
regs = cpuid(eax)
|
||||
yield (eax, regs)
|
||||
eax += 1
|
||||
|
||||
|
||||
print(" ".join(x.ljust(8) for x in ("CPUID", "A", "B", "C", "D")).strip())
|
||||
for eax, regs in valid_inputs():
|
||||
print("%08x" % eax, " ".join("%08x" % reg for reg in regs))
|
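A minimal sketch of driving the vendored class directly (x86 only, per the `platform.machine()` guard above):

```python
from cpuid import CPUID  # the vendored module above

cpu = CPUID()
regs = cpu.registers_for(eax=0, ecx=0)  # leaf 0: highest basic leaf + vendor string
print(hex(regs.eax), regs["ebx"])       # the struct supports attribute and key access
```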
lib/spack/external/archspec/vendor/cpuid/example.py (vendored, new file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2024 Anders Høst
|
||||
#
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import struct
|
||||
import cpuid
|
||||
|
||||
|
||||
def cpu_vendor(cpu):
|
||||
_, b, c, d = cpu(0)
|
||||
return struct.pack("III", b, d, c).decode("utf-8")
|
||||
|
||||
|
||||
def cpu_name(cpu):
|
||||
name = "".join((struct.pack("IIII", *cpu(0x80000000 + i)).decode("utf-8")
|
||||
for i in range(2, 5)))
|
||||
|
||||
return name.split('\x00', 1)[0]
|
||||
|
||||
|
||||
def is_set(cpu, leaf, subleaf, reg_idx, bit):
|
||||
"""
|
||||
@param {leaf} %eax
|
||||
@param {sublead} %ecx, 0 in most cases
|
||||
@param {reg_idx} idx of [%eax, %ebx, %ecx, %edx], 0-based
|
||||
@param {bit} bit of reg selected by {reg_idx}, 0-based
|
||||
"""
|
||||
|
||||
regs = cpu(leaf, subleaf)
|
||||
|
||||
if (1 << bit) & regs[reg_idx]:
|
||||
return "Yes"
|
||||
else:
|
||||
return "--"
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
cpu = cpuid.CPUID()
|
||||
|
||||
print("Vendor ID : %s" % cpu_vendor(cpu))
|
||||
print("CPU name : %s" % cpu_name(cpu))
|
||||
print()
|
||||
print("Vector instructions supported:")
|
||||
print("SSE : %s" % is_set(cpu, 1, 0, 3, 25))
|
||||
print("SSE2 : %s" % is_set(cpu, 1, 0, 3, 26))
|
||||
print("SSE3 : %s" % is_set(cpu, 1, 0, 2, 0))
|
||||
print("SSSE3 : %s" % is_set(cpu, 1, 0, 2, 9))
|
||||
print("SSE4.1 : %s" % is_set(cpu, 1, 0, 2, 19))
|
||||
print("SSE4.2 : %s" % is_set(cpu, 1, 0, 2, 20))
|
||||
print("SSE4a : %s" % is_set(cpu, 0x80000001, 0, 2, 6))
|
||||
print("AVX : %s" % is_set(cpu, 1, 0, 2, 28))
|
||||
print("AVX2 : %s" % is_set(cpu, 7, 0, 1, 5))
|
||||
print("BMI1 : %s" % is_set(cpu, 7, 0, 1, 3))
|
||||
print("BMI2 : %s" % is_set(cpu, 7, 0, 1, 8))
|
||||
# Intel RDT CMT/MBM
|
||||
print("L3 Monitoring : %s" % is_set(cpu, 0xf, 0, 3, 1))
|
||||
print("L3 Occupancy : %s" % is_set(cpu, 0xf, 1, 3, 0))
|
||||
print("L3 Total BW : %s" % is_set(cpu, 0xf, 1, 3, 1))
|
||||
print("L3 Local BW : %s" % is_set(cpu, 0xf, 1, 3, 2))
|
@@ -42,11 +42,6 @@ def convert_to_posix_path(path: str) -> str:
     return format_os_path(path, mode=Path.unix)


-def convert_to_windows_path(path: str) -> str:
-    """Converts the input path to Windows style."""
-    return format_os_path(path, mode=Path.windows)
-
-
 def convert_to_platform_path(path: str) -> str:
     """Converts the input path to the current platform's native style."""
     return format_os_path(path, mode=Path.platform_path)
@@ -237,16 +237,6 @@ def _get_mime_type():
     return file_command("-b", "-h", "--mime-type")


-@memoized
-def _get_mime_type_compressed():
-    """Same as _get_mime_type but attempts to check for
-    compression first
-    """
-    mime_uncompressed = _get_mime_type()
-    mime_uncompressed.add_default_arg("-Z")
-    return mime_uncompressed
-
-
 def mime_type(filename):
     """Returns the mime type and subtype of a file.

@@ -262,21 +252,6 @@ def mime_type(filename):
     return type, subtype


-def compressed_mime_type(filename):
-    """Same as mime_type but checks for type that has been compressed
-
-    Args:
-        filename (str): file to be analyzed
-
-    Returns:
-        Tuple containing the MIME type and subtype
-    """
-    output = _get_mime_type_compressed()(filename, output=str, error=str).strip()
-    tty.debug("==> " + output)
-    type, _, subtype = output.partition("/")
-    return type, subtype
-
-
 #: This generates the library filenames that may appear on any OS.
 library_extensions = ["a", "la", "so", "tbd", "dylib"]

@@ -308,13 +283,6 @@ def paths_containing_libs(paths, library_names):
     return rpaths_to_include


-@system_path_filter
-def same_path(path1, path2):
-    norm1 = os.path.abspath(path1).rstrip(os.path.sep)
-    norm2 = os.path.abspath(path2).rstrip(os.path.sep)
-    return norm1 == norm2
-
-
 def filter_file(
     regex: str,
     repl: Union[str, Callable[[Match], str]],

@@ -909,17 +877,6 @@ def is_exe(path):
     return os.path.isfile(path) and os.access(path, os.X_OK)


-@system_path_filter
-def get_filetype(path_name):
-    """
-    Return the output of file path_name as a string to identify file type.
-    """
-    file = Executable("file")
-    file.add_default_env("LC_ALL", "C")
-    output = file("-b", "-h", "%s" % path_name, output=str, error=str)
-    return output.strip()
-
-
 def has_shebang(path):
     """Returns whether a path has a shebang line. Returns False if the file cannot be opened."""
     try:

@@ -1169,20 +1126,6 @@ def write_tmp_and_move(filename):
     shutil.move(tmp, filename)


-@contextmanager
-@system_path_filter
-def open_if_filename(str_or_file, mode="r"):
-    """Takes either a path or a file object, and opens it if it is a path.
-
-    If it's a file object, just yields the file object.
-    """
-    if isinstance(str_or_file, str):
-        with open(str_or_file, mode) as f:
-            yield f
-    else:
-        yield str_or_file
-
-
 @system_path_filter
 def touch(path):
     """Creates an empty file at the specified path."""

@@ -1295,19 +1238,6 @@ def temp_cwd():
     shutil.rmtree(tmp_dir, **kwargs)


-@contextmanager
-@system_path_filter
-def temp_rename(orig_path, temp_path):
-    same_path = os.path.realpath(orig_path) == os.path.realpath(temp_path)
-    if not same_path:
-        shutil.move(orig_path, temp_path)
-    try:
-        yield
-    finally:
-        if not same_path:
-            shutil.move(temp_path, orig_path)
-
-
 @system_path_filter
 def can_access(file_name):
     """True if we have read/write access to the file."""
@@ -98,36 +98,6 @@ def caller_locals():
     del stack


-def get_calling_module_name():
-    """Make sure that the caller is a class definition, and return the
-    enclosing module's name.
-    """
-    # Passing zero here skips line context for speed.
-    stack = inspect.stack(0)
-    try:
-        # Make sure locals contain __module__
-        caller_locals = stack[2][0].f_locals
-    finally:
-        del stack
-
-    if "__module__" not in caller_locals:
-        raise RuntimeError(
-            "Must invoke get_calling_module_name() " "from inside a class definition!"
-        )
-
-    module_name = caller_locals["__module__"]
-    base_name = module_name.split(".")[-1]
-    return base_name
-
-
-def attr_required(obj, attr_name):
-    """Ensure that a class has a required attribute."""
-    if not hasattr(obj, attr_name):
-        raise RequiredAttributeError(
-            "No required attribute '%s' in class '%s'" % (attr_name, obj.__class__.__name__)
-        )
-
-
 def attr_setdefault(obj, name, value):
     """Like dict.setdefault, but for objects."""
     if not hasattr(obj, name):

@@ -513,42 +483,6 @@ def copy(self):
     return clone


-def in_function(function_name):
-    """True if the caller was called from some function with
-    the supplied Name, False otherwise."""
-    stack = inspect.stack()
-    try:
-        for elt in stack[2:]:
-            if elt[3] == function_name:
-                return True
-        return False
-    finally:
-        del stack
-
-
-def check_kwargs(kwargs, fun):
-    """Helper for making functions with kwargs. Checks whether the kwargs
-    are empty after all of them have been popped off. If they're
-    not, raises an error describing which kwargs are invalid.
-
-    Example::
-
-        def foo(self, **kwargs):
-            x = kwargs.pop('x', None)
-            y = kwargs.pop('y', None)
-            z = kwargs.pop('z', None)
-            check_kwargs(kwargs, self.foo)
-
-        # This raises a TypeError:
-        foo(w='bad kwarg')
-    """
-    if kwargs:
-        raise TypeError(
-            "'%s' is an invalid keyword argument for function %s()."
-            % (next(iter(kwargs)), fun.__name__)
-        )
-
-
 def match_predicate(*args):
     """Utility function for making string matching predicates.

@@ -764,11 +698,6 @@ def pretty_seconds(seconds):
     return pretty_seconds_formatter(seconds)(seconds)


-class RequiredAttributeError(ValueError):
-    def __init__(self, message):
-        super().__init__(message)
-
-
 class ObjectWrapper:
     """Base class that wraps an object. Derived classes can add new behavior
     while staying undercover.

@@ -843,6 +772,30 @@ def __repr__(self):
     return repr(self.instance)


+def get_entry_points(*, group: str):
+    """Wrapper for ``importlib.metadata.entry_points``
+
+    Args:
+        group: entry points to select
+
+    Returns:
+        EntryPoints for ``group`` or empty list if unsupported
+    """
+
+    try:
+        import importlib.metadata  # type: ignore  # novermin
+    except ImportError:
+        return []
+
+    try:
+        return importlib.metadata.entry_points(group=group)
+    except TypeError:
+        # Prior to Python 3.10, entry_points accepted no parameters and always
+        # returned a dictionary of entry points, keyed by group. See
+        # https://docs.python.org/3/library/importlib.metadata.html#entry-points
+        return importlib.metadata.entry_points().get(group, [])


 def load_module_from_file(module_name, module_path):
     """Loads a python module from the path of the corresponding file.
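`get_entry_points` smooths over an API change in `importlib.metadata`: selecting by `group=` keyword only exists on Python 3.10+, so older interpreters fall back to the dictionary interface. A hedged usage sketch (the group name is hypothetical):

```python
# Iterate plugins registered under a hypothetical entry-point group.
for entry_point in get_entry_points(group="example.plugins"):
    plugin = entry_point.load()  # import the registered object on demand
```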
@@ -911,25 +864,6 @@ def uniq(sequence):
     return uniq_list


-def star(func):
-    """Unpacks arguments for use with Multiprocessing mapping functions"""
-
-    def _wrapper(args):
-        return func(*args)
-
-    return _wrapper
-
-
-class Devnull:
-    """Null stream with less overhead than ``os.devnull``.
-
-    See https://stackoverflow.com/a/2929954.
-    """
-
-    def write(self, *_):
-        pass
-
-
 def elide_list(line_list, max_num=10):
     """Takes a long list and limits it to a smaller number of elements,
     replacing intervening elements with '...'. For example::
@@ -815,10 +815,6 @@ def __init__(self, path):
         super().__init__(msg)


-class LockLimitError(LockError):
-    """Raised when exceeding the maximum attempts to acquire a lock."""
-
-
 class LockTimeoutError(LockError):
     """Raised when an attempt to acquire a lock times out."""
@@ -44,10 +44,6 @@ def is_debug(level=1):
     return _debug >= level


-def is_stacktrace():
-    return _stacktrace
-
-
 def set_debug(level=0):
     global _debug
     assert level >= 0, "Debug level must be a positive value"

@@ -252,37 +248,6 @@ def die(message, *args, **kwargs) -> NoReturn:
     sys.exit(1)


-def get_number(prompt, **kwargs):
-    default = kwargs.get("default", None)
-    abort = kwargs.get("abort", None)
-
-    if default is not None and abort is not None:
-        prompt += " (default is %s, %s to abort) " % (default, abort)
-    elif default is not None:
-        prompt += " (default is %s) " % default
-    elif abort is not None:
-        prompt += " (%s to abort) " % abort
-
-    number = None
-    while number is None:
-        msg(prompt, newline=False)
-        ans = input()
-        if ans == str(abort):
-            return None
-
-        if ans:
-            try:
-                number = int(ans)
-                if number < 1:
-                    msg("Please enter a valid number.")
-                    number = None
-            except ValueError:
-                msg("Please enter a valid number.")
-        elif default is not None:
-            number = default
-    return number
-
-
 def get_yes_or_no(prompt, **kwargs):
     default_value = kwargs.get("default", None)
@@ -213,9 +213,6 @@ def _root_spec(spec_str: str) -> str:
     platform = str(spack.platforms.host())
     if platform == "darwin":
         spec_str += " %apple-clang"
-    elif platform == "windows":
-        # TODO (johnwparent): Remove version constraint when clingo patch is up
-        spec_str += " %msvc@:19.37"
     elif platform == "linux":
         spec_str += " %gcc"
     elif platform == "freebsd":
@@ -147,7 +147,7 @@ def _add_compilers_if_missing() -> None:
         mixed_toolchain=sys.platform == "darwin"
     )
     if new_compilers:
-        spack.compilers.add_compilers_to_config(new_compilers, init_config=False)
+        spack.compilers.add_compilers_to_config(new_compilers)


 @contextlib.contextmanager
@@ -541,7 +541,7 @@ def autoreconf(self, pkg, spec, prefix):
         if os.path.exists(self.configure_abs_path):
             return

-        # Else try to regenerate it, which reuquires a few build dependencies
+        # Else try to regenerate it, which requires a few build dependencies
         ensure_build_dependencies_or_raise(
             spec=spec,
             dependencies=["autoconf", "automake", "libtool"],
@@ -4,12 +4,15 @@
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
 import inspect
 import os
 from typing import Iterable

-from llnl.util.filesystem import filter_file
+from llnl.util.filesystem import filter_file, find
 from llnl.util.lang import memoized

 import spack.builder
 import spack.package_base
 from spack.directives import build_system, extends
 from spack.install_test import SkipTest, test_part
 from spack.util.executable import Executable

 from ._checks import BaseBuilder, execute_build_time_tests

@@ -28,6 +31,58 @@ class PerlPackage(spack.package_base.PackageBase):

     extends("perl", when="build_system=perl")

+    @property
+    @memoized
+    def _platform_dir(self):
+        """Name of platform-specific module subdirectory."""
+        perl = self.spec["perl"].command
+        options = "-E", "use Config; say $Config{archname}"
+        out = perl(*options, output=str.split, error=str.split)
+        return out.strip()
+
+    @property
+    def use_modules(self) -> Iterable[str]:
+        """Names of the package's perl modules."""
+        module_files = find(self.prefix.lib, ["*.pm"], recursive=True)
+
+        # Drop the platform directory, if present
+        if self._platform_dir:
+            platform_dir = self._platform_dir + os.sep
+            module_files = [m.replace(platform_dir, "") for m in module_files]
+
+        # Drop the extension and library path
+        prefix = self.prefix.lib + os.sep
+        modules = [os.path.splitext(m)[0].replace(prefix, "") for m in module_files]
+
+        # Drop the perl subdirectory as well
+        return ["::".join(m.split(os.sep)[1:]) for m in modules]
+
+    @property
+    def skip_modules(self) -> Iterable[str]:
+        """Names of modules that should be skipped when running tests.
+
+        These are a subset of use_modules.
+
+        Returns:
+            List of strings of module names.
+        """
+        return []
+
+    def test_use(self):
+        """Test 'use module'"""
+        if not self.use_modules:
+            raise SkipTest("Test requires use_modules package property.")
+
+        perl = self.spec["perl"].command
+        for module in self.use_modules:
+            if module in self.skip_modules:
+                continue
+
+            with test_part(self, f"test_use-{module}", purpose=f"checking use of {module}"):
+                options = ["-we", f'use strict; use {module}; print("OK\n")']
+                out = perl(*options, output=str.split, error=str.split)
+                assert "OK" in out
+

 @spack.builder.builder("perl")
 class PerlBuilder(BaseBuilder):
@@ -52,7 +107,7 @@ class PerlBuilder(BaseBuilder):
     phases = ("configure", "build", "install")

     #: Names associated with package methods in the old build-system format
-    legacy_methods = ("configure_args", "check")
+    legacy_methods = ("configure_args", "check", "test_use")

     #: Names associated with package attributes in the old build-system format
     legacy_attributes = ()
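The new `use_modules` property turns installed `.pm` files back into `Foo::Bar`-style module names by stripping the platform directory, the library prefix, the extension, and the leading `perl5` path component. A standalone sketch of that mapping (paths are hypothetical):

```python
import os

prefix_lib = os.path.join(os.sep, "opt", "perl-pkg", "lib")   # hypothetical prefix.lib
module_file = os.path.join(prefix_lib, "perl5", "Foo", "Bar.pm")

rel = os.path.splitext(module_file)[0].replace(prefix_lib + os.sep, "")
print("::".join(rel.split(os.sep)[1:]))                       # -> Foo::Bar
```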
@@ -27,7 +27,7 @@
 import spack.package_base
 import spack.spec
 import spack.store
-from spack.directives import build_system, depends_on, extends, maintainers
+from spack.directives import build_system, depends_on, extends
 from spack.error import NoHeadersError, NoLibrariesError
 from spack.install_test import test_part
 from spack.spec import Spec

@@ -56,8 +56,6 @@ def _flatten_dict(dictionary: Mapping[str, object]) -> Iterable[str]:


 class PythonExtension(spack.package_base.PackageBase):
-    maintainers("adamjstewart")
-
     @property
     def import_modules(self) -> Iterable[str]:
         """Names of modules that the Python package provides.
@@ -9,6 +9,8 @@
 import inspect
 from typing import List, Optional, Tuple

 from llnl.util import lang

 import spack.build_environment

 #: Builder classes, as registered by the "builder" decorator

@@ -231,24 +233,27 @@ def __new__(mcs, name, bases, attr_dict):
         for temporary_stage in (_RUN_BEFORE, _RUN_AFTER):
             staged_callbacks = temporary_stage.callbacks

-            # We don't have callbacks in this class, move on
-            if not staged_callbacks:
+            # Here we have an adapter from an old-style package. This means there is no
+            # hierarchy of builders, and every callback that had to be combined between
+            # *Package and *Builder has been combined already by _PackageAdapterMeta
+            if name == "Adapter":
                 continue

-            # If we are here we have callbacks. To get a complete list, get first what
-            # was attached to parent classes, then prepend what we have registered here.
+            # If we are here we have callbacks. To get a complete list, we accumulate all the
+            # callbacks from base classes, we deduplicate them, then prepend what we have
+            # registered here.
             #
             # The order should be:
             # 1. Callbacks are registered in order within the same class
             # 2. Callbacks defined in derived classes precede those defined in base
             #    classes
+            callbacks_from_base = []
             for base in bases:
-                callbacks_from_base = getattr(base, temporary_stage.attribute_name, None)
-                if callbacks_from_base:
-                    break
-            else:
-                callbacks_from_base = []
-
+                current_callbacks = getattr(base, temporary_stage.attribute_name, None)
+                if not current_callbacks:
+                    continue
+                callbacks_from_base.extend(current_callbacks)
+            callbacks_from_base = list(lang.dedupe(callbacks_from_base))
             # Set the callbacks in this class and flush the temporary stage
             attr_dict[temporary_stage.attribute_name] = staged_callbacks[:] + callbacks_from_base
             del temporary_stage.callbacks[:]
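The rewrite collects callbacks from every base class instead of stopping at the first one that defines any, then relies on `llnl.util.lang.dedupe`, which drops repeats while keeping first-seen order:

```python
from llnl.util.lang import dedupe

list(dedupe([3, 1, 3, 2, 1]))  # -> [3, 1, 2]
```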
@@ -570,6 +570,14 @@ def add_concretizer_args(subparser):
         default=None,
         help="reuse installed dependencies only",
     )
+    subgroup.add_argument(
+        "--deprecated",
+        action=ConfigSetAction,
+        dest="config:deprecated",
+        const=True,
+        default=None,
+        help="allow concretizer to select deprecated versions",
+    )


 def add_connection_args(subparser, add_help):
@@ -89,7 +89,7 @@ def compiler_find(args):
         paths, scope=None, mixed_toolchain=args.mixed_toolchain
     )
     if new_compilers:
-        spack.compilers.add_compilers_to_config(new_compilers, scope=args.scope, init_config=False)
+        spack.compilers.add_compilers_to_config(new_compilers, scope=args.scope)
         n = len(new_compilers)
         s = "s" if n > 1 else ""
@@ -19,7 +19,7 @@


 def setup_parser(subparser):
-    arguments.add_common_arguments(subparser, ["jobs"])
+    arguments.add_common_arguments(subparser, ["jobs", "no_checksum", "spec"])
     subparser.add_argument(
         "-d",
         "--source-path",

@@ -34,7 +34,6 @@ def setup_parser(subparser):
         dest="ignore_deps",
         help="do not try to install dependencies of requested packages",
     )
-    arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
     subparser.add_argument(
         "--keep-prefix",
         action="store_true",

@@ -63,7 +62,6 @@ def setup_parser(subparser):
         choices=["root", "all"],
         help="run tests on only root packages or all packages",
     )
-    arguments.add_common_arguments(subparser, ["spec"])

     stop_group = subparser.add_mutually_exclusive_group()
     stop_group.add_argument(

@@ -125,9 +123,6 @@ def dev_build(self, args):
     if args.no_checksum:
         spack.config.set("config:checksum", False, scope="command_line")

-    if args.deprecated:
-        spack.config.set("config:deprecated", True, scope="command_line")
-
     tests = False
     if args.test == "all":
         tests = True
@@ -18,6 +18,7 @@
 import spack.cray_manifest as cray_manifest
 import spack.detection
 import spack.error
 import spack.repo
 import spack.util.environment
 from spack.cmd.common import arguments

@@ -152,9 +153,9 @@ def external_find(args):
 def packages_to_search_for(
     *, names: Optional[List[str]], tags: List[str], exclude: Optional[List[str]]
 ):
-    result = []
-    for current_tag in tags:
-        result.extend(spack.repo.PATH.packages_with_tags(current_tag, full=True))
+    result = list(
+        {pkg for tag in tags for pkg in spack.repo.PATH.packages_with_tags(tag, full=True)}
+    )

     if names:
         # Match both fully qualified and unqualified
@@ -18,7 +18,7 @@


 def setup_parser(subparser):
-    arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
+    arguments.add_common_arguments(subparser, ["no_checksum", "specs"])
     subparser.add_argument(
         "-m",
         "--missing",

@@ -28,7 +28,7 @@ def setup_parser(subparser):
     subparser.add_argument(
         "-D", "--dependencies", action="store_true", help="also fetch all dependencies"
     )
-    arguments.add_common_arguments(subparser, ["specs"])
+    arguments.add_concretizer_args(subparser)
     subparser.epilog = (
         "With an active environment, the specs "
         "parameter can be omitted. In this case all (uninstalled"

@@ -40,9 +40,6 @@ def fetch(parser, args):
     if args.no_checksum:
         spack.config.set("config:checksum", False, scope="command_line")

-    if args.deprecated:
-        spack.config.set("config:deprecated", True, scope="command_line")
-
     if args.specs:
         specs = spack.cmd.parse_specs(args.specs, concretize=True)
     else:
@@ -176,7 +176,7 @@ def setup_parser(subparser):
         dest="install_source",
         help="install source files in prefix",
     )
-    arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
+    arguments.add_common_arguments(subparser, ["no_checksum"])
     subparser.add_argument(
         "-v",
         "--verbose",

@@ -326,9 +326,6 @@ def install(parser, args):
     if args.no_checksum:
         spack.config.set("config:checksum", False, scope="command_line")

-    if args.deprecated:
-        spack.config.set("config:deprecated", True, scope="command_line")
-
     if args.log_file and not args.log_format:
         msg = "the '--log-format' must be specified when using '--log-file'"
         tty.die(msg)
@@ -53,6 +53,7 @@ def setup_parser(subparser):
         "-S", "--stages", action="store_true", help="top level stage directory"
     )
     directories.add_argument(
+        "-c",
         "--source-dir",
         action="store_true",
        help="source directory for a spec (requires it to be staged first)",
@@ -28,7 +28,7 @@


 def setup_parser(subparser):
-    arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
+    arguments.add_common_arguments(subparser, ["no_checksum"])

     sp = subparser.add_subparsers(metavar="SUBCOMMAND", dest="mirror_command")

@@ -72,6 +72,7 @@ def setup_parser(subparser):
         " retrieve all versions of each package",
     )
     arguments.add_common_arguments(create_parser, ["specs"])
+    arguments.add_concretizer_args(create_parser)

     # Destroy
     destroy_parser = sp.add_parser("destroy", help=mirror_destroy.__doc__)

@@ -549,7 +550,4 @@ def mirror(parser, args):
     if args.no_checksum:
         spack.config.set("config:checksum", False, scope="command_line")

-    if args.deprecated:
-        spack.config.set("config:deprecated", True, scope="command_line")
-
     action[args.mirror_command](args)
@@ -19,7 +19,7 @@


 def setup_parser(subparser):
-    arguments.add_common_arguments(subparser, ["no_checksum", "deprecated", "specs"])
+    arguments.add_common_arguments(subparser, ["no_checksum", "specs"])
     arguments.add_concretizer_args(subparser)


@@ -33,9 +33,6 @@ def patch(parser, args):
     if args.no_checksum:
         spack.config.set("config:checksum", False, scope="command_line")

-    if args.deprecated:
-        spack.config.set("config:deprecated", True, scope="command_line")
-
     specs = spack.cmd.parse_specs(args.specs, concretize=False)
     for spec in specs:
         _patch(spack.cmd.matching_spec_from_env(spec).package)
@@ -22,7 +22,7 @@


 def setup_parser(subparser):
-    arguments.add_common_arguments(subparser, ["no_checksum", "deprecated", "specs"])
+    arguments.add_common_arguments(subparser, ["no_checksum", "specs"])
     subparser.add_argument(
         "-p", "--path", dest="path", help="path to stage package, does not add to spack tree"
     )

@@ -33,9 +33,6 @@ def stage(parser, args):
     if args.no_checksum:
         spack.config.set("config:checksum", False, scope="command_line")

-    if args.deprecated:
-        spack.config.set("config:deprecated", True, scope="command_line")
-
     if not args.specs:
         env = ev.active_environment()
         if not env:
@@ -228,7 +228,7 @@ def create_reporter(args, specs_to_test, test_suite):

 def test_list(args):
     """list installed packages with available tests"""
-    tagged = set(spack.repo.PATH.packages_with_tags(*args.tag)) if args.tag else set()
+    tagged = spack.repo.PATH.packages_with_tags(*args.tag) if args.tag else set()

     def has_test_and_tags(pkg_class):
         tests = spack.install_test.test_functions(pkg_class)
@@ -334,6 +334,40 @@ def __init__(
         # used for version checks for API, e.g. C++11 flag
         self._real_version = None

+    def __eq__(self, other):
+        return (
+            self.cc == other.cc
+            and self.cxx == other.cxx
+            and self.fc == other.fc
+            and self.f77 == other.f77
+            and self.spec == other.spec
+            and self.operating_system == other.operating_system
+            and self.target == other.target
+            and self.flags == other.flags
+            and self.modules == other.modules
+            and self.environment == other.environment
+            and self.extra_rpaths == other.extra_rpaths
+            and self.enable_implicit_rpaths == other.enable_implicit_rpaths
+        )
+
+    def __hash__(self):
+        return hash(
+            (
+                self.cc,
+                self.cxx,
+                self.fc,
+                self.f77,
+                self.spec,
+                self.operating_system,
+                self.target,
+                str(self.flags),
+                str(self.modules),
+                str(self.environment),
+                str(self.extra_rpaths),
+                self.enable_implicit_rpaths,
+            )
+        )
+
     def verify_executables(self):
         """Raise an error if any of the compiler executables is not valid.
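A note on the `__hash__` above: `flags`, `modules`, and `environment` are mutable containers, so they are stringified rather than hashed directly:

```python
d = {"cflags": "-O2"}
# hash(d)      # would raise TypeError: unhashable type: 'dict'
hash(str(d))   # fine; equal compilers built from the same config hash equal
```

One caveat worth knowing: two dicts that compare equal but were built with different key insertion order stringify differently, so this scheme is best-effort rather than a strict hash/equality contract.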
@@ -389,8 +423,7 @@ def implicit_rpaths(self):

         # Put CXX first since it has the most linking issues
         # And because it has flags that affect linking
-        exe_paths = [x for x in [self.cxx, self.cc, self.fc, self.f77] if x]
-        link_dirs = self._get_compiler_link_paths(exe_paths)
+        link_dirs = self._get_compiler_link_paths()

         all_required_libs = list(self.required_libs) + Compiler._all_compiler_rpath_libraries
         return list(paths_containing_libs(link_dirs, all_required_libs))

@@ -403,43 +436,33 @@ def required_libs(self):
         # By default every compiler returns the empty list
         return []

-    def _get_compiler_link_paths(self, paths):
-        first_compiler = next((c for c in paths if c), None)
-        if not first_compiler:
-            return []
-        if not self.verbose_flag:
-            # In this case there is no mechanism to learn what link directories
-            # are used by the compiler
+    def _get_compiler_link_paths(self):
+        cc = self.cc if self.cc else self.cxx
+        if not cc or not self.verbose_flag:
+            # Cannot determine implicit link paths without a compiler / verbose flag
             return []

-        # What flag types apply to first_compiler, in what order
-        flags = ["cppflags", "ldflags"]
-        if first_compiler == self.cc:
-            flags = ["cflags"] + flags
-        elif first_compiler == self.cxx:
-            flags = ["cxxflags"] + flags
+        if cc == self.cc:
+            flags = ["cflags", "cppflags", "ldflags"]
         else:
-            flags.append("fflags")
+            flags = ["cxxflags", "cppflags", "ldflags"]

         try:
             tmpdir = tempfile.mkdtemp(prefix="spack-implicit-link-info")
             fout = os.path.join(tmpdir, "output")
             fin = os.path.join(tmpdir, "main.c")

-            with open(fin, "w+") as csource:
+            with open(fin, "w") as csource:
                 csource.write(
-                    "int main(int argc, char* argv[]) { " "(void)argc; (void)argv; return 0; }\n"
+                    "int main(int argc, char* argv[]) { (void)argc; (void)argv; return 0; }\n"
                 )
-            compiler_exe = spack.util.executable.Executable(first_compiler)
+            cc_exe = spack.util.executable.Executable(cc)
             for flag_type in flags:
-                for flag in self.flags.get(flag_type, []):
-                    compiler_exe.add_default_arg(flag)
+                cc_exe.add_default_arg(*self.flags.get(flag_type, []))

-            output = ""
             with self.compiler_environment():
-                output = str(
-                    compiler_exe(self.verbose_flag, fin, "-o", fout, output=str, error=str)
-                )  # str for py2
+                output = cc_exe(self.verbose_flag, fin, "-o", fout, output=str, error=str)
                 return _parse_non_system_link_dirs(output)
         except spack.util.executable.ProcessError as pe:
             tty.debug("ProcessError: Command exited with non-zero status: " + pe.long_message)
@@ -109,7 +109,7 @@ def _to_dict(compiler):
     return {"compiler": d}


-def get_compiler_config(scope=None, init_config=True):
+def get_compiler_config(scope=None, init_config=False):
     """Return the compiler configuration for the specified architecture."""

     config = spack.config.get("compilers", scope=scope) or []

@@ -118,6 +118,8 @@ def get_compiler_config(scope=None, init_config=True):

     merged_config = spack.config.get("compilers")
     if merged_config:
+        # Config is empty for this scope
+        # Do not init config because there is a non-empty scope
         return config

     _init_compiler_config(scope=scope)

@@ -125,6 +127,95 @@ def get_compiler_config(scope=None, init_config=True):
     return config


+def get_compiler_config_from_packages(scope=None):
+    """Return the compiler configuration from packages.yaml"""
+    config = spack.config.get("packages", scope=scope)
+    if not config:
+        return []
+
+    packages = []
+    compiler_package_names = supported_compilers() + list(package_name_to_compiler_name.keys())
+    for name, entry in config.items():
+        if name not in compiler_package_names:
+            continue
+        externals_config = entry.get("externals", None)
+        if not externals_config:
+            continue
+        packages.extend(_compiler_config_from_package_config(externals_config))
+
+    return packages
+
+
+def _compiler_config_from_package_config(config):
+    compilers = []
+    for entry in config:
+        compiler = _compiler_config_from_external(entry)
+        if compiler:
+            compilers.append(compiler)
+
+    return compilers
+
+
+def _compiler_config_from_external(config):
+    spec = spack.spec.parse_with_version_concrete(config["spec"])
+    # use str(spec.versions) to allow `@x.y.z` instead of `@=x.y.z`
+    compiler_spec = spack.spec.CompilerSpec(
+        package_name_to_compiler_name.get(spec.name, spec.name), spec.version
+    )
+
+    extra_attributes = config.get("extra_attributes", {})
+    prefix = config.get("prefix", None)
+
+    compiler_class = class_for_compiler_name(compiler_spec.name)
+    paths = extra_attributes.get("paths", {})
+    compiler_langs = ["cc", "cxx", "fc", "f77"]
+    for lang in compiler_langs:
+        if paths.setdefault(lang, None):
+            continue
+
+        if not prefix:
+            continue
+
+        # Check for files that satisfy the naming scheme for this compiler
+        bindir = os.path.join(prefix, "bin")
+        for f, regex in itertools.product(os.listdir(bindir), compiler_class.search_regexps(lang)):
+            if regex.match(f):
+                paths[lang] = os.path.join(bindir, f)
+
+    if all(v is None for v in paths.values()):
+        return None
+
+    if not spec.architecture:
+        host_platform = spack.platforms.host()
+        operating_system = host_platform.operating_system("default_os")
+        target = host_platform.target("default_target").microarchitecture
+    else:
+        target = spec.target
+        if not target:
+            host_platform = spack.platforms.host()
+            target = host_platform.target("default_target").microarchitecture
+
+        operating_system = spec.os
+        if not operating_system:
+            host_platform = spack.platforms.host()
+            operating_system = host_platform.operating_system("default_os")
+
+    compiler_entry = {
+        "compiler": {
+            "spec": str(compiler_spec),
+            "paths": paths,
+            "flags": extra_attributes.get("flags", {}),
+            "operating_system": str(operating_system),
+            "target": str(target.family),
+            "modules": config.get("modules", []),
+            "environment": extra_attributes.get("environment", {}),
+            "extra_rpaths": extra_attributes.get("extra_rpaths", []),
+            "implicit_rpaths": extra_attributes.get("implicit_rpaths", None),
+        }
+    }
+    return compiler_entry
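`_compiler_config_from_external` is the bridge that lets compilers be declared as ordinary externals in `packages.yaml`. A hedged sketch of the entry shape it consumes (paths and versions are illustrative):

```python
# One element of a packages.yaml "gcc: externals:" list, as Python data.
entry = {
    "spec": "gcc@12.3.0",
    "prefix": "/usr",  # bin/ is scanned for cc/cxx/fc/f77 when paths are missing
    "modules": [],
    "extra_attributes": {
        "paths": {"cc": "/usr/bin/gcc", "cxx": "/usr/bin/g++"},  # optional override
        "flags": {},
        "extra_rpaths": [],
    },
}
compiler_entry = _compiler_config_from_external(entry)  # compilers.yaml-shaped dict
```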
def _init_compiler_config(*, scope):
|
||||
"""Compiler search used when Spack has no compilers."""
|
||||
compilers = find_compilers()
|
||||
@@ -142,17 +233,20 @@ def compiler_config_files():
|
||||
compiler_config = config.get("compilers", scope=name)
|
||||
if compiler_config:
|
||||
config_files.append(config.get_config_filename(name, "compilers"))
|
||||
compiler_config_from_packages = get_compiler_config_from_packages(scope=name)
|
||||
if compiler_config_from_packages:
|
||||
config_files.append(config.get_config_filename(name, "packages"))
|
||||
return config_files
|
||||
|
||||
|
||||
def add_compilers_to_config(compilers, scope=None, init_config=True):
|
||||
def add_compilers_to_config(compilers, scope=None):
|
||||
"""Add compilers to the config for the specified architecture.
|
||||
|
||||
Arguments:
|
||||
compilers: a list of Compiler objects.
|
||||
scope: configuration scope to modify.
|
||||
"""
|
||||
compiler_config = get_compiler_config(scope, init_config)
|
||||
compiler_config = get_compiler_config(scope, init_config=False)
|
||||
for compiler in compilers:
|
||||
if not compiler.cc:
|
||||
tty.debug(f"{compiler.spec} does not have a C compiler")
|
||||
@@ -184,6 +278,9 @@ def remove_compiler_from_config(compiler_spec, scope=None):
|
||||
for current_scope in candidate_scopes:
|
||||
removal_happened |= _remove_compiler_from_scope(compiler_spec, scope=current_scope)
|
||||
|
||||
msg = "`spack compiler remove` will not remove compilers defined in packages.yaml"
|
||||
msg += "\nTo remove these compilers, either edit the config or use `spack external remove`"
|
||||
tty.debug(msg)
|
||||
return removal_happened
|
||||
|
||||
|
||||
@@ -198,7 +295,7 @@ def _remove_compiler_from_scope(compiler_spec, scope):
|
||||
True if one or more compiler entries were actually removed, False otherwise
|
||||
"""
|
||||
assert scope is not None, "a specific scope is needed when calling this function"
|
||||
compiler_config = get_compiler_config(scope)
|
||||
compiler_config = get_compiler_config(scope, init_config=False)
|
||||
filtered_compiler_config = [
|
||||
compiler_entry
|
||||
for compiler_entry in compiler_config
|
||||
@@ -221,7 +318,14 @@ def all_compilers_config(scope=None, init_config=True):
|
||||
"""Return a set of specs for all the compiler versions currently
|
||||
available to build with. These are instances of CompilerSpec.
|
||||
"""
|
||||
return get_compiler_config(scope, init_config)
|
||||
from_packages_yaml = get_compiler_config_from_packages(scope)
|
||||
if from_packages_yaml:
|
||||
init_config = False
|
||||
from_compilers_yaml = get_compiler_config(scope, init_config)
|
||||
|
||||
result = from_compilers_yaml + from_packages_yaml
|
||||
key = lambda c: _compiler_from_config_entry(c["compiler"])
|
||||
return list(llnl.util.lang.dedupe(result, key=key))
|
||||
|
||||
|
||||
def all_compiler_specs(scope=None, init_config=True):
|
||||
@@ -388,7 +492,7 @@ def find_specs_by_arch(compiler_spec, arch_spec, scope=None, init_config=True):
|
||||
|
||||
|
||||
def all_compilers(scope=None, init_config=True):
|
||||
config = get_compiler_config(scope, init_config=init_config)
|
||||
config = all_compilers_config(scope, init_config=init_config)
|
||||
compilers = list()
|
||||
for items in config:
|
||||
items = items["compiler"]
|
||||
@@ -403,10 +507,7 @@ def compilers_for_spec(
|
||||
"""This gets all compilers that satisfy the supplied CompilerSpec.
|
||||
Returns an empty list if none are found.
|
||||
"""
|
||||
if use_cache:
|
||||
config = all_compilers_config(scope, init_config)
|
||||
else:
|
||||
config = get_compiler_config(scope, init_config)
|
||||
config = all_compilers_config(scope, init_config)
|
||||
|
||||
matches = set(find(compiler_spec, scope, init_config))
|
||||
compilers = []
|
||||
@@ -583,9 +684,7 @@ def get_compiler_duplicates(compiler_spec, arch_spec):
|
||||
|
||||
scope_to_compilers = {}
|
||||
for scope in config.scopes:
|
||||
compilers = compilers_for_spec(
|
||||
compiler_spec, arch_spec=arch_spec, scope=scope, use_cache=False
|
||||
)
|
||||
compilers = compilers_for_spec(compiler_spec, arch_spec=arch_spec, scope=scope)
|
||||
if compilers:
|
||||
scope_to_compilers[scope] = compilers
|
||||
|
||||
|
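A minimal standalone sketch of the merge-then-dedupe behavior that `all_compilers_config` gains above: entries from compilers.yaml come first, duplicates from packages.yaml are dropped. The `dedupe` helper and the spec-string key below are illustrative stand-ins for `llnl.util.lang.dedupe` and `_compiler_from_config_entry`, not the real implementations.

from typing import Any, Callable, Dict, Iterable, List


def dedupe(entries: Iterable[Dict[str, Any]], key: Callable) -> List[Dict[str, Any]]:
    # Keep the first occurrence of each key, preserving order.
    seen, result = set(), []
    for entry in entries:
        k = key(entry)
        if k not in seen:
            seen.add(k)
            result.append(entry)
    return result


from_compilers_yaml = [{"compiler": {"spec": "gcc@12.3.0"}}]
from_packages_yaml = [{"compiler": {"spec": "gcc@12.3.0"}}, {"compiler": {"spec": "clang@16.0.0"}}]
merged = dedupe(from_compilers_yaml + from_packages_yaml, key=lambda c: c["compiler"]["spec"])
assert [c["compiler"]["spec"] for c in merged] == ["gcc@12.3.0", "clang@16.0.0"]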
@@ -10,6 +10,8 @@
import tempfile
from typing import Dict, List, Set

import archspec.cpu

import spack.compiler
import spack.operating_systems.windows_os
import spack.platforms

@@ -186,6 +188,9 @@ def __init__(self, *args, **kwargs):
        # get current platform architecture and format for vcvars argument
        arch = spack.platforms.real_host().default.lower()
        arch = arch.replace("-", "_")
        if str(archspec.cpu.host().family) == "x86_64":
            arch = "amd64"

        self.vcvars_call = VCVarsInvocation(vcvars_script_path, arch, self.msvc_version)
        env_cmds.append(self.vcvars_call)
        # Below is a check for a valid fortran path

@@ -318,7 +323,7 @@ def fc_version(cls, fc):
        fc_path[fc_ver] = fc
        if os.getenv("ONEAPI_ROOT"):
            try:
                sps = spack.operating_systems.windows_os.WindowsOs.compiler_search_paths
                sps = spack.operating_systems.windows_os.WindowsOs().compiler_search_paths
            except AttributeError:
                raise SpackError("Windows compiler search paths not established")
            clp = spack.util.executable.which_string("cl", path=sps)
@@ -764,6 +764,31 @@ def _add_platform_scope(
    cfg.push_scope(scope_type(plat_name, plat_path))


def config_paths_from_entry_points() -> List[Tuple[str, str]]:
    """Load configuration paths from entry points.

    A Python package can register entry point metadata so that Spack can find
    its configuration by adding the following to the project's pyproject.toml:

    .. code-block:: toml

       [project.entry-points."spack.config"]
       baz = "baz:get_spack_config_path"

    The function ``get_spack_config_path`` returns the path to the package's
    Spack configuration scope.
    """
    config_paths: List[Tuple[str, str]] = []
    for entry_point in lang.get_entry_points(group="spack.config"):
        hook = entry_point.load()
        if callable(hook):
            config_path = hook()
            if config_path and os.path.exists(config_path):
                config_paths.append(("plugin-%s" % entry_point.name, str(config_path)))
    return config_paths


def _add_command_line_scopes(
    cfg: Union[Configuration, lang.Singleton], command_line_scopes: List[str]
) -> None:

@@ -816,6 +841,9 @@ def create() -> Configuration:
    # No site-level configs should be checked into spack by default.
    configuration_paths.append(("site", os.path.join(spack.paths.etc_path)))

    # Python packages can register configuration scopes via entry_points
    configuration_paths.extend(config_paths_from_entry_points())

    # User configuration can override both spack defaults and site config
    # This is disabled if user asks for no local configuration.
    if not disable_local_config:
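A rough standalone equivalent of the discovery loop above; Spack's `lang.get_entry_points` wraps `importlib.metadata`, which is used directly here. The `spack.config` group name comes from the diff; the 3.10+ `entry_points(group=...)` selection API is an assumption about the runtime Python.

import os
from importlib.metadata import entry_points


def discover_config_paths(group: str = "spack.config"):
    # Each entry point should resolve to a callable returning a config path.
    paths = []
    for ep in entry_points(group=group):  # requires the Python 3.10+ selection API
        hook = ep.load()
        if callable(hook):
            path = hook()
            if path and os.path.exists(path):
                paths.append((f"plugin-{ep.name}", str(path)))
    return paths


print(discover_config_paths())  # [] unless a plugin registers the group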
@@ -19,9 +19,6 @@
      },
      "os_package_manager": "dnf",
      "build": "spack/fedora38",
      "build_tags": {
        "develop": "latest"
      },
      "final": {
        "image": "docker.io/fedora:38"
      }

@@ -33,9 +30,6 @@
      },
      "os_package_manager": "dnf",
      "build": "spack/fedora37",
      "build_tags": {
        "develop": "latest"
      },
      "final": {
        "image": "docker.io/fedora:37"
      }

@@ -47,9 +41,6 @@
      },
      "os_package_manager": "dnf_epel",
      "build": "spack/rockylinux9",
      "build_tags": {
        "develop": "latest"
      },
      "final": {
        "image": "docker.io/rockylinux:9"
      }

@@ -61,9 +52,6 @@
      },
      "os_package_manager": "dnf_epel",
      "build": "spack/rockylinux8",
      "build_tags": {
        "develop": "latest"
      },
      "final": {
        "image": "docker.io/rockylinux:8"
      }

@@ -75,9 +63,6 @@
      },
      "os_package_manager": "dnf_epel",
      "build": "spack/almalinux9",
      "build_tags": {
        "develop": "latest"
      },
      "final": {
        "image": "quay.io/almalinuxorg/almalinux:9"
      }

@@ -89,9 +74,6 @@
      },
      "os_package_manager": "dnf_epel",
      "build": "spack/almalinux8",
      "build_tags": {
        "develop": "latest"
      },
      "final": {
        "image": "quay.io/almalinuxorg/almalinux:8"
      }

@@ -105,9 +87,6 @@
      "build": "spack/centos-stream",
      "final": {
        "image": "quay.io/centos/centos:stream"
      },
      "build_tags": {
        "develop": "latest"
      }
    },
    "centos:7": {

@@ -115,10 +94,7 @@
        "template": "container/centos_7.dockerfile"
      },
      "os_package_manager": "yum",
      "build": "spack/centos7",
      "build_tags": {
        "develop": "latest"
      }
      "build": "spack/centos7"
    },
    "opensuse/leap:15": {
      "bootstrap": {

@@ -126,9 +102,6 @@
      },
      "os_package_manager": "zypper",
      "build": "spack/leap15",
      "build_tags": {
        "develop": "latest"
      },
      "final": {
        "image": "opensuse/leap:latest"
      }

@@ -148,19 +121,13 @@
        "template": "container/ubuntu_2204.dockerfile"
      },
      "os_package_manager": "apt",
      "build": "spack/ubuntu-jammy",
      "build_tags": {
        "develop": "latest"
      }
      "build": "spack/ubuntu-jammy"
    },
    "ubuntu:20.04": {
      "bootstrap": {
        "template": "container/ubuntu_2004.dockerfile"
      },
      "build": "spack/ubuntu-focal",
      "build_tags": {
        "develop": "latest"
      },
      "os_package_manager": "apt"
    },
    "ubuntu:18.04": {

@@ -168,10 +135,7 @@
        "template": "container/ubuntu_1804.dockerfile"
      },
      "os_package_manager": "apt",
      "build": "spack/ubuntu-bionic",
      "build_tags": {
        "develop": "latest"
      }
      "build": "spack/ubuntu-bionic"
    }
  },
  "os_package_managers": {
@@ -50,10 +50,7 @@ def build_info(image, spack_version):
    if not build_image:
        return None, None

    # Translate version from git to docker if necessary
    build_tag = image_data["build_tags"].get(spack_version, spack_version)

    return build_image, build_tag
    return build_image, spack_version


def os_package_manager_for(image):
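A toy before/after of the `build_info` change above, matching the removal of the per-image `build_tags` tables from images.json: the tag used to be looked up with the version itself as a fallback, and is now the version string directly. Both helpers below are illustrative, not part of the change.

def build_tag_old(image_data: dict, spack_version: str) -> str:
    # Old behavior: translate via the image's "build_tags" mapping.
    return image_data.get("build_tags", {}).get(spack_version, spack_version)


def build_tag_new(image_data: dict, spack_version: str) -> str:
    # New behavior: the Spack version is used as the tag verbatim.
    return spack_version


image_data = {"build_tags": {"develop": "latest"}}
assert build_tag_old(image_data, "develop") == "latest"
assert build_tag_new(image_data, "develop") == "develop"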
@@ -227,7 +227,7 @@ def read(path, apply_updates):
    if apply_updates and compilers:
        for compiler in compilers:
            try:
                spack.compilers.add_compilers_to_config([compiler], init_config=False)
                spack.compilers.add_compilers_to_config([compiler])
            except Exception:
                warnings.warn(
                    f"Could not add compiler {str(compiler.spec)}: "
@@ -660,6 +660,7 @@ def patch(
    level: int = 1,
    when: WhenType = None,
    working_dir: str = ".",
    reverse: bool = False,
    sha256: Optional[str] = None,
    archive_sha256: Optional[str] = None,
) -> Patcher:

@@ -673,10 +674,10 @@ def patch(
        level: patch level (as in the patch shell command)
        when: optional anonymous spec that specifies when to apply the patch
        working_dir: dir to change to before applying
        reverse: reverse the patch
        sha256: sha256 sum of the patch, used to verify the patch (only required for URL patches)
        archive_sha256: sha256 sum of the *archive*, if the patch is compressed (only required for
            compressed URL patches)

    """

    def _execute_patch(pkg_or_dep: Union["spack.package_base.PackageBase", Dependency]):

@@ -703,18 +704,22 @@ def _execute_patch(pkg_or_dep: Union["spack.package_base.PackageBase", Dependency]):

        patch: spack.patch.Patch
        if "://" in url_or_filename:
            if sha256 is None:
                raise ValueError("patch() with a url requires a sha256")

            patch = spack.patch.UrlPatch(
                pkg,
                url_or_filename,
                level,
                working_dir,
                working_dir=working_dir,
                reverse=reverse,
                ordering_key=ordering_key,
                sha256=sha256,
                archive_sha256=archive_sha256,
            )
        else:
            patch = spack.patch.FilePatch(
                pkg, url_or_filename, level, working_dir, ordering_key=ordering_key
                pkg, url_or_filename, level, working_dir, reverse, ordering_key=ordering_key
            )

        cur_patches.append(patch)
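A standalone sketch of the validation rule the directive now enforces above: URL patches must carry a sha256, file patches need not. `make_patch` and its return strings are illustrative only.

from typing import Optional


def make_patch(url_or_filename: str, sha256: Optional[str] = None) -> str:
    if "://" in url_or_filename:
        if sha256 is None:
            raise ValueError("patch() with a url requires a sha256")
        return f"UrlPatch({url_or_filename})"
    return f"FilePatch({url_or_filename})"


assert make_patch("local-fix.patch").startswith("FilePatch")
try:
    make_patch("https://example.com/fix.patch")
except ValueError as e:
    print(e)  # patch() with a url requires a sha256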
@@ -626,14 +626,13 @@ def view(self, new: Optional[str] = None) -> SimpleFilesystemView:
            new: If a string, create a FilesystemView rooted at that path. Default None. This
                should only be used to regenerate the view, and cannot be used to access specs.
        """
        root = new if new else self._current_root
        if not root:
        path = new if new else self._current_root
        if not path:
            # This can only be hit if we write a future bug
            raise SpackEnvironmentViewError(
                "Attempting to get nonexistent view from environment. "
                f"View root is at {self.root}"
                f"Attempting to get nonexistent view from environment. View root is at {self.root}"
            )
        return self._view(root)
        return self._view(path)

    def _view(self, root: str) -> SimpleFilesystemView:
        """Returns a view object for a given root dir."""

@@ -678,7 +677,9 @@ def specs_for_view(self, concrete_roots: List[Spec]) -> List[Spec]:

        # Filter selected, installed specs
        with spack.store.STORE.db.read_transaction():
            return [s for s in specs if s in self and s.installed]
            result = [s for s in specs if s in self and s.installed]

        return self._exclude_duplicate_runtimes(result)

    def regenerate(self, concrete_roots: List[Spec]) -> None:
        specs = self.specs_for_view(concrete_roots)

@@ -765,6 +766,16 @@ def regenerate(self, concrete_roots: List[Spec]) -> None:
            msg += str(e)
            tty.warn(msg)

    def _exclude_duplicate_runtimes(self, nodes):
        all_runtimes = spack.repo.PATH.packages_with_tags("runtime")
        runtimes_by_name = {}
        for s in nodes:
            if s.name not in all_runtimes:
                continue
            current_runtime = runtimes_by_name.get(s.name, s)
            runtimes_by_name[s.name] = max(current_runtime, s, key=lambda x: x.version)
        return [x for x in nodes if x.name not in all_runtimes or runtimes_by_name[x.name] == x]


def _create_environment(path):
    return Environment(path)

@@ -1416,7 +1427,7 @@ def _concretize_separately(self, tests=False):

        # Ensure we have compilers in compilers.yaml to avoid that
        # processes try to write the config file in parallel
        _ = spack.compilers.get_compiler_config()
        _ = spack.compilers.get_compiler_config(init_config=True)

        # Early return if there is nothing to do
        if len(args) == 0:
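A self-contained sketch of `_exclude_duplicate_runtimes` above: among packages tagged as runtimes, keep only the highest version of each name and leave everything else alone. `Node` stands in for a concrete Spec, and the runtime set is illustrative.

from collections import namedtuple

Node = namedtuple("Node", ["name", "version"])
RUNTIMES = {"gcc-runtime"}  # stand-in for packages_with_tags("runtime")


def exclude_duplicate_runtimes(nodes):
    best = {}
    for n in nodes:
        if n.name in RUNTIMES:
            best[n.name] = max(best.get(n.name, n), n, key=lambda x: x.version)
    return [n for n in nodes if n.name not in RUNTIMES or best[n.name] == n]


nodes = [Node("gcc-runtime", (12, 3)), Node("zlib", (1, 3)), Node("gcc-runtime", (13, 2))]
assert exclude_duplicate_runtimes(nodes) == [Node("zlib", (1, 3)), Node("gcc-runtime", (13, 2))]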
@@ -12,6 +12,7 @@
import re
import sys
import types
from pathlib import Path
from typing import List

import llnl.util.lang

@@ -132,10 +133,38 @@ def load_extension(name: str) -> str:
def get_extension_paths():
    """Return the list of canonicalized extension paths from config:extensions."""
    extension_paths = spack.config.get("config:extensions") or []
    extension_paths.extend(extension_paths_from_entry_points())
    paths = [spack.util.path.canonicalize_path(p) for p in extension_paths]
    return paths


def extension_paths_from_entry_points() -> List[str]:
    """Load extensions from a Python package's entry points.

    A Python package can register entry point metadata so that Spack can find
    its extensions by adding the following to the project's pyproject.toml:

    .. code-block:: toml

       [project.entry-points."spack.extensions"]
       baz = "baz:get_spack_extensions"

    The function ``get_spack_extensions`` returns paths to the package's
    Spack extensions.
    """
    extension_paths: List[str] = []
    for entry_point in llnl.util.lang.get_entry_points(group="spack.extensions"):
        hook = entry_point.load()
        if callable(hook):
            paths = hook() or []
            if isinstance(paths, (Path, str)):
                extension_paths.append(str(paths))
            else:
                extension_paths.extend(paths)
    return extension_paths


def get_command_paths():
    """Return the list of paths where to search for command files."""
    command_paths = []
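A standalone sketch of the hook-result normalization above: an entry-point hook may return a single path (str or pathlib.Path) or an iterable of paths, and both shapes collapse to a list of strings. The helper name is illustrative.

from pathlib import Path
from typing import Iterable, List, Union

PathLike = Union[str, Path, Iterable[Union[str, Path]]]


def normalize_paths(result: PathLike) -> List[str]:
    # A bare str/Path is wrapped; anything else is treated as an iterable.
    if isinstance(result, (Path, str)):
        return [str(result)]
    return [str(p) for p in result]


assert normalize_paths("/opt/ext") == ["/opt/ext"]
assert normalize_paths(Path("/opt/ext")) == ["/opt/ext"]
assert normalize_paths(["/a", Path("/b")]) == ["/a", "/b"]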
@@ -121,43 +121,26 @@ def update_dictionary_extending_lists(target, update):
            target[key] = update[key]


def dependencies(spec, request="all"):
    """Returns the list of dependent specs for a given spec, according to the
    request passed as parameter.
def dependencies(spec: spack.spec.Spec, request: str = "all") -> List[spack.spec.Spec]:
    """Returns the list of dependent specs for a given spec.

    Args:
        spec: spec to be analyzed
        request: either 'none', 'direct' or 'all'
        request: one of "none", "run", "direct", "all"

    Returns:
        list of dependencies

        The return list will be empty if request is 'none', will contain
        the direct dependencies if request is 'direct', or the entire DAG
        if request is 'all'.
        list of requested dependencies
    """
    if request not in ("none", "direct", "all"):
        message = "Wrong value for argument 'request' : "
        message += "should be one of ('none', 'direct', 'all')"
        raise tty.error(message + " [current value is '%s']" % request)

    if request == "none":
        return []
    elif request == "run":
        return spec.dependencies(deptype=dt.RUN)
    elif request == "direct":
        return spec.dependencies(deptype=dt.RUN | dt.LINK)
    elif request == "all":
        return list(spec.traverse(order="topo", deptype=dt.LINK | dt.RUN, root=False))

    if request == "direct":
        return spec.dependencies(deptype=("link", "run"))

    # FIXME : during module file creation nodes seem to be visited multiple
    # FIXME : times even if cover='nodes' is given. This work around permits
    # FIXME : to get a unique list of spec anyhow. Do we miss a merge
    # FIXME : step among nodes that refer to the same package?
    seen = set()
    seen_add = seen.add
    deps = sorted(
        spec.traverse(order="post", cover="nodes", deptype=("link", "run"), root=False),
        reverse=True,
    )
    return [d for d in deps if not (d in seen or seen_add(d))]
    raise ValueError(f'request "{request}" is not one of "none", "direct", "run", "all"')


def merge_config_rules(configuration, spec):
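A toy model of the four request values above ("none", "run", "direct", "all"), using a tiny hand-rolled DAG instead of real Specs; deptypes are simplified to strings and the "all" traversal uses a depth-first order rather than the topological order the real code requests.

from typing import List, Set, Tuple


class Node:
    def __init__(self, name: str):
        self.name = name
        self.deps: List[Tuple["Node", str]] = []  # (child, deptype)

    def dependencies(self, deptypes: Set[str]) -> List["Node"]:
        return [c for c, t in self.deps if t in deptypes]


def dependencies(spec: Node, request: str = "all") -> List[Node]:
    if request == "none":
        return []
    if request == "run":
        return spec.dependencies({"run"})
    if request == "direct":
        return spec.dependencies({"run", "link"})
    if request == "all":  # whole DAG, each node once, root excluded
        seen, order = set(), []

        def visit(n: Node):
            for c in n.dependencies({"run", "link"}):
                if c.name not in seen:
                    seen.add(c.name)
                    order.append(c)
                    visit(c)

        visit(spec)
        return order
    raise ValueError(f'request "{request}" is not one of "none", "direct", "run", "all"')


a, b, c = Node("a"), Node("b"), Node("c")
a.deps = [(b, "link")]
b.deps = [(c, "run")]
assert [n.name for n in dependencies(a, "direct")] == ["b"]
assert [n.name for n in dependencies(a, "all")] == ["b", "c"]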
@@ -64,7 +64,7 @@
    install_test_root,
)
from spack.installer import InstallError, PackageInstaller
from spack.stage import DIYStage, ResourceStage, Stage, StageComposite, compute_stage_name
from spack.stage import DevelopStage, ResourceStage, Stage, StageComposite, compute_stage_name
from spack.util.executable import ProcessError, which
from spack.util.package_hash import package_hash
from spack.version import GitVersion, StandardVersion

@@ -1075,7 +1075,12 @@ def _make_stage(self):
        # If it's a dev package (not transitively), use a DIY stage object
        dev_path_var = self.spec.variants.get("dev_path", None)
        if dev_path_var:
            return DIYStage(dev_path_var.value)
            dev_path = dev_path_var.value
            link_format = spack.config.get("config:develop_stage_link")
            if not link_format:
                link_format = "build-{arch}-{hash:7}"
            stage_link = self.spec.format_path(link_format)
            return DevelopStage(compute_stage_name(self.spec), dev_path, stage_link)

        # To fetch the current version
        source_stage = self._make_root_stage(self.fetcher)

@@ -1407,7 +1412,7 @@ def do_fetch(self, mirror_only=False):
            return

        checksum = spack.config.get("config:checksum")
        fetch = self.stage.managed_by_spack
        fetch = self.stage.needs_fetching
        if (
            checksum
            and fetch

@@ -1480,9 +1485,6 @@ def do_stage(self, mirror_only=False):
        if self.has_code:
            self.do_fetch(mirror_only)
            self.stage.expand_archive()

            if not os.listdir(self.stage.path):
                raise spack.error.FetchError("Archive was empty for %s" % self.name)
        else:
            # Support for post-install hooks requires a stage.source_path
            fsys.mkdirp(self.stage.source_path)

@@ -1516,7 +1518,7 @@ def do_patch(self):
        # If we encounter an archive that failed to patch, restage it
        # so that we can apply all the patches again.
        if os.path.isfile(bad_file):
            if self.stage.managed_by_spack:
            if self.stage.requires_patch_success:
                tty.debug("Patching failed last time. Restaging.")
                self.stage.restage()
            else:

@@ -1537,6 +1539,8 @@ def do_patch(self):
            tty.msg("No patches needed for {0}".format(self.name))
            return

        errors = []

        # Apply all the patches for specs that match this one
        patched = False
        for patch in patches:

@@ -1546,12 +1550,16 @@ def do_patch(self):
                tty.msg("Applied patch {0}".format(patch.path_or_url))
                patched = True
            except spack.error.SpackError as e:
                tty.debug(e)

                # Touch bad file if anything goes wrong.
                tty.msg("Patch %s failed." % patch.path_or_url)
                fsys.touch(bad_file)
                raise
                error_msg = f"Patch {patch.path_or_url} failed."
                if self.stage.requires_patch_success:
                    tty.msg(error_msg)
                    raise
                else:
                    tty.debug(error_msg)
                    tty.debug(e)
                    errors.append(e)

        if has_patch_fun:
            try:

@@ -1569,24 +1577,29 @@ def do_patch(self):
                # printed a message for each patch.
                tty.msg("No patches needed for {0}".format(self.name))
            except spack.error.SpackError as e:
                tty.debug(e)

                # Touch bad file if anything goes wrong.
                tty.msg("patch() function failed for {0}".format(self.name))
                fsys.touch(bad_file)
                raise
                error_msg = f"patch() function failed for {self.name}"
                if self.stage.requires_patch_success:
                    tty.msg(error_msg)
                    raise
                else:
                    tty.debug(error_msg)
                    tty.debug(e)
                    errors.append(e)

        # Get rid of any old failed file -- patches have either succeeded
        # or are not needed. This is mostly defensive -- it's needed
        # if the restage() method doesn't clean *everything* (e.g., for a repo)
        if os.path.isfile(bad_file):
            os.remove(bad_file)
        if not errors:
            # Get rid of any old failed file -- patches have either succeeded
            # or are not needed. This is mostly defensive -- it's needed
            # if we didn't restage
            if os.path.isfile(bad_file):
                os.remove(bad_file)

        # touch good or no patches file so that we skip next time.
        if patched:
            fsys.touch(good_file)
        else:
            fsys.touch(no_patches_file)
            # touch good or no patches file so that we skip next time.
            if patched:
                fsys.touch(good_file)
            else:
                fsys.touch(no_patches_file)

    @classmethod
    def all_patches(cls):
@@ -9,9 +9,9 @@
import os.path
import pathlib
import sys
from typing import Any, Dict, Optional, Tuple, Type

import llnl.util.filesystem
import llnl.util.lang
from llnl.url import allowed_archive

import spack

@@ -25,15 +25,21 @@
from spack.util.executable import which, which_string


def apply_patch(stage, patch_path, level=1, working_dir="."):
def apply_patch(
    stage: "spack.stage.Stage",
    patch_path: str,
    level: int = 1,
    working_dir: str = ".",
    reverse: bool = False,
) -> None:
    """Apply the patch at patch_path to code in the stage.

    Args:
        stage (spack.stage.Stage): stage with code that will be patched
        patch_path (str): filesystem location for the patch to apply
        level (int or None): patch level (default 1)
        working_dir (str): relative path *within* the stage to change to
            (default '.')
        stage: stage with code that will be patched
        patch_path: filesystem location for the patch to apply
        level: patch level
        working_dir: relative path *within* the stage to change to
        reverse: reverse the patch
    """
    git_utils_path = os.environ.get("PATH", "")
    if sys.platform == "win32":

@@ -44,6 +50,10 @@ def apply_patch(stage, patch_path, level=1, working_dir="."):
        git_root = git_root / "usr" / "bin"
        git_utils_path = os.pathsep.join([str(git_root), git_utils_path])

    args = ["-s", "-p", str(level), "-i", patch_path, "-d", working_dir]
    if reverse:
        args.append("-R")

    # TODO: Decouple Spack's patch support on Windows from Git
    # for Windows, and instead have Spack directly fetch, install, and
    # utilize that patch.

@@ -52,22 +62,36 @@ def apply_patch(stage, patch_path, level=1, working_dir="."):
    # flag is passed.
    patch = which("patch", required=True, path=git_utils_path)
    with llnl.util.filesystem.working_dir(stage.source_path):
        patch("-s", "-p", str(level), "-i", patch_path, "-d", working_dir)
        patch(*args)


class Patch:
    """Base class for patches.

    Arguments:
        pkg (str): the package that owns the patch

    The owning package is not necessarily the package to apply the patch
    to -- in the case where a dependent package patches its dependency,
    it is the dependent's fullname.

    """

    def __init__(self, pkg, path_or_url, level, working_dir):
    sha256: str

    def __init__(
        self,
        pkg: "spack.package_base.PackageBase",
        path_or_url: str,
        level: int,
        working_dir: str,
        reverse: bool = False,
    ) -> None:
        """Initialize a new Patch instance.

        Args:
            pkg: the package that owns the patch
            path_or_url: the relative path or URL to a patch file
            level: patch level
            working_dir: relative path *within* the stage to change to
            reverse: reverse the patch
        """
        # validate level (must be an integer >= 0)
        if not isinstance(level, int) or not level >= 0:
            raise ValueError("Patch level needs to be a non-negative integer.")

@@ -75,59 +99,88 @@ def __init__(self, pkg, path_or_url, level, working_dir):
        # Attributes shared by all patch subclasses
        self.owner = pkg.fullname
        self.path_or_url = path_or_url  # needed for debug output
        self.path = None  # must be set before apply()
        self.path: Optional[str] = None  # must be set before apply()
        self.level = level
        self.working_dir = working_dir
        self.reverse = reverse

    def apply(self, stage: "spack.stage.Stage"):
    def apply(self, stage: "spack.stage.Stage") -> None:
        """Apply a patch to source in a stage.

        Arguments:
            stage (spack.stage.Stage): stage where source code lives
        Args:
            stage: stage where source code lives
        """
        if not self.path or not os.path.isfile(self.path):
            raise NoSuchPatchError(f"No such patch: {self.path}")

        apply_patch(stage, self.path, self.level, self.working_dir)
        apply_patch(stage, self.path, self.level, self.working_dir, self.reverse)

    @property
    def stage(self):
        return None
    # TODO: Use TypedDict once Spack supports Python 3.8+ only
    def to_dict(self) -> Dict[str, Any]:
        """Dictionary representation of the patch.

    def to_dict(self):
        """Partial dictionary -- subclasses should add to this."""
        Returns:
            A dictionary representation.
        """
        return {
            "owner": self.owner,
            "sha256": self.sha256,
            "level": self.level,
            "working_dir": self.working_dir,
            "reverse": self.reverse,
        }

    def __eq__(self, other):
    def __eq__(self, other: object) -> bool:
        """Equality check.

        Args:
            other: another patch

        Returns:
            True if both patches have the same checksum, else False
        """
        if not isinstance(other, Patch):
            return NotImplemented
        return self.sha256 == other.sha256

    def __hash__(self):
    def __hash__(self) -> int:
        """Unique hash.

        Returns:
            A unique hash based on the sha256.
        """
        return hash(self.sha256)


class FilePatch(Patch):
    """Describes a patch that is retrieved from a file in the repository.
    """Describes a patch that is retrieved from a file in the repository."""

    Arguments:
        pkg (str): the class object for the package that owns the patch
        relative_path (str): path to patch, relative to the repository
            directory for a package.
        level (int): level to pass to patch command
        working_dir (str): path within the source directory where patch
            should be applied
    """
    _sha256: Optional[str] = None

    def __init__(self, pkg, relative_path, level, working_dir, ordering_key=None):
    def __init__(
        self,
        pkg: "spack.package_base.PackageBase",
        relative_path: str,
        level: int,
        working_dir: str,
        reverse: bool = False,
        ordering_key: Optional[Tuple[str, int]] = None,
    ) -> None:
        """Initialize a new FilePatch instance.

        Args:
            pkg: the class object for the package that owns the patch
            relative_path: path to patch, relative to the repository directory for a package.
            level: level to pass to patch command
            working_dir: path within the source directory where patch should be applied
            reverse: reverse the patch
            ordering_key: key used to ensure patches are applied in a consistent order
        """
        self.relative_path = relative_path

        # patches may be defined by relative paths to parent classes
        # search mro to look for the file
        abs_path = None
        abs_path: Optional[str] = None
        # At different times we call FilePatch on instances and classes
        pkg_cls = pkg if inspect.isclass(pkg) else pkg.__class__
        for cls in inspect.getmro(pkg_cls):

@@ -148,52 +201,94 @@ def __init__(self, pkg, relative_path, level, working_dir, ordering_key=None):
            msg += "package %s.%s does not exist." % (pkg.namespace, pkg.name)
            raise ValueError(msg)

        super().__init__(pkg, abs_path, level, working_dir)
        super().__init__(pkg, abs_path, level, working_dir, reverse)
        self.path = abs_path
        self._sha256 = None
        self.ordering_key = ordering_key

    @property
    def sha256(self):
        if self._sha256 is None:
    def sha256(self) -> str:
        """Get the patch checksum.

        Returns:
            The sha256 of the patch file.
        """
        if self._sha256 is None and self.path is not None:
            self._sha256 = checksum(hashlib.sha256, self.path)
        assert isinstance(self._sha256, str)
        return self._sha256

    def to_dict(self):
        return llnl.util.lang.union_dicts(super().to_dict(), {"relative_path": self.relative_path})
    @sha256.setter
    def sha256(self, value: str) -> None:
        """Set the patch checksum.

        Args:
            value: the sha256
        """
        self._sha256 = value

    def to_dict(self) -> Dict[str, Any]:
        """Dictionary representation of the patch.

        Returns:
            A dictionary representation.
        """
        data = super().to_dict()
        data["relative_path"] = self.relative_path
        return data


class UrlPatch(Patch):
    """Describes a patch that is retrieved from a URL.
    """Describes a patch that is retrieved from a URL."""

    Arguments:
        pkg (str): the package that owns the patch
        url (str): URL where the patch can be fetched
        level (int): level to pass to patch command
        working_dir (str): path within the source directory where patch
            should be applied
    """
    def __init__(
        self,
        pkg: "spack.package_base.PackageBase",
        url: str,
        level: int = 1,
        *,
        working_dir: str = ".",
        reverse: bool = False,
        sha256: str,  # This is required for UrlPatch
        ordering_key: Optional[Tuple[str, int]] = None,
        archive_sha256: Optional[str] = None,
    ) -> None:
        """Initialize a new UrlPatch instance.

    def __init__(self, pkg, url, level=1, working_dir=".", ordering_key=None, **kwargs):
        super().__init__(pkg, url, level, working_dir)
        Arguments:
            pkg: the package that owns the patch
            url: URL where the patch can be fetched
            level: level to pass to patch command
            working_dir: path within the source directory where patch should be applied
            reverse: reverse the patch
            ordering_key: key used to ensure patches are applied in a consistent order
            sha256: sha256 sum of the patch, used to verify the patch
            archive_sha256: sha256 sum of the *archive*, if the patch is compressed
                (only required for compressed URL patches)
        """
        super().__init__(pkg, url, level, working_dir, reverse)

        self.url = url
        self._stage = None
        self._stage: Optional["spack.stage.Stage"] = None

        self.ordering_key = ordering_key

        self.archive_sha256 = kwargs.get("archive_sha256")
        if allowed_archive(self.url) and not self.archive_sha256:
        if allowed_archive(self.url) and not archive_sha256:
            raise PatchDirectiveError(
                "Compressed patches require 'archive_sha256' "
                "and patch 'sha256' attributes: %s" % self.url
            )
        self.archive_sha256 = archive_sha256

        self.sha256 = kwargs.get("sha256")
        if not self.sha256:
        if not sha256:
            raise PatchDirectiveError("URL patches require a sha256 checksum")
        self.sha256 = sha256

    def apply(self, stage: "spack.stage.Stage"):
    def apply(self, stage: "spack.stage.Stage") -> None:
        """Apply a patch to source in a stage.

        Args:
            stage: stage where source code lives
        """
        assert self.stage.expanded, "Stage must be expanded before applying patches"

        # Get the patch file.

@@ -204,15 +299,20 @@ def apply(self, stage: "spack.stage.Stage"):
        return super().apply(stage)

    @property
    def stage(self):
    def stage(self) -> "spack.stage.Stage":
        """The stage in which to download (and unpack) the URL patch.

        Returns:
            The stage object.
        """
        if self._stage:
            return self._stage

        fetch_digest = self.archive_sha256 or self.sha256

        # Two checksums, one for compressed file, one for its contents
        if self.archive_sha256:
            fetcher = fs.FetchAndVerifyExpandedFile(
        if self.archive_sha256 and self.sha256:
            fetcher: fs.FetchStrategy = fs.FetchAndVerifyExpandedFile(
                self.url, archive_sha256=self.archive_sha256, expanded_sha256=self.sha256
            )
        else:

@@ -231,7 +331,12 @@ def stage(self):
            )
        return self._stage

    def to_dict(self):
    def to_dict(self) -> Dict[str, Any]:
        """Dictionary representation of the patch.

        Returns:
            A dictionary representation.
        """
        data = super().to_dict()
        data["url"] = self.url
        if self.archive_sha256:

@@ -239,8 +344,21 @@ def to_dict(self):
        return data


def from_dict(dictionary, repository=None):
    """Create a patch from json dictionary."""
def from_dict(
    dictionary: Dict[str, Any], repository: Optional["spack.repo.RepoPath"] = None
) -> Patch:
    """Create a patch from json dictionary.

    Args:
        dictionary: dictionary representation of a patch
        repository: repository containing package

    Returns:
        A patch object.

    Raises:
        ValueError: If *owner* or *url*/*relative_path* are missing in the dictionary.
    """
    repository = repository or spack.repo.PATH
    owner = dictionary.get("owner")
    if "owner" not in dictionary:

@@ -252,14 +370,21 @@ def from_dict(dictionary, repository=None):
            pkg_cls,
            dictionary["url"],
            dictionary["level"],
            dictionary["working_dir"],
            working_dir=dictionary["working_dir"],
            # Added in v0.22, fallback required for backwards compatibility
            reverse=dictionary.get("reverse", False),
            sha256=dictionary["sha256"],
            archive_sha256=dictionary.get("archive_sha256"),
        )

    elif "relative_path" in dictionary:
        patch = FilePatch(
            pkg_cls, dictionary["relative_path"], dictionary["level"], dictionary["working_dir"]
            pkg_cls,
            dictionary["relative_path"],
            dictionary["level"],
            dictionary["working_dir"],
            # Added in v0.22, fallback required for backwards compatibility
            dictionary.get("reverse", False),
        )

        # If the patch in the repo changes, we cannot get it back, so we

@@ -267,7 +392,7 @@ def from_dict(dictionary, repository=None):
    # TODO: handle this more gracefully.
    sha256 = dictionary["sha256"]
    checker = Checker(sha256)
    if not checker.check(patch.path):
    if patch.path and not checker.check(patch.path):
        raise fs.ChecksumError(
            "sha256 checksum failed for %s" % patch.path,
            "Expected %s but got %s " % (sha256, checker.sum)

@@ -295,10 +420,17 @@ class PatchCache:
        namespace2.package2:
            <patch json>
        ... etc. ...

    """

    def __init__(self, repository, data=None):
    def __init__(
        self, repository: "spack.repo.RepoPath", data: Optional[Dict[str, Any]] = None
    ) -> None:
        """Initialize a new PatchCache instance.

        Args:
            repository: repository containing package
            data: nested dictionary of patches
        """
        if data is None:
            self.index = {}
        else:

@@ -309,21 +441,39 @@ def __init__(self, repository, data=None):
        self.repository = repository

    @classmethod
    def from_json(cls, stream, repository):
    def from_json(cls, stream: Any, repository: "spack.repo.RepoPath") -> "PatchCache":
        """Initialize a new PatchCache instance from JSON.

        Args:
            stream: stream of data
            repository: repository containing package

        Returns:
            A new PatchCache instance.
        """
        return PatchCache(repository=repository, data=sjson.load(stream))

    def to_json(self, stream):
    def to_json(self, stream: Any) -> None:
        """Dump a JSON representation to a stream.

        Args:
            stream: stream of data
        """
        sjson.dump({"patches": self.index}, stream)

    def patch_for_package(self, sha256: str, pkg):
    def patch_for_package(self, sha256: str, pkg: "spack.package_base.PackageBase") -> Patch:
        """Look up a patch in the index and build a patch object for it.

        Arguments:
            sha256: sha256 hash to look up
            pkg (spack.package_base.PackageBase): Package object to get patch for.

        We build patch objects lazily because building them requires that
        we have information about the package's location in its repo."""
        we have information about the package's location in its repo.

        Args:
            sha256: sha256 hash to look up
            pkg: Package object to get patch for.

        Returns:
            The patch object.
        """
        sha_index = self.index.get(sha256)
        if not sha_index:
            raise PatchLookupError(

@@ -346,7 +496,12 @@ def patch_for_package(self, sha256: str, pkg):
        patch_dict["sha256"] = sha256
        return from_dict(patch_dict, repository=self.repository)

    def update_package(self, pkg_fullname):
    def update_package(self, pkg_fullname: str) -> None:
        """Update the patch cache.

        Args:
            pkg_fullname: package to update.
        """
        # remove this package from any patch entries that reference it.
        empty = []
        for sha256, package_to_patch in self.index.items():

@@ -372,14 +527,29 @@ def update_package(self, pkg_fullname):
            p2p = self.index.setdefault(sha256, {})
            p2p.update(package_to_patch)

    def update(self, other):
        """Update this cache with the contents of another."""
    def update(self, other: "PatchCache") -> None:
        """Update this cache with the contents of another.

        Args:
            other: another patch cache to merge
        """
        for sha256, package_to_patch in other.index.items():
            p2p = self.index.setdefault(sha256, {})
            p2p.update(package_to_patch)

    @staticmethod
    def _index_patches(pkg_class, repository):
    def _index_patches(
        pkg_class: Type["spack.package_base.PackageBase"], repository: "spack.repo.RepoPath"
    ) -> Dict[Any, Any]:
        """Patch index for a specific patch.

        Args:
            pkg_class: package object to get patches for
            repository: repository containing the package

        Returns:
            The patch index for that package.
        """
        index = {}

        # Add patches from the class
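A standalone sketch of the argv construction in `apply_patch` above: reversing a patch maps to GNU patch's -R flag appended to the otherwise unchanged argument list. The helper name is illustrative.

from typing import List


def patch_args(
    patch_path: str, level: int = 1, working_dir: str = ".", reverse: bool = False
) -> List[str]:
    # -s: silent, -p: strip level, -i: input file, -d: directory to patch in
    args = ["-s", "-p", str(level), "-i", patch_path, "-d", working_dir]
    if reverse:
        args.append("-R")
    return args


assert patch_args("fix.patch", reverse=True) == ["-s", "-p", "1", "-i", "fix.patch", "-d", ".", "-R"]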
@@ -25,7 +25,7 @@
import traceback
import types
import uuid
from typing import Any, Dict, List, Tuple, Union
from typing import Any, Dict, List, Set, Tuple, Union

import llnl.path
import llnl.util.filesystem as fs

@@ -746,19 +746,17 @@ def all_package_paths(self):
        for name in self.all_package_names():
            yield self.package_path(name)

    def packages_with_tags(self, *tags, full=False):
        """Returns a list of packages matching any of the tags in input.
    def packages_with_tags(self, *tags: str, full: bool = False) -> Set[str]:
        """Returns a set of packages matching any of the tags in input.

        Args:
            full: if True the package names in the output are fully-qualified
        """
        r = set()
        for repo in self.repos:
            current = repo.packages_with_tags(*tags)
            if full:
                current = [f"{repo.namespace}.{x}" for x in current]
            r |= set(current)
        return sorted(r)
        return {
            f"{repo.namespace}.{pkg}" if full else pkg
            for repo in self.repos
            for pkg in repo.packages_with_tags(*tags)
        }

    def all_package_classes(self):
        for name in self.all_package_names():

@@ -1169,15 +1167,10 @@ def all_package_paths(self):
        for name in self.all_package_names():
            yield self.package_path(name)

    def packages_with_tags(self, *tags):
    def packages_with_tags(self, *tags: str) -> Set[str]:
        v = set(self.all_package_names())
        index = self.tag_index

        for t in tags:
            t = t.lower()
            v &= set(index[t])

        return sorted(v)
        v.intersection_update(*(self.tag_index[tag.lower()] for tag in tags))
        return v

    def all_package_classes(self):
        """Iterator over all package *classes* in the repository.
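A toy version of the `Repo.packages_with_tags` rewrite above: start from all package names and intersect with each tag's entry in the tag index in one call. The index contents are illustrative; note that with no tags, `intersection_update()` is a no-op, so all names are returned.

tag_index = {"runtime": {"gcc-runtime", "glibc"}, "build-tools": {"cmake", "ninja"}}
all_packages = {"gcc-runtime", "glibc", "cmake", "ninja", "zlib"}


def packages_with_tags(*tags: str) -> set:
    v = set(all_packages)
    v.intersection_update(*(tag_index[t.lower()] for t in tags))
    return v


assert packages_with_tags("runtime") == {"gcc-runtime", "glibc"}
assert packages_with_tags() == all_packages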
@@ -63,6 +63,7 @@
            "oneOf": [{"type": "string"}, {"type": "array", "items": {"type": "string"}}]
        },
        "stage_name": {"type": "string"},
        "develop_stage_link": {"type": "string"},
        "test_stage": {"type": "string"},
        "extensions": {"type": "array", "items": {"type": "string"}},
        "template_dirs": {"type": "array", "items": {"type": "string"}},
@@ -34,7 +34,7 @@

dictionary_of_strings = {"type": "object", "patternProperties": {r"\w[\w-]*": {"type": "string"}}}

dependency_selection = {"type": "string", "enum": ["none", "direct", "all"]}
dependency_selection = {"type": "string", "enum": ["none", "run", "direct", "all"]}

module_file_configuration = {
    "type": "object",
@@ -823,11 +823,12 @@ def on_model(model):
            print("Statistics:")
            pprint.pprint(self.control.statistics)

        if result.unsolved_specs and setup.concretize_everything:
        if result.satisfiable and result.unsolved_specs and setup.concretize_everything:
            unsolved_str = Result.format_unsolved(result.unsolved_specs)
            raise InternalConcretizerError(
                "Internal Spack error: the solver completed but produced specs"
                f" that do not satisfy the request.\n\t{unsolved_str}"
                " that do not satisfy the request. Please report a bug at "
                f"https://github.com/spack/spack/issues\n\t{unsolved_str}"
            )

        return result, timer, self.control.statistics

@@ -1786,6 +1787,11 @@ def _spec_clauses(
            dep = dspec.spec

            if spec.concrete:
                # GCC runtime is solved again by clingo, even on concrete specs, to give
                # the possibility to reuse specs built against a different runtime.
                if dep.name == "gcc-runtime":
                    continue

                # We know dependencies are real for concrete specs. For abstract
                # specs they just mean the dep is somehow in the DAG.
                for dtype in dt.ALL_FLAGS:

@@ -2287,8 +2293,7 @@ def setup(
        self.possible_virtuals = node_counter.possible_virtuals()
        self.pkgs = node_counter.possible_dependencies()

        runtimes = spack.repo.PATH.packages_with_tags("runtime")
        self.pkgs.update(set(runtimes))
        self.pkgs.update(spack.repo.PATH.packages_with_tags("runtime"))

        # Fail if we already know an unreachable node is requested
        for spec in specs:
@@ -1172,11 +1172,13 @@ attr("node_compiler_version_satisfies", PackageNode, Compiler, Constraint)

% If the compiler version was set from the command line,
% respect it verbatim
:- attr("node_compiler_version_set", PackageNode, Compiler, Version),
   not attr("node_compiler_version", PackageNode, Compiler, Version).
error(100, "Cannot set the required compiler: {2}%{0}@{1}", Compiler, Version, Package)
  :- attr("node_compiler_version_set", node(X, Package), Compiler, Version),
     not attr("node_compiler_version", node(X, Package), Compiler, Version).

:- attr("node_compiler_set", PackageNode, Compiler),
   not attr("node_compiler_version", PackageNode, Compiler, _).
error(100, "Cannot set the required compiler: {1}%{0}", Compiler, Package)
  :- attr("node_compiler_set", node(X, Package), Compiler),
     not attr("node_compiler_version", node(X, Package), Compiler, _).

% Cannot select a compiler if it is not supported on the OS
% Compilers that are explicitly marked as allowed
@@ -117,7 +117,7 @@ def _compute_cache_values(self):
        self._possible_dependencies = set(self._link_run) | set(self._total_build)

    def possible_packages_facts(self, gen, fn):
        build_tools = set(spack.repo.PATH.packages_with_tags("build-tools"))
        build_tools = spack.repo.PATH.packages_with_tags("build-tools")
        gen.h2("Packages with at most a single node")
        for package_name in sorted(self.possible_dependencies() - build_tools):
            gen.fact(fn.max_dupes(package_name, 1))

@@ -142,7 +142,7 @@ def possible_packages_facts(self, gen, fn):

class FullDuplicatesCounter(MinimalDuplicatesCounter):
    def possible_packages_facts(self, gen, fn):
        build_tools = set(spack.repo.PATH.packages_with_tags("build-tools"))
        build_tools = spack.repo.PATH.packages_with_tags("build-tools")
        counter = collections.Counter(
            list(self._link_run) + list(self._total_build) + list(self._direct_build)
        )
@@ -625,18 +625,6 @@ def __init__(self, *args):
        else:
            raise TypeError("__init__ takes 1 or 2 arguments. (%d given)" % nargs)

    def _add_versions(self, version_list):
        # If it already has a non-trivial version list, this is an error
        if self.versions and self.versions != vn.any_version:
            # Note: This may be impossible to reach by the current parser
            # Keeping it in case the implementation changes.
            raise MultipleVersionError(
                "A spec cannot contain multiple version signifiers. Use a version list instead."
            )
        self.versions = vn.VersionList()
        for version in version_list:
            self.versions.add(version)

    def _autospec(self, compiler_spec_like):
        if isinstance(compiler_spec_like, CompilerSpec):
            return compiler_spec_like

@@ -1544,20 +1532,6 @@ def _dependencies_dict(self, depflag: dt.DepFlag = dt.ALL):
            result[key] = list(group)
        return result

    #
    # Private routines here are called by the parser when building a spec.
    #
    def _add_versions(self, version_list):
        """Called by the parser to add an allowable version."""
        # If it already has a non-trivial version list, this is an error
        if self.versions and self.versions != vn.any_version:
            raise MultipleVersionError(
                "A spec cannot contain multiple version signifiers." " Use a version list instead."
            )
        self.versions = vn.VersionList()
        for version in version_list:
            self.versions.add(version)

    def _add_flag(self, name, value, propagate):
        """Called by the parser to add a known flag.
        Known flags currently include "arch"

@@ -1626,14 +1600,6 @@ def _set_architecture(self, **kwargs):
        else:
            setattr(self.architecture, new_attr, new_value)

    def _set_compiler(self, compiler):
        """Called by the parser to set the compiler."""
        if self.compiler:
            raise DuplicateCompilerSpecError(
                "Spec for '%s' cannot have two compilers." % self.name
            )
        self.compiler = compiler

    def _add_dependency(self, spec: "Spec", *, depflag: dt.DepFlag, virtuals: Tuple[str, ...]):
        """Called by the parser to add another spec as a dependency."""
        if spec.name not in self._dependencies or not spec.name:
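A minimal standalone sketch of the `LockableStagingDir` lifecycle introduced below: acquire a per-name lock on enter, create the directory, and destroy it on clean exit unless `keep` is set. `threading.Lock` stands in for Spack's file-based lock, and the class and module names are illustrative.

import os
import shutil
import tempfile
import threading

_LOCKS: dict = {}  # one lock per stage name, like Spack's shared lock file


class StagingDir:
    def __init__(self, name: str, keep: bool = False):
        self.name, self.keep = name, keep
        self.path = os.path.join(tempfile.gettempdir(), name)
        self._lock = _LOCKS.setdefault(name, threading.Lock())

    def __enter__(self):
        self._lock.acquire()
        os.makedirs(self.path, exist_ok=True)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Delete only on clean exit, unless the caller asked to keep it.
        if exc_type is None and not self.keep:
            shutil.rmtree(self.path, ignore_errors=True)
        self._lock.release()


with StagingDir("demo-stage") as stage:
    open(os.path.join(stage.path, "hello.txt"), "w").close()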
@@ -208,7 +208,103 @@ def _mirror_roots():
|
||||
]
|
||||
|
||||
|
||||
class Stage:
|
||||
class LockableStagingDir:
|
||||
"""A directory whose lifetime can be managed with a context
|
||||
manager (but persists if the user requests it). Instances can have
|
||||
a specified name and if they do, then for all instances that have
|
||||
the same name, only one can enter the context manager at a time.
|
||||
"""
|
||||
|
||||
def __init__(self, name, path, keep, lock):
|
||||
# TODO: This uses a protected member of tempfile, but seemed the only
|
||||
# TODO: way to get a temporary name. It won't be the same as the
|
||||
# TODO: temporary stage area in _stage_root.
|
||||
self.name = name
|
||||
if name is None:
|
||||
self.name = stage_prefix + next(tempfile._get_candidate_names())
|
||||
|
||||
# Use the provided path or construct an optionally named stage path.
|
||||
if path is not None:
|
||||
self.path = path
|
||||
else:
|
||||
self.path = os.path.join(get_stage_root(), self.name)
|
||||
|
||||
# Flag to decide whether to delete the stage folder on exit or not
|
||||
self.keep = keep
|
||||
|
||||
# File lock for the stage directory. We use one file for all
|
||||
# stage locks. See spack.database.Database.prefix_locker.lock for
|
||||
# details on this approach.
|
||||
self._lock = None
|
||||
self._use_locks = lock
|
||||
|
||||
# When stages are reused, we need to know whether to re-create
|
||||
# it. This marks whether it has been created/destroyed.
|
||||
self.created = False
|
||||
|
||||
def _get_lock(self):
|
||||
if not self._lock:
|
||||
sha1 = hashlib.sha1(self.name.encode("utf-8")).digest()
|
||||
lock_id = prefix_bits(sha1, bit_length(sys.maxsize))
|
||||
stage_lock_path = os.path.join(get_stage_root(), ".lock")
|
||||
self._lock = spack.util.lock.Lock(
|
||||
stage_lock_path, start=lock_id, length=1, desc=self.name
|
||||
)
|
||||
return self._lock
|
||||
|
||||
def __enter__(self):
|
||||
"""
|
||||
Entering a stage context will create the stage directory
|
||||
|
||||
Returns:
|
||||
self
|
||||
"""
|
||||
if self._use_locks:
|
||||
self._get_lock().acquire_write(timeout=60)
|
||||
self.create()
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
"""
|
||||
Exiting from a stage context will delete the stage directory unless:
|
||||
- it was explicitly requested not to do so
|
||||
- an exception has been raised
|
||||
|
||||
Args:
|
||||
exc_type: exception type
|
||||
exc_val: exception value
|
||||
exc_tb: exception traceback
|
||||
|
||||
Returns:
|
||||
Boolean
|
||||
"""
|
||||
# Delete when there are no exceptions, unless asked to keep.
|
||||
if exc_type is None and not self.keep:
|
||||
self.destroy()
|
||||
|
||||
if self._use_locks:
|
||||
self._get_lock().release_write()
|
||||
|
||||
def create(self):
|
||||
"""
|
||||
Ensures the top-level (config:build_stage) directory exists.
|
||||
"""
|
||||
# User has full permissions and group has only read permissions
|
||||
if not os.path.exists(self.path):
|
||||
mkdirp(self.path, mode=stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP)
|
||||
elif not os.path.isdir(self.path):
|
||||
os.remove(self.path)
|
||||
mkdirp(self.path, mode=stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP)
|
||||
|
||||
# Make sure we can actually do something with the stage we made.
|
||||
ensure_access(self.path)
|
||||
self.created = True
|
||||
|
||||
def destroy(self):
|
||||
raise NotImplementedError(f"{self.__class__.__name__} is abstract")
|
||||
|
||||
|
||||
class Stage(LockableStagingDir):
|
||||
"""Manages a temporary stage directory for building.
|
||||
|
||||
A Stage object is a context manager that handles a directory where
|
||||
@@ -251,7 +347,8 @@ class Stage:
|
||||
"""
|
||||
|
||||
#: Most staging is managed by Spack. DIYStage is one exception.
|
||||
managed_by_spack = True
|
||||
needs_fetching = True
|
||||
requires_patch_success = True
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
@@ -297,6 +394,8 @@ def __init__(
|
||||
The search function that provides the fetch strategy
|
||||
instance.
|
||||
"""
|
||||
super().__init__(name, path, keep, lock)
|
||||
|
||||
# TODO: fetch/stage coupling needs to be reworked -- the logic
|
||||
# TODO: here is convoluted and not modular enough.
|
||||
if isinstance(url_or_fetch_strategy, str):
|
||||
@@ -314,72 +413,8 @@ def __init__(
|
||||
|
||||
self.srcdir = None
|
||||
|
||||
# TODO: This uses a protected member of tempfile, but seemed the only
|
||||
# TODO: way to get a temporary name. It won't be the same as the
|
||||
# TODO: temporary stage area in _stage_root.
|
||||
self.name = name
|
||||
if name is None:
|
||||
self.name = stage_prefix + next(tempfile._get_candidate_names())
|
||||
self.mirror_paths = mirror_paths
|
||||
|
||||
# Use the provided path or construct an optionally named stage path.
|
||||
if path is not None:
|
||||
self.path = path
|
||||
else:
|
||||
self.path = os.path.join(get_stage_root(), self.name)
|
||||
|
||||
# Flag to decide whether to delete the stage folder on exit or not
|
||||
self.keep = keep
|
||||
|
||||
# File lock for the stage directory. We use one file for all
|
||||
# stage locks. See spack.database.Database.prefix_locker.lock for
|
||||
# details on this approach.
|
||||
self._lock = None
|
||||
if lock:
|
||||
sha1 = hashlib.sha1(self.name.encode("utf-8")).digest()
|
||||
lock_id = prefix_bits(sha1, bit_length(sys.maxsize))
|
||||
stage_lock_path = os.path.join(get_stage_root(), ".lock")
|
||||
self._lock = spack.util.lock.Lock(
|
||||
stage_lock_path, start=lock_id, length=1, desc=self.name
|
||||
)
|
||||
|
||||
# When stages are reused, we need to know whether to re-create
|
||||
# it. This marks whether it has been created/destroyed.
|
||||
self.created = False
|
||||
|
||||
def __enter__(self):
|
||||
"""
|
||||
Entering a stage context will create the stage directory
|
||||
|
||||
Returns:
|
||||
self
|
||||
"""
|
||||
if self._lock is not None:
|
||||
self._lock.acquire_write(timeout=60)
|
||||
self.create()
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
"""
|
||||
Exiting from a stage context will delete the stage directory unless:
|
||||
- it was explicitly requested not to do so
|
||||
- an exception has been raised
|
||||
|
||||
Args:
|
||||
exc_type: exception type
|
||||
exc_val: exception value
|
||||
exc_tb: exception traceback
|
||||
|
||||
Returns:
|
||||
Boolean
|
||||
"""
|
||||
# Delete when there are no exceptions, unless asked to keep.
|
||||
if exc_type is None and not self.keep:
|
||||
self.destroy()
|
||||
|
||||
if self._lock is not None:
|
||||
self._lock.release_write()
|
||||
|
||||
    @property
    def expected_archive_files(self):
        """Possible archive file paths."""

@@ -631,21 +666,6 @@ def restage(self):
        """
        self.fetcher.reset()

    def create(self):
        """
        Ensures the top-level (config:build_stage) directory exists.
        """
        # User has full permissions and group has only read permissions
        if not os.path.exists(self.path):
            mkdirp(self.path, mode=stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP)
        elif not os.path.isdir(self.path):
            os.remove(self.path)
            mkdirp(self.path, mode=stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP)

        # Make sure we can actually do something with the stage we made.
        ensure_access(self.path)
        self.created = True

    def destroy(self):
        """Removes this stage directory."""
        remove_linked_tree(self.path)

@@ -752,7 +772,8 @@ def __init__(self):
            "cache_mirror",
            "steal_source",
            "disable_mirrors",
            "managed_by_spack",
            "needs_fetching",
            "requires_patch_success",
        ]
    )

@@ -808,8 +829,8 @@ class DIYStage:
    directory naming convention.
    """

    """DIY staging is, by definition, not managed by Spack."""
    managed_by_spack = False
    needs_fetching = False
    requires_patch_success = False

    def __init__(self, path):
        if path is None:

@@ -857,6 +878,65 @@ def cache_local(self):
        tty.debug("Sources for DIY stages are not cached")


class DevelopStage(LockableStagingDir):
    needs_fetching = False
    requires_patch_success = False

    def __init__(self, name, dev_path, reference_link):
        super().__init__(name=name, path=None, keep=False, lock=True)
        self.dev_path = dev_path
        self.source_path = dev_path

        # The path of a link that will point to this stage
        if os.path.isabs(reference_link):
            link_path = reference_link
        else:
            link_path = os.path.join(self.source_path, reference_link)
        if not os.path.isdir(os.path.dirname(link_path)):
            raise StageError(f"The directory containing {link_path} must exist")
        self.reference_link = link_path

    @property
    def archive_file(self):
        return None

    def fetch(self, *args, **kwargs):
        tty.debug("No fetching needed for develop stage.")

    def check(self):
        tty.debug("No checksum needed for develop stage.")

    def expand_archive(self):
        tty.debug("No expansion needed for develop stage.")

    @property
    def expanded(self):
        """Returns True since the source_path must exist."""
        return True

    def create(self):
        super().create()
        try:
            llnl.util.symlink.symlink(self.path, self.reference_link)
        except (llnl.util.symlink.AlreadyExistsError, FileExistsError):
            pass

    def destroy(self):
        # Destroy all files, but do not follow symlinks
        try:
            shutil.rmtree(self.path)
        except FileNotFoundError:
            pass
        self.created = False

    def restage(self):
        self.destroy()
        self.create()

    def cache_local(self):
        tty.debug("Sources for Develop stages are not cached")


def ensure_access(file):
    """Ensure we can access a directory and die with an error if we can't."""
    if not can_access(file):
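A short sketch of how DevelopStage behaves under the semantics above (paths are hypothetical):

stage = DevelopStage("test-dev", dev_path="/path/to/checkout", reference_link="build-link")
stage.create()   # makes the stage dir and symlinks /path/to/checkout/build-link to it
stage.restage()  # destroy + create; safe, since the checkout itself is never removed
stage.destroy()  # removes only the stage directory; the symlink is left dangling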
@@ -102,7 +102,10 @@ def to_dict_or_value(self):
    if self.microarchitecture.vendor == "generic":
        return str(self)

    return syaml.syaml_dict(self.microarchitecture.to_dict(return_list_of_items=True))
    # Get rid of compiler flag information before turning the uarch into a dict
    uarch_dict = self.microarchitecture.to_dict()
    uarch_dict.pop("compilers", None)
    return syaml.syaml_dict(uarch_dict.items())

def __repr__(self):
    cls_name = self.__class__.__name__
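The intent of the to_dict_or_value change is easier to see on a concrete, made-up uarch dictionary: the "compilers" entry carries compiler-flag metadata that should not be serialized with the spec.

uarch_dict = {
    "name": "haswell",
    "vendor": "GenuineIntel",
    "features": ["avx2", "fma"],
    "compilers": {"gcc": [{"versions": "4.9:", "flags": "-march=haswell"}]},
}
uarch_dict.pop("compilers", None)  # drop flag info before YAML conversion
# -> {"name": "haswell", "vendor": "GenuineIntel", "features": ["avx2", "fma"]}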
@@ -164,3 +164,20 @@ def test_install_time_test_callback(tmpdir, config, mock_packages, mock_stage):
    with open(s.package.tester.test_log_file, "r") as f:
        results = f.read().replace("\n", " ")
    assert "PyTestCallback test" in results


@pytest.mark.regression("43097")
@pytest.mark.usefixtures("builder_test_repository", "config")
def test_mixins_with_builders(working_env):
    """Tests that run_after and run_before callbacks are accumulated correctly,
    when mixins are used with builders.
    """
    s = spack.spec.Spec("builder-and-mixins").concretized()
    builder = spack.builder.create(s.package)

    # Check that callbacks added by the mixin are in the list
    assert any(fn.__name__ == "before_install" for _, fn in builder.run_before_callbacks)
    assert any(fn.__name__ == "after_install" for _, fn in builder.run_after_callbacks)

    # Check that callbacks from the GenericBuilder are in the list too
    assert any(fn.__name__ == "sanity_check_prefix" for _, fn in builder.run_after_callbacks)
@@ -4,6 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import shutil
import sys

import pytest

@@ -247,3 +248,76 @@ def test_compiler_list_empty(no_compilers_yaml, working_env, compilers_dir):
    out = compiler("list")
    assert not out
    assert compiler.returncode == 0


@pytest.mark.parametrize(
    "external,expected",
    [
        (
            {
                "spec": "gcc@=7.7.7 os=foobar target=x86_64",
                "prefix": "/path/to/fake",
                "modules": ["gcc/7.7.7", "foobar"],
                "extra_attributes": {
                    "paths": {
                        "cc": "/path/to/fake/gcc",
                        "cxx": "/path/to/fake/g++",
                        "fc": "/path/to/fake/gfortran",
                        "f77": "/path/to/fake/gfortran",
                    },
                    "flags": {"fflags": "-ffree-form"},
                },
            },
            """gcc@7.7.7:
\tpaths:
\t\tcc = /path/to/fake/gcc
\t\tcxx = /path/to/fake/g++
\t\tf77 = /path/to/fake/gfortran
\t\tfc = /path/to/fake/gfortran
\tflags:
\t\tfflags = ['-ffree-form']
\tmodules = ['gcc/7.7.7', 'foobar']
\toperating system = foobar
""",
        ),
        (
            {
                "spec": "gcc@7.7.7",
                "prefix": "{prefix}",
                "modules": ["gcc/7.7.7", "foobar"],
                "extra_attributes": {"flags": {"fflags": "-ffree-form"}},
            },
            """gcc@7.7.7:
\tpaths:
\t\tcc = {compilers_dir}{sep}gcc-8{suffix}
\t\tcxx = {compilers_dir}{sep}g++-8{suffix}
\t\tf77 = {compilers_dir}{sep}gfortran-8{suffix}
\t\tfc = {compilers_dir}{sep}gfortran-8{suffix}
\tflags:
\t\tfflags = ['-ffree-form']
\tmodules = ['gcc/7.7.7', 'foobar']
\toperating system = debian6
""",
        ),
    ],
)
def test_compilers_shows_packages_yaml(
    external, expected, no_compilers_yaml, working_env, compilers_dir
):
    """Spack should see a single compiler defined from packages.yaml"""
    external["prefix"] = external["prefix"].format(prefix=os.path.dirname(compilers_dir))
    gcc_entry = {"externals": [external]}

    packages = spack.config.get("packages")
    packages["gcc"] = gcc_entry
    spack.config.set("packages", packages)

    out = compiler("list")
    assert out.count("gcc@7.7.7") == 1

    out = compiler("info", "gcc@7.7.7")
    assert out == expected.format(
        compilers_dir=str(compilers_dir),
        sep=os.sep,
        suffix=".bat" if sys.platform == "win32" else "",
    )
@@ -41,6 +41,7 @@ def test_dev_build_basics(tmpdir, install_mockery):
    assert os.path.exists(str(tmpdir))


@pytest.mark.disable_clean_stage_check
def test_dev_build_before(tmpdir, install_mockery):
    spec = spack.spec.Spec(f"dev-build-test-install@0.0.0 dev_path={tmpdir}").concretized()

@@ -57,6 +58,7 @@ def test_dev_build_before(tmpdir, install_mockery):
    assert not os.path.exists(spec.prefix)


@pytest.mark.disable_clean_stage_check
def test_dev_build_until(tmpdir, install_mockery):
    spec = spack.spec.Spec(f"dev-build-test-install@0.0.0 dev_path={tmpdir}").concretized()

@@ -74,6 +76,7 @@ def test_dev_build_until(tmpdir, install_mockery):
    assert not spack.store.STORE.db.query(spec, installed=True)


@pytest.mark.disable_clean_stage_check
def test_dev_build_until_last_phase(tmpdir, install_mockery):
    # Test that we ignore the last_phase argument if it is already last
    spec = spack.spec.Spec(f"dev-build-test-install@0.0.0 dev_path={tmpdir}").concretized()

@@ -93,6 +96,7 @@ def test_dev_build_until_last_phase(tmpdir, install_mockery):
    assert os.path.exists(str(tmpdir))


@pytest.mark.disable_clean_stage_check
def test_dev_build_before_until(tmpdir, install_mockery, capsys):
    spec = spack.spec.Spec(f"dev-build-test-install@0.0.0 dev_path={tmpdir}").concretized()

@@ -130,6 +134,7 @@ def mock_module_noop(*args):
    pass


@pytest.mark.disable_clean_stage_check
def test_dev_build_drop_in(tmpdir, mock_packages, monkeypatch, install_mockery, working_env):
    monkeypatch.setattr(os, "execvp", print_spack_cc)
    monkeypatch.setattr(spack.build_environment, "module", mock_module_noop)
@@ -14,6 +14,7 @@
import spack.spec
from spack.main import SpackCommand

add = SpackCommand("add")
develop = SpackCommand("develop")
env = SpackCommand("env")

@@ -192,14 +193,16 @@ def test_develop_full_git_repo(
    finally:
        spec.package.do_clean()

    # Now use "spack develop": look at the resulting stage directory and make
    # Now use "spack develop": look at the resulting dev_path and make
    # sure the git repo pulled includes the full branch history (or rather,
    # more than just one commit).
    env("create", "test")
    with ev.read("test"):
    with ev.read("test") as e:
        add("git-test-commit")
        develop("git-test-commit@1.2")

        location = SpackCommand("location")
        develop_stage_dir = location("git-test-commit").strip()
        commits = _git_commit_list(develop_stage_dir)
        e.concretize()
        spec = e.all_specs()[0]
        develop_dir = spec.variants["dev_path"].value
        commits = _git_commit_list(develop_dir)
        assert len(commits) > 1
@@ -144,12 +144,9 @@ def test_list_repos():
        os.path.join(spack.paths.repos_path, "builder.test"),
    ):
        total_pkgs = len(list().strip().split())

        mock_pkgs = len(list("-r", "builtin.mock").strip().split())
        builder_pkgs = len(list("-r", "builder.test").strip().split())

        assert builder_pkgs == 8
        assert total_pkgs > mock_pkgs > builder_pkgs

        both_repos = len(list("-r", "builtin.mock", "-r", "builder.test").strip().split())

        assert total_pkgs > mock_pkgs > builder_pkgs
        assert both_repos == total_pkgs
@@ -12,13 +12,7 @@

maintainers = spack.main.SpackCommand("maintainers")

MAINTAINED_PACKAGES = [
    "maintainers-1",
    "maintainers-2",
    "maintainers-3",
    "py-extension1",
    "py-extension2",
]
MAINTAINED_PACKAGES = ["maintainers-1", "maintainers-2", "maintainers-3", "py-extension1"]


def split(output):

@@ -53,11 +47,8 @@ def test_all(mock_packages, capfd):
        "user2,",
        "user3",
        "py-extension1:",
        "adamjstewart,",
        "user1,",
        "user2",
        "py-extension2:",
        "adamjstewart",
    ]

    with capfd.disabled():

@@ -69,9 +60,6 @@ def test_all_by_user(mock_packages, capfd):
    with capfd.disabled():
        out = split(maintainers("--all", "--by-user"))
    assert out == [
        "adamjstewart:",
        "py-extension1,",
        "py-extension2",
        "user0:",
        "maintainers-3",
        "user1:",
@@ -221,6 +221,7 @@ def test_test_list_all(mock_packages):
    [
        "fail-test-audit",
        "mpich",
        "perl-extension",
        "printing-package",
        "py-extension1",
        "py-extension2",
@@ -15,7 +15,7 @@
import spack.spec
import spack.util.environment
from spack.compiler import Compiler
from spack.util.executable import ProcessError
from spack.util.executable import Executable, ProcessError


@pytest.fixture()

@@ -138,11 +138,11 @@ def __init__(self):
            environment={},
        )

    def _get_compiler_link_paths(self, paths):
    def _get_compiler_link_paths(self):
        # Mock os.path.isdir so the link paths don't have to exist
        old_isdir = os.path.isdir
        os.path.isdir = lambda x: True
        ret = super()._get_compiler_link_paths(paths)
        ret = super()._get_compiler_link_paths()
        os.path.isdir = old_isdir
        return ret

@@ -197,37 +197,37 @@ def call_compiler(exe, *args, **kwargs):
@pytest.mark.parametrize(
    "exe,flagname",
    [
        ("cxx", ""),
        ("cxx", "cxxflags"),
        ("cxx", "cppflags"),
        ("cxx", "ldflags"),
        ("cc", ""),
        ("cc", "cflags"),
        ("cc", "cppflags"),
        ("fc", ""),
        ("fc", "fflags"),
        ("f77", "fflags"),
        ("f77", "cppflags"),
    ],
)
@pytest.mark.enable_compiler_link_paths
def test_get_compiler_link_paths(monkeypatch, exe, flagname):
    # create fake compiler that emits mock verbose output
    compiler = MockCompiler()
    monkeypatch.setattr(spack.util.executable.Executable, "__call__", call_compiler)
    monkeypatch.setattr(Executable, "__call__", call_compiler)

    # Grab executable path to test
    paths = [getattr(compiler, exe)]
    if exe == "cxx":
        compiler.cc = None
        compiler.fc = None
        compiler.f77 = None
    elif exe == "cc":
        compiler.cxx = None
        compiler.fc = None
        compiler.f77 = None
    else:
        assert False

    # Test without flags
    dirs = compiler._get_compiler_link_paths(paths)
    assert dirs == no_flag_dirs
    assert compiler._get_compiler_link_paths() == no_flag_dirs

    if flagname:
        # set flags and test
        setattr(compiler, "flags", {flagname: ["--correct-flag"]})
        dirs = compiler._get_compiler_link_paths(paths)
        assert dirs == flag_dirs
        compiler.flags = {flagname: ["--correct-flag"]}
        assert compiler._get_compiler_link_paths() == flag_dirs


def test_get_compiler_link_paths_no_path():

@@ -236,17 +236,13 @@ def test_get_compiler_link_paths_no_path():
    compiler.cxx = None
    compiler.f77 = None
    compiler.fc = None

    dirs = compiler._get_compiler_link_paths([compiler.cxx])
    assert dirs == []
    assert compiler._get_compiler_link_paths() == []


def test_get_compiler_link_paths_no_verbose_flag():
    compiler = MockCompiler()
    compiler._verbose_flag = None

    dirs = compiler._get_compiler_link_paths([compiler.cxx])
    assert dirs == []
    assert compiler._get_compiler_link_paths() == []


@pytest.mark.not_on_windows("Not supported on Windows (yet)")

@@ -275,11 +271,11 @@ def module(*args):
    monkeypatch.setattr(spack.util.module_cmd, "module", module)

    compiler = MockCompiler()
    compiler.cc = gcc
    compiler.environment = {"set": {"ENV_SET": "1"}}
    compiler.modules = ["turn_on"]

    dirs = compiler._get_compiler_link_paths([gcc])
    assert dirs == no_flag_dirs
    assert compiler._get_compiler_link_paths() == no_flag_dirs


# Get the desired flag from the specified compiler spec.

@@ -824,7 +820,7 @@ def module(*args):
    def _call(*args, **kwargs):
        raise ProcessError("Failed intentionally")

    monkeypatch.setattr(spack.util.executable.Executable, "__call__", _call)
    monkeypatch.setattr(Executable, "__call__", _call)

    # Run and no change to environment
    compilers = spack.compilers.get_compilers([compiler_dict])
@@ -2114,6 +2114,15 @@ def test_unsolved_specs_raises_error(self, monkeypatch, mock_packages, config):
    ):
        solver.driver.solve(setup, specs, reuse=[])

    @pytest.mark.regression("43141")
    @pytest.mark.only_clingo("Use case not supported by the original concretizer")
    def test_clear_error_when_unknown_compiler_requested(self, mock_packages, config):
        """Tests that the solver can report a case where the compiler cannot be set"""
        with pytest.raises(
            spack.error.UnsatisfiableSpecError, match="Cannot set the required compiler: a%foo"
        ):
            Spec("a %foo").concretized()

    @pytest.mark.regression("36339")
    def test_compiler_match_constraints_when_selected(self):
        """Test that, when multiple compilers with the same name are in the configuration
@@ -11,6 +11,7 @@
import spack.repo
import spack.solver.asp
import spack.spec
from spack.environment.environment import ViewDescriptor
from spack.version import Version

pytestmark = [

@@ -19,6 +20,17 @@
]


def _concretize_with_reuse(*, root_str, reused_str):
    reused_spec = spack.spec.Spec(reused_str).concretized()
    setup = spack.solver.asp.SpackSolverSetup(tests=False)
    driver = spack.solver.asp.PyclingoDriver()
    result, _, _ = driver.solve(
        setup, [spack.spec.Spec(f"{root_str} ^{reused_str}")], reuse=[reused_spec]
    )
    root = result.specs[0]
    return root, reused_spec


@pytest.fixture
def runtime_repo(config):
    repo = os.path.join(spack.paths.repos_path, "compiler_runtime.test")

@@ -60,3 +72,59 @@ def test_external_nodes_do_not_have_runtimes(runtime_repo, mutable_config, tmp_p
    assert a.dependencies("gcc-runtime")
    assert a.dependencies("b")
    assert not b.dependencies("gcc-runtime")


@pytest.mark.parametrize(
    "root_str,reused_str,expected,nruntime",
    [
        # The reused runtime is older than we need, thus we'll add a more recent one for "a"
        ("a%gcc@10.2.1", "b%gcc@4.5.0", {"a": "gcc-runtime@10.2.1", "b": "gcc-runtime@4.5.0"}, 2),
        # The root is compiled with an older compiler, thus we'll reuse the runtime from b
        ("a%gcc@4.5.0", "b%gcc@10.2.1", {"a": "gcc-runtime@10.2.1", "b": "gcc-runtime@10.2.1"}, 1),
    ],
)
def test_reusing_specs_with_gcc_runtime(root_str, reused_str, expected, nruntime, runtime_repo):
    """Tests that we can reuse specs with a "gcc-runtime" leaf node. In particular, checks
    that the semantics for gcc-runtime versions account for reused packages too.
    """
    root, reused_spec = _concretize_with_reuse(root_str=root_str, reused_str=reused_str)

    assert f"{expected['b']}" in reused_spec
    runtime_a = root.dependencies("gcc-runtime")[0]
    assert runtime_a.satisfies(expected["a"])
    runtime_b = root["b"].dependencies("gcc-runtime")[0]
    assert runtime_b.satisfies(expected["b"])

    runtimes = [x for x in root.traverse() if x.name == "gcc-runtime"]
    assert len(runtimes) == nruntime


@pytest.mark.parametrize(
    "root_str,reused_str,expected,not_expected",
    [
        # Ensure that, whether we have multiple runtimes in the DAG or not,
        # we always link only the latest version
        ("a%gcc@10.2.1", "b%gcc@4.5.0", ["gcc-runtime@10.2.1"], ["gcc-runtime@4.5.0"]),
        ("a%gcc@4.5.0", "b%gcc@10.2.1", ["gcc-runtime@10.2.1"], ["gcc-runtime@4.5.0"]),
    ],
)
def test_views_can_handle_duplicate_runtime_nodes(
    root_str, reused_str, expected, not_expected, runtime_repo, tmp_path, monkeypatch
):
    """Tests that an environment is able to select the latest version of a runtime node to be
    linked in a view, in case more than one compatible version is in the DAG.
    """
    root, reused_spec = _concretize_with_reuse(root_str=root_str, reused_str=reused_str)

    # Mock the installation status to allow selecting nodes for the view
    monkeypatch.setattr(spack.spec.Spec, "installed", True)
    nodes = list(root.traverse())

    view = ViewDescriptor(str(tmp_path), str(tmp_path))
    candidate_specs = view.specs_for_view(nodes)

    for x in expected:
        assert any(node.satisfies(x) for node in candidate_specs)

    for x in not_expected:
        assert all(not node.satisfies(x) for node in candidate_specs)
@@ -25,7 +25,7 @@ def test_build_and_run_images(minimal_configuration):

    # Test the output of the build property
    build = writer.build
    assert build.image == "spack/ubuntu-bionic:latest"
    assert build.image == "spack/ubuntu-bionic:develop"


def test_packages(minimal_configuration):

@@ -12,7 +12,7 @@
@pytest.mark.parametrize(
    "image,spack_version,expected",
    [
        ("ubuntu:18.04", "develop", ("spack/ubuntu-bionic", "latest")),
        ("ubuntu:18.04", "develop", ("spack/ubuntu-bionic", "develop")),
        ("ubuntu:18.04", "0.14.0", ("spack/ubuntu-bionic", "0.14.0")),
    ],
)
@@ -24,8 +24,6 @@ class PyTorch(PythonPackage, CudaPackage):
    homepage = "https://pytorch.org/"
    git = "https://github.com/pytorch/pytorch.git"

    maintainers("adamjstewart")

    # Exact set of modules is version- and variant-specific, just attempt to import the
    # core libraries to ensure that the package was successfully installed.
    import_modules = ["torch", "torch.autograd", "torch.nn", "torch.utils"]
@@ -79,7 +79,7 @@ def test_error_on_anonymous_dependency(config, mock_packages):
    [
        ("maintainers-1", ["user1", "user2"]),
        # Extends PythonPackage
        ("py-extension1", ["adamjstewart", "user1", "user2"]),
        ("py-extension1", ["user1", "user2"]),
        # Extends maintainers-1
        ("maintainers-3", ["user0", "user1", "user2", "user3"]),
    ],
lib/spack/spack/test/entry_points.py (new file, 114 lines)
@@ -0,0 +1,114 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import os
import sys

import pytest

import llnl.util.lang

import spack.config
import spack.extensions


class MockConfigEntryPoint:
    def __init__(self, tmp_path):
        self.dir = tmp_path
        self.name = "mypackage_config"

    def load(self):
        etc_path = self.dir.joinpath("spack/etc")
        etc_path.mkdir(exist_ok=True, parents=True)
        f = self.dir / "spack/etc/config.yaml"
        with open(f, "w") as fh:
            fh.write("config:\n  install_tree:\n    root: /spam/opt\n")

        def ep():
            return self.dir / "spack/etc"

        return ep


class MockExtensionsEntryPoint:
    def __init__(self, tmp_path):
        self.dir = tmp_path
        self.name = "mypackage_extensions"

    def load(self):
        cmd_path = self.dir.joinpath("spack/spack-myext/myext/cmd")
        cmd_path.mkdir(exist_ok=True, parents=True)
        f = self.dir / "spack/spack-myext/myext/cmd/spam.py"
        with open(f, "w") as fh:
            fh.write("description = 'hello world extension command'\n")
            fh.write("section = 'test command'\n")
            fh.write("level = 'long'\n")
            fh.write("def setup_parser(subparser):\n    pass\n")
            fh.write("def spam(parser, args):\n    print('spam for all!')\n")

        def ep():
            return self.dir / "spack/spack-myext"

        return ep


def entry_points_factory(tmp_path):
    def entry_points(group=None):
        if group == "spack.config":
            return (MockConfigEntryPoint(tmp_path),)
        elif group == "spack.extensions":
            return (MockExtensionsEntryPoint(tmp_path),)
        return ()

    return entry_points


@pytest.fixture()
def mock_get_entry_points(tmp_path, monkeypatch):
    entry_points = entry_points_factory(tmp_path)
    monkeypatch.setattr(llnl.util.lang, "get_entry_points", entry_points)


def test_spack_entry_point_config(tmp_path, mock_get_entry_points):
    """Test config scope entry point"""
    config_paths = dict(spack.config.config_paths_from_entry_points())
    config_path = config_paths.get("plugin-mypackage_config")
    my_config_path = tmp_path / "spack/etc"
    if config_path is None:
        raise ValueError("Did not find entry point config in %s" % str(config_paths))
    else:
        assert os.path.samefile(config_path, my_config_path)
    config = spack.config.create()
    assert config.get("config:install_tree:root", scope="plugin-mypackage_config") == "/spam/opt"


def test_spack_entry_point_extension(tmp_path, mock_get_entry_points):
    """Test extensions entry point"""
    my_ext = tmp_path / "spack/spack-myext"
    extensions = spack.extensions.get_extension_paths()
    found = bool([ext for ext in extensions if os.path.samefile(ext, my_ext)])
    if not found:
        raise ValueError("Did not find extension in %s" % ", ".join(extensions))
    extensions = spack.extensions.extension_paths_from_entry_points()
    found = bool([ext for ext in extensions if os.path.samefile(ext, my_ext)])
    if not found:
        raise ValueError("Did not find extension in %s" % ", ".join(extensions))
    root = spack.extensions.load_extension("myext")
    assert os.path.samefile(root, my_ext)
    module = spack.extensions.get_module("spam")
    assert module is not None


@pytest.mark.skipif(sys.version_info[:2] < (3, 8), reason="Python>=3.8 required")
def test_llnl_util_lang_get_entry_points(tmp_path, monkeypatch):
    import importlib.metadata  # type: ignore  # novermin

    monkeypatch.setattr(importlib.metadata, "entry_points", entry_points_factory(tmp_path))

    entry_points = list(llnl.util.lang.get_entry_points(group="spack.config"))
    assert isinstance(entry_points[0], MockConfigEntryPoint)

    entry_points = list(llnl.util.lang.get_entry_points(group="spack.extensions"))
    assert isinstance(entry_points[0], MockExtensionsEntryPoint)
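For context, a hypothetical third-party distribution could expose the two entry-point groups exercised by these tests, "spack.config" and "spack.extensions", roughly like this (package and callable names are illustrative only):

from setuptools import setup

setup(
    name="mypackage",
    # Each entry point resolves to a callable returning a path, mirroring
    # the ep() callables returned by the mock load() methods above.
    entry_points={
        "spack.config": ["mypackage_config = mypackage:get_config_path"],
        "spack.extensions": ["mypackage_extensions = mypackage:get_extension_path"],
    },
)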
@@ -6,6 +6,7 @@
import collections
import filecmp
import os
import shutil
import sys

import pytest

@@ -89,7 +90,6 @@ def test_url_patch(mock_patch_stage, filename, sha256, archive_sha256, config):
    # Make a patch object
    url = url_util.path_to_file_url(filename)
    s = Spec("patch").concretized()
    patch = spack.patch.UrlPatch(s.package, url, sha256=sha256, archive_sha256=archive_sha256)

    # make a stage
    with Stage(url) as stage:  # TODO: url isn't used; maybe refactor Stage

@@ -105,6 +105,8 @@ def test_url_patch(mock_patch_stage, filename, sha256, archive_sha256, config):
second line
"""
        )
        # save it for later comparison
        shutil.copyfile("foo.txt", "foo-original.txt")
        # write the expected result of patching.
        with open("foo-expected.txt", "w") as f:
            f.write(

@@ -115,6 +117,7 @@ def test_url_patch(mock_patch_stage, filename, sha256, archive_sha256, config):
"""
            )
        # apply the patch and compare files
        patch = spack.patch.UrlPatch(s.package, url, sha256=sha256, archive_sha256=archive_sha256)
        with patch.stage:
            patch.stage.create()
            patch.stage.fetch()

@@ -124,6 +127,19 @@ def test_url_patch(mock_patch_stage, filename, sha256, archive_sha256, config):
        with working_dir(stage.source_path):
            assert filecmp.cmp("foo.txt", "foo-expected.txt")

        # apply the patch in reverse and compare files
        patch = spack.patch.UrlPatch(
            s.package, url, sha256=sha256, archive_sha256=archive_sha256, reverse=True
        )
        with patch.stage:
            patch.stage.create()
            patch.stage.fetch()
            patch.stage.expand_archive()
            patch.apply(stage)

        with working_dir(stage.source_path):
            assert filecmp.cmp("foo.txt", "foo-original.txt")


def test_patch_in_spec(mock_packages, config):
    """Test whether patches in a package appear in the spec."""

@@ -252,7 +268,7 @@ def trigger_bad_patch(pkg):


def test_patch_failure_develop_spec_exits_gracefully(
    mock_packages, config, install_mockery, mock_fetch, tmpdir
    mock_packages, config, install_mockery, mock_fetch, tmpdir, mock_stage
):
    """
    ensure that a failing patch does not trigger exceptions

@@ -425,6 +441,19 @@ def test_patch_no_file():
    patch.apply("")


def test_patch_no_sha256():
    # Give it the attributes we need to construct the error message
    FakePackage = collections.namedtuple("FakePackage", ["name", "namespace", "fullname"])
    fp = FakePackage("fake-package", "test", "fake-package")
    url = url_util.path_to_file_url("foo.tgz")
    match = "Compressed patches require 'archive_sha256' and patch 'sha256' attributes: file://"
    with pytest.raises(spack.patch.PatchDirectiveError, match=match):
        spack.patch.UrlPatch(fp, url, sha256="", archive_sha256="")
    match = "URL patches require a sha256 checksum"
    with pytest.raises(spack.patch.PatchDirectiveError, match=match):
        spack.patch.UrlPatch(fp, url, sha256="", archive_sha256="abc")


@pytest.mark.parametrize("level", [-1, 0.0, "1"])
def test_invalid_level(level):
    # Give it the attributes we need to construct the error message

@@ -432,3 +461,41 @@ def test_invalid_level(level):
    fp = FakePackage("fake-package", "test")
    with pytest.raises(ValueError, match="Patch level needs to be a non-negative integer."):
        spack.patch.Patch(fp, "nonexistent_file", level, "")


def test_equality():
    FakePackage = collections.namedtuple("FakePackage", ["name", "namespace", "fullname"])
    fp = FakePackage("fake-package", "test", "fake-package")
    patch1 = spack.patch.UrlPatch(fp, "nonexistent_url1", sha256="abc")
    patch2 = spack.patch.UrlPatch(fp, "nonexistent_url2", sha256="def")
    assert patch1 == patch1
    assert patch1 != patch2
    assert patch1 != "not a patch"


def test_sha256_setter(mock_patch_stage, config):
    path = os.path.join(data_path, "foo.patch")
    s = Spec("patch").concretized()
    patch = spack.patch.FilePatch(s.package, path, level=1, working_dir=".")
    patch.sha256 = "abc"


def test_invalid_from_dict(mock_packages, config):
    dictionary = {}
    with pytest.raises(ValueError, match="Invalid patch dictionary:"):
        spack.patch.from_dict(dictionary)

    dictionary = {"owner": "patch"}
    with pytest.raises(ValueError, match="Invalid patch dictionary:"):
        spack.patch.from_dict(dictionary)

    dictionary = {
        "owner": "patch",
        "relative_path": "foo.patch",
        "level": 1,
        "working_dir": ".",
        "reverse": False,
        "sha256": bar_sha256,
    }
    with pytest.raises(spack.fetch_strategy.ChecksumError, match="sha256 checksum failed for"):
        spack.patch.from_dict(dictionary)
lib/spack/spack/test/projections.py (new file, 19 lines)
@@ -0,0 +1,19 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from datetime import date

import spack.projections
import spack.spec


def test_projection_expansion(mock_packages, monkeypatch):
    """Test that env variables and spack config variables are expanded in projections"""

    monkeypatch.setenv("FOO_ENV_VAR", "test-string")
    projections = {"all": "{name}-{version}/$FOO_ENV_VAR/$date"}
    spec = spack.spec.Spec("fake@1.0")
    projection = spack.projections.get_projection(projections, spec)
    assert "{name}-{version}/test-string/%s" % date.today().strftime("%Y-%m-%d") == projection
@@ -22,7 +22,7 @@
import spack.util.executable
import spack.util.url as url_util
from spack.resource import Resource
from spack.stage import DIYStage, ResourceStage, Stage, StageComposite
from spack.stage import DevelopStage, DIYStage, ResourceStage, Stage, StageComposite
from spack.util.path import canonicalize_path

# The following values are used for common fetch and stage mocking fixtures:

@@ -145,7 +145,7 @@ def check_destroy(stage, stage_name):
    assert not os.path.exists(stage_path)

    # tmp stage needs to remove tmp dir too.
    if not stage.managed_by_spack:
    if not isinstance(stage, DIYStage):
        target = os.path.realpath(stage_path)
        assert not os.path.exists(target)

@@ -857,6 +857,73 @@ def test_diystage_preserve_file(self, tmpdir):
        _file.read() == _readme_contents


def _create_files_from_tree(base, tree):
    for name, content in tree.items():
        sub_base = os.path.join(base, name)
        if isinstance(content, dict):
            os.mkdir(sub_base)
            _create_files_from_tree(sub_base, content)
        else:
            assert (content is None) or (isinstance(content, str))
            with open(sub_base, "w") as f:
                if content:
                    f.write(content)


def _create_tree_from_dir_recursive(path):
    if os.path.islink(path):
        return os.readlink(path)
    elif os.path.isdir(path):
        tree = {}
        for name in os.listdir(path):
            sub_path = os.path.join(path, name)
            tree[name] = _create_tree_from_dir_recursive(sub_path)
        return tree
    else:
        with open(path, "r") as f:
            content = f.read() or None
        return content


@pytest.fixture
def develop_path(tmpdir):
    dir_structure = {"a1": {"b1": None, "b2": "b1content"}, "a2": None}
    srcdir = str(tmpdir.join("test-src"))
    os.mkdir(srcdir)
    _create_files_from_tree(srcdir, dir_structure)
    yield dir_structure, srcdir


class TestDevelopStage:
    def test_sanity_check_develop_path(self, develop_path):
        _, srcdir = develop_path
        with open(os.path.join(srcdir, "a1", "b2")) as f:
            assert f.read() == "b1content"

        assert os.path.exists(os.path.join(srcdir, "a2"))

    def test_develop_stage(self, develop_path, tmp_build_stage_dir):
        """Check that (a) develop stages update the given
        `dev_path` with a symlink that points to the stage dir and
        (b) that destroying the stage does not destroy `dev_path`
        """
        devtree, srcdir = develop_path
        stage = DevelopStage("test-stage", srcdir, reference_link="link-to-stage")
        stage.create()
        srctree1 = _create_tree_from_dir_recursive(stage.source_path)
        assert os.path.samefile(srctree1["link-to-stage"], stage.path)
        del srctree1["link-to-stage"]
        assert srctree1 == devtree

        stage.destroy()
        # Make sure destroying the stage doesn't change anything
        # about the path
        assert not os.path.exists(stage.path)
        srctree2 = _create_tree_from_dir_recursive(srcdir)
        del srctree2["link-to-stage"]  # Note the symlink persists but is broken
        assert srctree2 == devtree


def test_stage_create_replace_path(tmp_build_stage_dir):
    """Ensure stage creation replaces a non-directory path."""
    _, test_stage_path = tmp_build_stage_dir

@@ -740,7 +740,7 @@ class VersionList:
    """Sorted, non-redundant list of Version and ClosedOpenRange elements."""

    def __init__(self, vlist=None):
        self.versions: List[StandardVersion, GitVersion, ClosedOpenRange] = []
        self.versions: List[Union[StandardVersion, GitVersion, ClosedOpenRange]] = []
        if vlist is None:
            pass
        elif isinstance(vlist, str):

@@ -814,16 +814,20 @@ def copy(self):

    def lowest(self) -> Optional[StandardVersion]:
        """Get the lowest version in the list."""
        return None if not self else self[0]
        return next((v for v in self.versions if isinstance(v, StandardVersion)), None)

    def highest(self) -> Optional[StandardVersion]:
        """Get the highest version in the list."""
        return None if not self else self[-1]
        return next((v for v in reversed(self.versions) if isinstance(v, StandardVersion)), None)

    def highest_numeric(self) -> Optional[StandardVersion]:
        """Get the highest numeric version in the list."""
        numeric_versions = list(filter(lambda v: str(v) not in infinity_versions, self.versions))
        return None if not any(numeric_versions) else numeric_versions[-1]
        numeric = (
            v
            for v in reversed(self.versions)
            if isinstance(v, StandardVersion) and not v.isdevelop()
        )
        return next(numeric, None)

    def preferred(self) -> Optional[StandardVersion]:
        """Get the preferred (latest) version in the list."""
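A small sketch of the behavioral change in lowest/highest: instead of blindly indexing into the sorted list, the new code skips entries that are not StandardVersion instances, such as ranges. Assuming the constructor accepts version strings:

from spack.version import VersionList

vl = VersionList(["1.0", "2.0:3.0", "4.0"])  # mixes concrete versions and a range
print(vl.lowest())   # 1.0 -- first StandardVersion; the range is skipped
print(vl.highest())  # 4.0 -- scans from the high end with the same filter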
@@ -154,11 +154,13 @@ ignore_missing_imports = true
    'boto3',
    'botocore',
    'distro',
    'importlib.metadata',
    'jinja2',
    'jsonschema',
    'macholib',
    'markupsafe',
    'numpy',
    'pkg_resources',
    'pyristent',
    'pytest',
    'ruamel.yaml',
@@ -41,6 +41,9 @@ spack:
      variants: fabrics=sockets,tcp,udp,rxm
    libunwind:
      variants: +pic +xz
    mgard:
      require:
      - "@2023-01-10:"
    mpich:
      variants: ~wrapperrpath
    ncurses:

@@ -81,6 +84,7 @@ spack:
  - chai ~benchmarks ~tests
  - charliecloud
  - conduit
  # - cp2k +mpi # libxsmm: ftn-78 ftn: ERROR in command line; The -f option has an invalid argument, "tree-vectorize".
  - datatransferkit
  - flecsi
  - flit

@@ -102,14 +106,13 @@ spack:
  - lammps
  - legion
  - libnrm
  - libpressio +bitgrooming +bzip2 ~cuda ~cusz +fpzip +hdf5 +libdistributed
    +lua +openmp +python +sz +sz3 +unix +zfp +json +remote +netcdf +mgard
  #- libpressio +bitgrooming +bzip2 ~cuda ~cusz +fpzip +hdf5 +libdistributed +lua +openmp +python +sz +sz3 +unix +zfp +json +remote +netcdf +mgard # mgard:
  - libquo
  - libunwind
  - mercury
  - metall
  - mfem
  - mgard +serial +openmp +timing +unstructured ~cuda
  # - mgard +serial +openmp +timing +unstructured ~cuda # mgard
  - mpark-variant
  - mpifileutils ~xattr
  - nccmp

@@ -117,7 +120,7 @@ spack:
  - netlib-scalapack
  - omega-h
  - openmpi
  - openpmd-api
  - openpmd-api ^adios2~mgard
  - papi
  - papyrus
  - pdt
@@ -73,6 +73,7 @@ spack:
  - cabana
  - chai ~benchmarks ~tests
  - conduit
  # - cp2k +mpi # cp2k: Error: Type mismatch between actual argument at (1) and actual argument at (2) (LOGICAL(4)/COMPLEX(4)).
  - datatransferkit
  - flecsi
  - fortrilinos
@@ -67,6 +67,7 @@ spack:
  - chai ~benchmarks ~tests
  - charliecloud
  - conduit
  - cp2k +mpi
  - datatransferkit
  - dyninst
  - ecp-data-vis-sdk ~cuda ~rocm +adios2 +ascent +cinema +darshan +faodel +hdf5 +paraview +pnetcdf +sz +unifyfs +veloc ~visit +vtkm +zfp # +visit: ?

@@ -81,7 +82,7 @@ spack:
  - gmp
  - gotcha
  - gptune ~mpispawn
  - gromacs +cp2k ^cp2k+dlaf build_system=cmake
  - gromacs +cp2k ^cp2k +mpi +dlaf build_system=cmake
  - h5bench
  - hdf5-vol-async
  - hdf5-vol-cache

@@ -214,6 +215,7 @@ spack:
  - cabana +cuda cuda_arch=75 ^kokkos +wrapper +cuda_lambda +cuda cuda_arch=75
  - caliper +cuda cuda_arch=75
  - chai ~benchmarks ~tests +cuda cuda_arch=75 ^umpire ~shared
  # - cp2k +mpi +cuda cuda_arch=75 # cp2k: cp2k only supports cuda_arch ('35', '37', '60', '70', '80')
  - flecsi +cuda cuda_arch=75
  - ginkgo +cuda cuda_arch=75
  - gromacs +cuda cuda_arch=75

@@ -260,6 +262,7 @@ spack:
  - cabana +cuda cuda_arch=80 ^kokkos +wrapper +cuda_lambda +cuda cuda_arch=80
  - caliper +cuda cuda_arch=80
  - chai ~benchmarks ~tests +cuda cuda_arch=80 ^umpire ~shared
  # - cp2k +mpi +cuda cuda_arch=80 # cp2k: Error: KeyError: 'Point environment variable LIBSMM_PATH to the absolute path of the libsmm.a file'
  - flecsi +cuda cuda_arch=80
  - ginkgo +cuda cuda_arch=80
  - gromacs +cuda cuda_arch=80

@@ -306,6 +309,7 @@ spack:
  - cabana +cuda cuda_arch=90 ^kokkos +wrapper +cuda_lambda +cuda cuda_arch=90
  - caliper +cuda cuda_arch=90
  - chai ~benchmarks ~tests +cuda cuda_arch=90 ^umpire ~shared
  # - cp2k +mpi +cuda cuda_arch=90 # cp2k: cp2k only supports cuda_arch ('35', '37', '60', '70', '80')
  - flecsi +cuda cuda_arch=90
  - ginkgo +cuda cuda_arch=90
  - gromacs +cuda cuda_arch=90
@@ -67,6 +67,7 @@ spack:
  - chai ~benchmarks ~tests
  - charliecloud
  - conduit
  - cp2k +mpi
  - datatransferkit
  - dyninst
  - ecp-data-vis-sdk ~cuda ~rocm +adios2 +ascent +cinema +darshan +faodel +hdf5 +paraview +pnetcdf +sz +unifyfs +veloc ~visit +vtkm +zfp # +visit: ?

@@ -81,7 +82,7 @@ spack:
  - gmp
  - gotcha
  - gptune ~mpispawn
  - gromacs +cp2k ^cp2k+dlaf build_system=cmake
  - gromacs +cp2k ^cp2k +mpi +dlaf build_system=cmake
  - h5bench
  - hdf5-vol-async
  - hdf5-vol-cache

@@ -214,6 +215,7 @@ spack:
  - cabana +cuda cuda_arch=75 ^kokkos +wrapper +cuda_lambda +cuda cuda_arch=75
  - caliper +cuda cuda_arch=75
  - chai ~benchmarks ~tests +cuda cuda_arch=75 ^umpire ~shared
  # - cp2k +mpi +cuda cuda_arch=75 # cp2k: cp2k only supports cuda_arch ('35', '37', '60', '70', '80')
  - flecsi +cuda cuda_arch=75
  - ginkgo +cuda cuda_arch=75
  - gromacs +cuda cuda_arch=75

@@ -260,6 +262,7 @@ spack:
  - cabana +cuda cuda_arch=80 ^kokkos +wrapper +cuda_lambda +cuda cuda_arch=80
  - caliper +cuda cuda_arch=80
  - chai ~benchmarks ~tests +cuda cuda_arch=80 ^umpire ~shared
  # - cp2k +mpi +cuda cuda_arch=80 # cp2k: Error: KeyError: 'Point environment variable LIBSMM_PATH to the absolute path of the libsmm.a file'
  - flecsi +cuda cuda_arch=80
  - ginkgo +cuda cuda_arch=80
  - gromacs +cuda cuda_arch=80

@@ -306,6 +309,7 @@ spack:
  - cabana +cuda cuda_arch=90 ^kokkos +wrapper +cuda_lambda +cuda cuda_arch=90
  - caliper +cuda cuda_arch=90
  - chai ~benchmarks ~tests +cuda cuda_arch=90 ^umpire ~shared
  # - cp2k +mpi +cuda cuda_arch=90 # cp2k: cp2k only supports cuda_arch ('35', '37', '60', '70', '80')
  - flecsi +cuda cuda_arch=90
  - ginkgo +cuda cuda_arch=90
  - gromacs +cuda cuda_arch=90
Some files were not shown because too many files have changed in this diff.