Compare commits

293 Commits: `e4s-21.08...features/g`

SHA1:

```text
0d092d671f 9956841331 e9f1cfdaaf 060582a21d c392454125
fca81c2ac8 bafd84e191 1e08f31e16 1da7839898 a4a22a6926
ceb94bd6ae b745e208a3 6a8383b2a7 6210e694e1 2f889b045c
bc3b90f6ac 1f323d296d 819cd41ee4 c0069210e2 a1d792af4c
b7e61a4b75 59832fb0ac 5fa075f5b4 4573741baa e0d8f67f34
0545f7d5cc 3a9028427c 729726d157 81962f100c d54a692e09
b12f38383c 9084ad69b4 59d8031076 5fddd48f80 d8b95a496c
161f0d5045 d83f7110d5 edb1d75b1b ed9b38c8e3 1a5891754a
d916d801f2 46d770b416 6979a63396 f9314d38b0 e47f0d486c
fd0884c273 4033cc0250 c309adb4b3 c2a6ccbea8 ca94240dd4
0ac751b27b 9ef1dbd0ef c3d5232d5b f4e66b306e c33382b607
47b16b39a3 7018a42211 28f71c4d12 26455a4ac2 4e4b199f16
0fb5a39c17 a86279cc52 fd111a3395 32b6da8d57 c6e538583f
83298160cc 54fbe555cd c4e85faa2d a59edb2826 2e2fbc6408
5abbd094c7 ca58cb701c a358358aa7 84d525dbdf b5fa64fb10
c424b86a64 ca50c91469 13c0b0dcb3 834155fdb8 1badb47b80
1a48c0f51c 92e4db4681 dd8dc08a90 3e4b576f83 526315410a
66526cb57a beff29176c 2af6c57afa accd6dd228 0ad54e0679
bbc9d7d965 6f5ec73087 0aedafda19 ac3ccad1e2 5180b0b454
432f577a0c f6060c9894 c6c9213766 64407e253c de492e73d5
29d344e4c7 95586335f7 f56f4677cf 043f0cd014 9841d1f571
d61439c26a 0b9baf9ae3 2331148f4b ffec74c359 f162dd4f5b
8149048a78 2b7a2f66b7 551766e3c6 35fe188d22 5f4fcea79c
487edcc416 f71d93fc55 2d78045cdd 8e61f54260 8a7af82a82
0c61b31922 156edffec2 6d484a055a 03331de0f2 aabece46ba
4c23059017 ab37ac95bf 2411a9599e b0ee7deaa7 7adacf967d
2633cf7da6 3c6050d3a2 b34f289796 beb3524392 8ee5bf6d03
a018f48df9 e51463f587 378543b554 3cd224afbf 44c0089be4
65584a3b92 ab657d7b53 d3d0ee7328 ed17c3638b 2f777d08a2
9be81ac4d9 e60e41d9ca a2293e6ee1 d381ab77b2 1bf051e229
506f62ddfe a6a448b16c 0dfa49af8e b3128af901 5029b8ca55
0d226aa710 07a9cb87ef 7cafe7dd66 40788cf49a 025dbb2162
e2b9ba3001 3a4073cfff 9577d890c4 f5ab3ad82a b5d3c48824
9d17d474ff 50411f8394 32210b0658 98e6e4a3a5 a7c6224b3a
9d95125d6a ed07fa4c37 2d97d877e4 7fd4dee962 4f3a538519
5c1710f7dc 97ea57e59f c152e558e9 12e87ebf14 1113705080
c3dabf05f4 d0d6b29c9e 74389472ab f5d4f5bdac 1d4e00a9ff
8530ea88a3 e57780d7f0 e39c9a7656 bdb02ed535 b5f812cd32
abfd8fa70b 6eb942cf45 9dab298f0d 6a26322eb3 23106ac0f5
29d1bc6546 c963bdee8b 6b3518d6fd 6a31ca7386 8664abc178
a3d8e95e76 1ab6f30fdd 7dd3592eab e602c40d09 270cbf08e3
4ddc0ff218 73005166ef 204b49fc1f de3c0e62d0 e4e4bf75ca
af2f07852c e2e7b0788f d27e0bff5a fafe1cb7e8 3e2f890467
6fab0e1b9c f4c9161f84 fd095a3660 f6a9ef5ef5 df10e88e97
99076660d4 80713e234c 1374fea5d9 7274d8bca2 73208f5835
107693fbd1 31dcdf7262 b2968c817f 213ec6df5f 5823a9b302
01cbf3b81c 6e68792ded 2971a630b8 bf7ce7e4e9 c5c809ee3e
9a8d7ea3cb a68701c636 1212847eee 768ea7e8f7 9b66138054
cf7e40f03c 8c25b17d8e a7a37e4de6 37a1885deb 81e4155eaf
7d666fc220 c4e50c9efb caed90fcf2 84100afc91 37e4d32d53
c0bb2b9943 b1755c4fb3 2474b91078 dcd19e7982 8be614729c
201f5bdfe8 ef32ff0e4c 2754f2f506 89f442392e 9ead83caa7
737f09f2b0 0c25015fdc e6d1485c28 e9e0cd0728 09b52c4f04
eb5061d54c b124fbb0c8 9b239392b1 05e933d7af 65a7ceb3ce
bdf7754552 b6f7fa6eb5 2f85d3cdb9 a6d26598ef d699478ab8
68d488546f 1d2798dd77 6ce0d934cf ec720dd148 d52a1b8279
e8bcb43695 220a87812c 10695f1ed3 350372e3bf cd91abcf88
c865aaaa0f 4318ceb2b3 8a32f72829 06c8fdafd4 b22728d55c
c869f3639d d00fc55e41 09378f56c0
```
`.github/workflows/bootstrap.yml` (vendored, 56 changes)

```diff
@@ -11,6 +11,7 @@ on:
       - 'var/spack/repos/builtin/**'
       - '!var/spack/repos/builtin/packages/clingo-bootstrap/**'
       - '!var/spack/repos/builtin/packages/python/**'
       - '!var/spack/repos/builtin/packages/re2c/**'
+      - 'lib/spack/docs/**'
   schedule:
     # nightly at 2:16 AM
@@ -18,7 +19,7 @@ on:
 
 jobs:
 
-  fedora:
+  fedora-sources:
     runs-on: ubuntu-latest
     container: "fedora:latest"
     steps:
@@ -40,11 +41,12 @@ jobs:
         shell: runuser -u spack-test -- bash {0}
         run: |
           source share/spack/setup-env.sh
+          spack bootstrap untrust github-actions
           spack external find cmake bison
           spack -d solve zlib
           tree ~/.spack/bootstrap/store/
 
-  ubuntu:
+  ubuntu-sources:
     runs-on: ubuntu-latest
     container: "ubuntu:latest"
     steps:
@@ -69,11 +71,12 @@ jobs:
         shell: runuser -u spack-test -- bash {0}
         run: |
           source share/spack/setup-env.sh
+          spack bootstrap untrust github-actions
           spack external find cmake bison
           spack -d solve zlib
           tree ~/.spack/bootstrap/store/
 
-  opensuse:
+  opensuse-sources:
     runs-on: ubuntu-latest
     container: "opensuse/tumbleweed:latest"
     steps:
@@ -93,11 +96,12 @@ jobs:
       - name: Bootstrap clingo
         run: |
           source share/spack/setup-env.sh
+          spack bootstrap untrust github-actions
           spack external find cmake bison
           spack -d solve zlib
           tree ~/.spack/bootstrap/store/
 
-  macos:
+  macos-sources:
     runs-on: macos-latest
     steps:
       - name: Install dependencies
@@ -108,6 +112,50 @@ jobs:
         run: |
           source share/spack/setup-env.sh
           export PATH=/usr/local/opt/bison@2.7/bin:$PATH
+          spack bootstrap untrust github-actions
           spack external find --not-buildable cmake bison
           spack -d solve zlib
           tree ~/.spack/bootstrap/store/
+
+  macos-clingo-binaries:
+    runs-on: macos-latest
+    strategy:
+      matrix:
+        python-version: ['3.5', '3.6', '3.7', '3.8', '3.9']
+    steps:
+      - name: Install dependencies
+        run: |
+          brew install tree
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Bootstrap clingo
+        run: |
+          source share/spack/setup-env.sh
+          spack bootstrap untrust spack-install
+          spack -d solve zlib
+          tree ~/.spack/bootstrap/store/
+
+
+  ubuntu-clingo-binaries:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9']
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Setup repo and non-root user
+        run: |
+          git --version
+          git fetch --unshallow
+          . .github/workflows/setup_git.sh
+      - name: Bootstrap clingo
+        run: |
+          source share/spack/setup-env.sh
+          spack bootstrap untrust spack-install
+          spack -d solve zlib
+          tree ~/.spack/bootstrap/store/
```
`.github/workflows/unit_tests.yaml` (vendored, 64 changes)

```diff
@@ -131,10 +131,7 @@ jobs:
           # Needed for unit tests
           sudo apt-get -y install \
               coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build \
-              patchelf
-          # Needed for kcov
-          sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
-          sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
+              patchelf cmake bison libbison-dev kcov
       - name: Install Python packages
         run: |
           pip install --upgrade pip six setuptools codecov coverage[toml]
@@ -148,24 +145,13 @@ jobs:
           # Need this for the git tests to succeed.
           git --version
           . .github/workflows/setup_git.sh
-      - name: Install kcov for bash script coverage
-        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
-        env:
-          KCOV_VERSION: 34
-        run: |
-          KCOV_ROOT=$(mktemp -d)
-          wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
-          tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
-          mkdir -p ${KCOV_ROOT}/build
-          cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
-          make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
-      - name: Bootstrap clingo from sources
+      - name: Bootstrap clingo
         if: ${{ matrix.concretizer == 'clingo' }}
         env:
           SPACK_PYTHON: python
         run: |
           . share/spack/setup-env.sh
+          spack external find --not-buildable cmake bison
+          spack bootstrap untrust spack-install
           spack -v solve zlib
       - name: Run unit tests (full suite with coverage)
         if: ${{ needs.changes.outputs.with_coverage == 'true' }}
@@ -185,7 +171,7 @@ jobs:
           SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
         run: |
           share/spack/qa/run-unit-tests
-      - uses: codecov/codecov-action@v2.0.2
+      - uses: codecov/codecov-action@v2.0.3
         if: ${{ needs.changes.outputs.with_coverage == 'true' }}
         with:
           flags: unittests,linux,${{ matrix.concretizer }}
@@ -204,10 +190,7 @@ jobs:
         run: |
           sudo apt-get -y update
           # Needed for shell tests
-          sudo apt-get install -y coreutils csh zsh tcsh fish dash bash
-          # Needed for kcov
-          sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
-          sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
+          sudo apt-get install -y coreutils kcov csh zsh tcsh fish dash bash
       - name: Install Python packages
         run: |
           pip install --upgrade pip six setuptools codecov coverage[toml]
@@ -216,17 +199,6 @@ jobs:
           # Need this for the git tests to succeed.
           git --version
           . .github/workflows/setup_git.sh
-      - name: Install kcov for bash script coverage
-        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
-        env:
-          KCOV_VERSION: 38
-        run: |
-          KCOV_ROOT=$(mktemp -d)
-          wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
-          tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
-          mkdir -p ${KCOV_ROOT}/build
-          cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
-          make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
       - name: Run shell tests (without coverage)
         if: ${{ needs.changes.outputs.with_coverage == 'false' }}
         run: |
@@ -237,7 +209,7 @@ jobs:
           COVERAGE: true
         run: |
           share/spack/qa/run-shell-tests
-      - uses: codecov/codecov-action@v2.0.2
+      - uses: codecov/codecov-action@v2.0.3
         if: ${{ needs.changes.outputs.with_coverage == 'true' }}
         with:
           flags: shelltests,linux
@@ -314,21 +286,7 @@ jobs:
           # Needed for unit tests
           sudo apt-get -y install \
               coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build \
-              patchelf
-          # Needed for kcov
-          sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
-          sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
-      - name: Install kcov for bash script coverage
-        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
-        env:
-          KCOV_VERSION: 34
-        run: |
-          KCOV_ROOT=$(mktemp -d)
-          wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
-          tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
-          mkdir -p ${KCOV_ROOT}/build
-          cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
-          make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
+              patchelf kcov
       - name: Install Python packages
         run: |
           pip install --upgrade pip six setuptools codecov coverage[toml] clingo
@@ -353,7 +311,7 @@ jobs:
           SPACK_TEST_SOLVER: clingo
         run: |
           share/spack/qa/run-unit-tests
-      - uses: codecov/codecov-action@v2.0.2
+      - uses: codecov/codecov-action@v2.0.3
         if: ${{ needs.changes.outputs.with_coverage == 'true' }}
         with:
           flags: unittests,linux,clingo
@@ -379,10 +337,14 @@ jobs:
         run: |
           brew install dash fish gcc gnupg2 kcov
       - name: Run unit tests
+        env:
+          SPACK_TEST_SOLVER: clingo
         run: |
           git --version
           . .github/workflows/setup_git.sh
           . share/spack/setup-env.sh
+          $(which spack) bootstrap untrust spack-install
+          $(which spack) solve zlib
           if [ "${{ needs.changes.outputs.with_coverage }}" == "true" ]
           then
             coverage run $(which spack) unit-test -x
@@ -395,7 +357,7 @@ jobs:
             echo "ONLY PACKAGE RECIPES CHANGED [skipping coverage]"
             $(which spack) unit-test -x -m "not maybeslow" -k "package_sanity"
           fi
-      - uses: codecov/codecov-action@v2.0.2
+      - uses: codecov/codecov-action@v2.0.3
         if: ${{ needs.changes.outputs.with_coverage == 'true' }}
         with:
           files: ./coverage.xml
```
`.gitignore` (vendored, 1 change)

```diff
@@ -132,6 +132,7 @@ celerybeat.pid
 .env
 .venv
 env/
+!/lib/spack/env
 venv/
 ENV/
 env.bak/
```
`bin/spack` (23 changes)

```diff
@@ -28,6 +28,7 @@ exit 1
 from __future__ import print_function
 
+import os
 import os.path
 import sys
 
 min_python3 = (3, 5)
@@ -70,6 +71,28 @@ if "ruamel.yaml" in sys.modules:
 if "ruamel" in sys.modules:
     del sys.modules["ruamel"]
 
+# The following code is here to avoid failures when updating
+# the develop version, due to spurious argparse.pyc files remaining
+# in the libs/spack/external directory, see:
+# https://github.com/spack/spack/pull/25376
+# TODO: Remove in v0.18.0 or later
+try:
+    import argparse
+except ImportError:
+    argparse_pyc = os.path.join(spack_external_libs, 'argparse.pyc')
+    if not os.path.exists(argparse_pyc):
+        raise
+    try:
+        os.remove(argparse_pyc)
+        import argparse  # noqa
+    except Exception:
+        msg = ('The file\n\n\t{0}\n\nis corrupted and cannot be deleted by Spack. '
+               'Either delete it manually or ask some administrator to '
+               'delete it for you.')
+        print(msg.format(argparse_pyc))
+        sys.exit(1)
+
+
 import spack.main  # noqa
 
 # Once we've set up the system path, run the spack main method
```
```diff
@@ -5,3 +5,28 @@ bootstrap:
   # Root directory for bootstrapping work. The software bootstrapped
   # by Spack is installed in a "store" subfolder of this root directory
   root: ~/.spack/bootstrap
+  # Methods that can be used to bootstrap software. Each method may or
+  # may not be able to bootstrap all of the software that Spack needs,
+  # depending on its type.
+  sources:
+  - name: 'github-actions'
+    type: buildcache
+    description: |
+      Buildcache generated from a public workflow using Github Actions.
+      The sha256 checksum of binaries is checked before installation.
+    info:
+      url: https://mirror.spack.io/bootstrap/github-actions/v0.1
+      homepage: https://github.com/alalazo/spack-bootstrap-mirrors
+      releases: https://github.com/alalazo/spack-bootstrap-mirrors/releases
+  # This method is just Spack bootstrapping the software it needs from sources.
+  # It has been added here so that users can selectively disable bootstrapping
+  # from sources by "untrusting" it.
+  - name: spack-install
+    type: install
+    description: |
+      Specs built from sources by Spack. May take a long time.
+  trusted:
+    # By default we trust bootstrapping from sources and from binaries
+    # produced on Github via the workflow
+    github-actions: true
+    spack-install: true
```
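The ``trusted`` map at the bottom is what ``spack bootstrap trust``/``untrust`` toggles: a source whose name maps to ``false`` is skipped entirely. A hypothetical sketch of that semantics follows; only the ``sources``/``trusted`` key names come from the YAML above, the rest is illustrative and is not Spack's actual code.

```python
# Hypothetical sketch: consume a trust-gated source list like the YAML above.
# Only the "sources"/"trusted" key names mirror the config; nothing here is
# Spack's real implementation.
config = {
    "sources": [
        {"name": "github-actions", "type": "buildcache"},
        {"name": "spack-install", "type": "install"},
    ],
    "trusted": {"github-actions": True, "spack-install": True},
}


def enabled_sources(config):
    """Yield bootstrap sources in order, skipping any that are untrusted."""
    for source in config["sources"]:
        if config["trusted"].get(source["name"], False):
            yield source


# After `spack bootstrap untrust github-actions`, only spack-install remains.
config["trusted"]["github-actions"] = False
print([s["name"] for s in enabled_sources(config)])  # ['spack-install']
```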
````diff
@@ -757,7 +757,7 @@ The output is colored, and written in the style of a git diff. This means that y
 can copy and paste it into a GitHub markdown as a code block with language "diff"
 and it will render nicely! Here is an example:
 
-.. code-block:: markdown
+.. code-block:: md
 
     ```diff
     --- zlib@1.2.11/efzjziyc3dmb5h5u5azsthgbgog5mj7g
````
```diff
@@ -63,6 +63,7 @@ on these ideas for each distinct build system that Spack supports:
    build_systems/intelpackage
    build_systems/rocmpackage
    build_systems/custompackage
+   build_systems/multiplepackage
 
 For reference, the :py:mod:`Build System API docs <spack.build_systems>`
 provide a list of build systems and methods/attributes that can be
```
`lib/spack/docs/build_systems/multiplepackage.rst` (new file, 350 lines)

```rst
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)

.. _multiplepackage:

----------------------
Multiple Build Systems
----------------------

Quite frequently, a package will change build systems from one version to the
next. For example, a small project that once used a single Makefile to build
may now require Autotools to handle the increased number of files that need to
be compiled. Or, a package that once used Autotools may switch to CMake for
Windows support. In this case, it becomes a bit more challenging to write a
single build recipe for this package in Spack.

There are several ways that this can be handled in Spack:

#. Subclass the new build system, and override phases as needed (preferred)
#. Subclass ``Package`` and implement ``install`` as needed
#. Create separate ``*-cmake``, ``*-autotools``, etc. packages for each build system
#. Rename the old package to ``*-legacy`` and create a new package
#. Move the old package to a ``legacy`` repository and create a new package
#. Drop older versions that only support the older build system

Of these options, 1 is preferred, and will be demonstrated in this
documentation. Options 3-5 have issues with concretization, so shouldn't be
used. Options 4-5 also don't support more than two build systems. Option 6 only
works if the old versions are no longer needed. Option 1 is preferred over 2
because it makes it easier to drop the old build system entirely.

The exact syntax of the package depends on which build systems you need to
support. Below are a couple of common examples.

^^^^^^^^^^^^^^^^^^^^^
Makefile -> Autotools
^^^^^^^^^^^^^^^^^^^^^

Let's say we have the following package:

.. code-block:: python

   class Foo(MakefilePackage):
       version("1.2.0", sha256="...")

       def edit(self, spec, prefix):
           filter_file("CC=", "CC=" + spack_cc, "Makefile")

       def install(self, spec, prefix):
           install_tree(".", prefix)


The package subclasses from :ref:`makefilepackage`, which has three phases:

#. ``edit`` (does nothing by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)

In this case, the ``install`` phase needed to be overridden because the
Makefile did not have an install target. We also modify the Makefile to use
Spack's compiler wrappers. The default ``build`` phase is not changed.

Starting with version 1.3.0, we want to use Autotools to build instead.
:ref:`autotoolspackage` has four phases:

#. ``autoreconf`` (does not run if a configure script already exists)
#. ``configure`` (runs ``./configure --prefix=...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)

If the only version we need to support is 1.3.0, the package would look as
simple as:

.. code-block:: python

   class Foo(AutotoolsPackage):
       version("1.3.0", sha256="...")

       def configure_args(self):
           return ["--enable-shared"]


In this case, we use the default methods for each phase and only override
``configure_args`` to specify additional flags to pass to ``./configure``.

If we wanted to write a single package that supports both versions 1.2.0 and
1.3.0, it would look something like:

.. code-block:: python

   class Foo(AutotoolsPackage):
       version("1.3.0", sha256="...")
       version("1.2.0", sha256="...", deprecated=True)

       def configure_args(self):
           return ["--enable-shared"]

       # Remove the following once version 1.2.0 is dropped
       @when("@:1.2")
       def patch(self):
           filter_file("CC=", "CC=" + spack_cc, "Makefile")

       @when("@:1.2")
       def autoreconf(self, spec, prefix):
           pass

       @when("@:1.2")
       def configure(self, spec, prefix):
           pass

       @when("@:1.2")
       def install(self, spec, prefix):
           install_tree(".", prefix)


There are a few interesting things to note here:

* We added ``deprecated=True`` to version 1.2.0. This signifies that version
  1.2.0 is deprecated and shouldn't be used. However, if a user still relies
  on version 1.2.0, it's still there and builds just fine.
* We moved the contents of the ``edit`` phase to the ``patch`` function. Since
  ``AutotoolsPackage`` doesn't have an ``edit`` phase, the only way for this
  step to be executed is to move it to the ``patch`` function, which always
  gets run.
* The ``autoreconf`` and ``configure`` phases become no-ops. Since the old
  Makefile-based build system doesn't use these, we ignore these phases when
  building ``foo@1.2.0``.
* The ``@when`` decorator is used to override these phases only for older
  versions. The default methods are used for ``foo@1.3:``.

Once a new Spack release comes out, version 1.2.0 and everything below the
comment can be safely deleted. The result is the same as if we had written a
package for version 1.3.0 from scratch.

^^^^^^^^^^^^^^^^^^
Autotools -> CMake
^^^^^^^^^^^^^^^^^^

Let's say we have the following package:

.. code-block:: python

   class Bar(AutotoolsPackage):
       version("1.2.0", sha256="...")

       def configure_args(self):
           return ["--enable-shared"]


The package subclasses from :ref:`autotoolspackage`, which has four phases:

#. ``autoreconf`` (does not run if a configure script already exists)
#. ``configure`` (runs ``./configure --prefix=...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)

In this case, we use the default methods for each phase and only override
``configure_args`` to specify additional flags to pass to ``./configure``.

Starting with version 1.3.0, we want to use CMake to build instead.
:ref:`cmakepackage` has three phases:

#. ``cmake`` (runs ``cmake ...`` by default)
#. ``build`` (runs ``make`` by default)
#. ``install`` (runs ``make install`` by default)

If the only version we need to support is 1.3.0, the package would look as
simple as:

.. code-block:: python

   class Bar(CMakePackage):
       version("1.3.0", sha256="...")

       def cmake_args(self):
           return [self.define("BUILD_SHARED_LIBS", True)]


In this case, we use the default methods for each phase and only override
``cmake_args`` to specify additional flags to pass to ``cmake``.

If we wanted to write a single package that supports both versions 1.2.0 and
1.3.0, it would look something like:

.. code-block:: python

   class Bar(CMakePackage):
       version("1.3.0", sha256="...")
       version("1.2.0", sha256="...", deprecated=True)

       def cmake_args(self):
           return [self.define("BUILD_SHARED_LIBS", True)]

       # Remove the following once version 1.2.0 is dropped
       def configure_args(self):
           return ["--enable-shared"]

       @when("@:1.2")
       def cmake(self, spec, prefix):
           configure("--prefix=" + prefix, *self.configure_args())


There are a few interesting things to note here:

* We added ``deprecated=True`` to version 1.2.0. This signifies that version
  1.2.0 is deprecated and shouldn't be used. However, if a user still relies
  on version 1.2.0, it's still there and builds just fine.
* Since CMake and Autotools are so similar, we only need to override the
  ``cmake`` phase; we can use the default ``build`` and ``install`` phases.
* We override ``cmake`` to run ``./configure`` for older versions.
  ``configure_args`` remains the same.
* The ``@when`` decorator is used to override these phases only for older
  versions. The default methods are used for ``bar@1.3:``.

Once a new Spack release comes out, version 1.2.0 and everything below the
comment can be safely deleted. The result is the same as if we had written a
package for version 1.3.0 from scratch.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Multiple build systems for the same version
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

During the transition from one build system to another, developers often
support multiple build systems at the same time. Spack can only use a single
build system for a single version. To decide which build system to use for a
particular version, take the following things into account:

1. If the developers explicitly state that one build system is preferred over
   another, use that one.
2. If one build system is considered "experimental" while another is considered
   "stable", use the stable build system.
3. Otherwise, use the newer build system.

The developer preference for which build system to use can change over time as
a newer build system becomes stable/recommended.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Dropping support for old build systems
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

When older versions of a package don't support a newer build system, it can be
tempting to simply delete them from a package. This significantly reduces
package complexity and makes the build recipe much easier to maintain. However,
other packages or Spack users may rely on these older versions. The recommended
approach is to first support both build systems (as demonstrated above),
:ref:`deprecate <deprecate>` versions that rely on the old build system, and
remove those versions and any phases that needed to be overridden in the next
Spack release.

^^^^^^^^^^^^^^^^^^^^^^^^^^^
Three or more build systems
^^^^^^^^^^^^^^^^^^^^^^^^^^^

In rare cases, a package may change build systems multiple times. For example,
a package may start with Makefiles, then switch to Autotools, then switch to
CMake. The same logic used above can be extended to any number of build systems.
For example:

.. code-block:: python

   class Baz(CMakePackage):
       version("1.4.0", sha256="...")  # CMake
       version("1.3.0", sha256="...")  # Autotools
       version("1.2.0", sha256="...")  # Makefile

       def cmake_args(self):
           return [self.define("BUILD_SHARED_LIBS", True)]

       # Remove the following once version 1.3.0 is dropped
       def configure_args(self):
           return ["--enable-shared"]

       @when("@1.3")
       def cmake(self, spec, prefix):
           configure("--prefix=" + prefix, *self.configure_args())

       # Remove the following once version 1.2.0 is dropped
       @when("@:1.2")
       def patch(self):
           filter_file("CC=", "CC=" + spack_cc, "Makefile")

       @when("@:1.2")
       def cmake(self, spec, prefix):
           pass

       @when("@:1.2")
       def install(self, spec, prefix):
           install_tree(".", prefix)


^^^^^^^^^^^^^^^^^^^
Additional examples
^^^^^^^^^^^^^^^^^^^

When writing new packages, it often helps to see examples of existing packages.
Here is an incomplete list of existing Spack packages that have changed build
systems before:

================ ===================== ================
Package          Previous Build System New Build System
================ ===================== ================
amber            custom                CMake
arpack-ng        Autotools             CMake
atk              Autotools             Meson
blast            None                  Autotools
dyninst          Autotools             CMake
evtgen           Autotools             CMake
fish             Autotools             CMake
gdk-pixbuf       Autotools             Meson
glib             Autotools             Meson
glog             Autotools             CMake
gmt              Autotools             CMake
gtkplus          Autotools             Meson
hpl              Makefile              Autotools
interproscan     Perl                  Maven
jasper           Autotools             CMake
kahip            SCons                 CMake
kokkos           Makefile              CMake
kokkos-kernels   Makefile              CMake
leveldb          Makefile              CMake
libdrm           Autotools             Meson
libjpeg-turbo    Autotools             CMake
mesa             Autotools             Meson
metis            None                  CMake
mpifileutils     Autotools             CMake
muparser         Autotools             CMake
mxnet            Makefile              CMake
nest             Autotools             CMake
neuron           Autotools             CMake
nsimd            CMake                 nsconfig
opennurbs        Makefile              CMake
optional-lite    None                  CMake
plasma           Makefile              CMake
preseq           Makefile              Autotools
protobuf         Autotools             CMake
py-pygobject     Autotools             Python
singularity      Autotools             Makefile
span-lite        None                  CMake
ssht             Makefile              CMake
string-view-lite None                  CMake
superlu          Makefile              CMake
superlu-dist     Makefile              CMake
uncrustify       Autotools             CMake
================ ===================== ================

Packages that support multiple build systems can be a bit confusing to write.
Don't hesitate to open an issue or draft pull request and ask for advice from
other Spack developers!
```
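The recipes above lean entirely on ``@when``'s version-conditional dispatch. The sketch below is not Spack's ``@when`` implementation; it is a minimal, self-contained illustration of the idea of picking a phase implementation by version range, with all names hypothetical.

```python
# Minimal sketch of version-conditional dispatch in the spirit of @when("@:1.2").
# NOT Spack's implementation -- just enough to show why `install` can be
# overridden for old versions while 1.3+ keeps the default phase.

def version_leq(version, bound):
    """True if dotted `version` falls within "@:bound" (e.g. "1.2.0" in "@:1.2")."""
    v = tuple(int(x) for x in version.split("."))
    b = tuple(int(x) for x in bound.split("."))
    return v[:len(b)] <= b


def pick_install(version):
    """Select the install behavior the way @when-decorated phases would."""
    if version_leq(version, "1.2"):            # the @when("@:1.2") branch
        return "install_tree('.', prefix)"     # legacy Makefile-style install
    return "make install"                      # default AutotoolsPackage phase


print(pick_install("1.2.0"))  # install_tree('.', prefix)
print(pick_install("1.3.0"))  # make install
```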
```diff
@@ -211,7 +211,7 @@ Spec-related modules
   yet.
 
 :mod:`spack.architecture`
-  :func:`architecture.sys_type <spack.architecture.sys_type>` is used
+  :func:`architecture.default_arch <spack.architecture.default_arch>` is used
   to determine the host architecture while building.
 
 .. warning::
```
```diff
@@ -732,13 +732,17 @@ Configuring environment views
 The Spack Environment manifest file has a top-level keyword
 ``view``. Each entry under that heading is a view descriptor, headed
 by a name. The view descriptor contains the root of the view, and
-optionally the projections for the view, and ``select`` and
-``exclude`` lists for the view. For example, in the following manifest
+optionally the projections for the view, ``select`` and
+``exclude`` lists for the view and link information via ``link`` and
+``link_type``. For example, in the following manifest
 file snippet we define a view named ``mpis``, rooted at
 ``/path/to/view`` in which all projections use the package name,
 version, and compiler name to determine the path for a given
 package. This view selects all packages that depend on MPI, and
 excludes those built with the PGI compiler at version 18.5.
+All the dependencies of each root spec in the environment will be linked
+in the view due to the command ``link: all`` and the files in the view will
+be symlinks to the spack install directories.
 
 .. code-block:: yaml
 
@@ -751,11 +755,16 @@ excludes those built with the PGI compiler at version 18.5.
          exclude: ['%pgi@18.5']
          projections:
            all: {name}/{version}-{compiler.name}
+         link: all
+         link_type: symlink
 
 For more information on using view projections, see the section on
 :ref:`adding_projections_to_views`. The default for the ``select`` and
 ``exclude`` values is to select everything and exclude nothing. The
-default projection is the default view projection (``{}``).
+default projection is the default view projection (``{}``). The ``link``
+defaults to ``all`` but can also be ``roots`` when only the root specs
+in the environment are desired in the view. The ``link_type`` defaults
+to ``symlink`` but can also take the value of ``hardlink`` or ``copy``.
 
 Any number of views may be defined under the ``view`` heading in a
 Spack Environment.
```
```diff
@@ -9,22 +9,16 @@
 Getting Started
 ===============
 
--------------
-Prerequisites
--------------
+--------------------
+System Prerequisites
+--------------------
 
-Spack has the following minimum requirements, which must be installed
-before Spack is run:
+Spack has the following minimum system requirements, which are assumed to
+be present on the machine where Spack is run:
 
-#. Python 2 (2.6 or 2.7) or 3 (3.5 - 3.9) to run Spack
-#. A C/C++ compiler for building and the ``bash`` shell for Spack's compiler
-   wrapper
-#. The ``make`` executable for building
-#. The ``tar``, ``gzip``, ``unzip``, ``bzip2``, ``xz`` and optionally ``zstd``
-   executables for extracting source code
-#. The ``patch`` command to apply patches
-#. The ``git`` and ``curl`` commands for fetching
-#. If using the ``gpg`` subcommand, ``gnupg2`` is required
+.. csv-table:: System prerequisites for Spack
+   :file: tables/system_prerequisites.csv
+   :header-rows: 1
 
 These requirements can be easily installed on most modern Linux systems;
 on macOS, XCode is required. Spack is designed to run on HPC
@@ -90,6 +84,151 @@ sourcing time, ensuring future invocations of the ``spack`` command will
 continue to use the same consistent python version regardless of changes in
 the environment.
 
+^^^^^^^^^^^^^^^^^^^^
+Bootstrapping clingo
+^^^^^^^^^^^^^^^^^^^^
+
+Spack supports using ``clingo`` as an external solver to compute which software
+needs to be installed. The default configuration allows Spack to install
+``clingo`` from a public buildcache, created by a Github Action workflow. In this
+case the bootstrapping procedure is transparent to the user, except for a
+slightly long waiting time on the first concretization of a spec:
+
+.. code-block:: console
+
+   $ spack find -b
+   ==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
+   ==> 0 installed packages
+
+   $ time spack solve zlib
+   ==> Best of 2 considered solutions.
+   ==> Optimization Criteria:
+     Priority  Criterion                                    Value
+       1       deprecated versions used                     0
+       2       version weight                               0
+       3       number of non-default variants (roots)       0
+       4       multi-valued variants                        0
+       5       preferred providers for roots                0
+       6       number of non-default variants (non-roots)   0
+       7       preferred providers (non-roots)              0
+       8       compiler mismatches                          0
+       9       version badness                              0
+      10       count of non-root multi-valued variants      0
+      11       non-preferred compilers                      0
+      12       target mismatches                            0
+      13       non-preferred targets                        0
+
+   zlib@1.2.11%gcc@11.1.0+optimize+pic+shared arch=linux-ubuntu18.04-broadwell
+
+   real  0m30,618s
+   user  0m27,278s
+   sys   0m1,549s
+
+After this command you'll see that ``clingo`` has been installed for Spack's own use:
+
+.. code-block:: console
+
+   $ spack find -b
+   ==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
+   ==> 2 installed packages
+   -- linux-rhel5-x86_64 / gcc@9.3.0 -------------------------------
+   clingo-bootstrap@spack  python@3.6
+
+Subsequent calls to the concretizer will then be much faster:
+
+.. code-block:: console
+
+   $ time spack solve zlib
+   [ ... ]
+   real  0m1,222s
+   user  0m1,146s
+   sys   0m0,059s
+
+If for security or for other reasons you don't want to or can't install precompiled
+binaries, Spack can fall back to bootstrap ``clingo`` from source files. To forbid
+Spack from retrieving binaries from the bootstrapping buildcache, the following
+command must be given:
+
+.. code-block:: console
+
+   $ spack bootstrap untrust github-actions
+   ==> "github-actions" is now untrusted and will not be used for bootstrapping
+
+since an "untrusted" way of bootstrapping software will not be considered
+by Spack. You can verify the new settings are effective with:
+
+.. code-block:: console
+
+   $ spack bootstrap list
+   Name: github-actions UNTRUSTED
+
+     Type: buildcache
+
+     Info:
+       url: https://mirror.spack.io/bootstrap/github-actions/v0.1
+       homepage: https://github.com/alalazo/spack-bootstrap-mirrors
+       releases: https://github.com/alalazo/spack-bootstrap-mirrors/releases
+
+     Description:
+       Buildcache generated from a public workflow using Github Actions.
+       The sha256 checksum of binaries is checked before installation.
+
+
+   Name: spack-install TRUSTED
+
+     Type: install
+
+     Description:
+       Specs built from sources by Spack. May take a long time.
+
+When bootstrapping from sources, Spack requires a compiler with support
+for C++14 (GCC on ``linux``, Apple Clang on ``darwin``) and static C++
+standard libraries on ``linux``. Spack will build the required software
+on the first request to concretize a spec:
+
+.. code-block:: console
+
+   $ spack solve zlib
+   [+] /usr (external bison-3.0.4-wu5pgjchxzemk5ya2l3ddqug2d7jv6eb)
+   [+] /usr (external cmake-3.19.4-a4kmcfzxxy45mzku4ipmj5kdiiz5a57b)
+   [+] /usr (external python-3.6.9-x4fou4iqqlh5ydwddx3pvfcwznfrqztv)
+   ==> Installing re2c-1.2.1-e3x6nxtk3ahgd63ykgy44mpuva6jhtdt
+   [ ... ]
+   ==> Optimization: [0, 0, 0, 0, 0, 1, 0, 0, 0]
+   zlib@1.2.11%gcc@10.1.0+optimize+pic+shared arch=linux-ubuntu18.04-broadwell
+
+.. tip::
+
+   If you want to speed-up bootstrapping ``clingo`` from sources, you may try to
+   search for ``cmake`` and ``bison`` on your system:
+
+   .. code-block:: console
+
+      $ spack external find cmake bison
+      ==> The following specs have been detected on this system and added to /home/spack/.spack/packages.yaml
+      bison@3.0.4  cmake@3.19.4
+
+"""""""""""""""""""
+The Bootstrap Store
+"""""""""""""""""""
+
+All the tools Spack needs for its own functioning are installed in a separate store, which lives
+under the ``${HOME}/.spack`` directory. The software installed there can be queried with:
+
+.. code-block:: console
+
+   $ spack find --bootstrap
+   ==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
+   ==> 3 installed packages
+   -- linux-ubuntu18.04-x86_64 / gcc@10.1.0 ------------------------
+   clingo-bootstrap@spack  python@3.6.9  re2c@1.2.1
+
+In case it's needed the bootstrap store can also be cleaned with:
+
+.. code-block:: console
+
+   $ spack clean -b
+   ==> Removing software in "/home/spack/.spack/bootstrap/store"
+
 ^^^^^^^^^^^^^^^^^^
 Check Installation
@@ -118,53 +257,6 @@ environment*, especially for ``PATH``. Only software that comes with
 the system, or that you know you wish to use with Spack, should be
 included. This procedure will avoid many strange build errors.
 
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Optional: Bootstrapping clingo
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Spack supports using clingo as an external solver to compute which software
-needs to be installed. If you have a default compiler supporting C++14 Spack
-can automatically bootstrap this tool from sources the first time it is
-needed:
-
-.. code-block:: console
-
-   $ spack solve zlib
-   [+] /usr (external bison-3.0.4-wu5pgjchxzemk5ya2l3ddqug2d7jv6eb)
-   [+] /usr (external cmake-3.19.4-a4kmcfzxxy45mzku4ipmj5kdiiz5a57b)
-   [+] /usr (external python-3.6.9-x4fou4iqqlh5ydwddx3pvfcwznfrqztv)
-   ==> Installing re2c-1.2.1-e3x6nxtk3ahgd63ykgy44mpuva6jhtdt
-   [ ... ]
-   ==> Optimization: [0, 0, 0, 0, 0, 1, 0, 0, 0]
-   zlib@1.2.11%gcc@10.1.0+optimize+pic+shared arch=linux-ubuntu18.04-broadwell
-
-If you want to speed-up bootstrapping, you may try to search for ``cmake`` and ``bison``
-on your system:
-
-.. code-block:: console
-
-   $ spack external find cmake bison
-   ==> The following specs have been detected on this system and added to /home/spack/.spack/packages.yaml
-   bison@3.0.4  cmake@3.19.4
-
-All the tools Spack needs for its own functioning are installed in a separate store, which lives
-under the ``${HOME}/.spack`` directory. The software installed there can be queried with:
-
-.. code-block:: console
-
-   $ spack find --bootstrap
-   ==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
-   ==> 3 installed packages
-   -- linux-ubuntu18.04-x86_64 / gcc@10.1.0 ------------------------
-   clingo-bootstrap@spack  python@3.6.9  re2c@1.2.1
-
-In case it's needed the bootstrap store can also be cleaned with:
-
-.. code-block:: console
-
-   $ spack clean -b
-   ==> Removing software in "/home/spack/.spack/bootstrap/store"
-
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 Optional: Alternate Prefix
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -368,6 +460,34 @@ then inject those flags into the compiler command. Compiler flags
 entered from the command line will be discussed in more detail in the
 following section.
 
+Some compilers also require additional environment configuration.
+Examples include Intel's oneAPI and AMD's AOCC compiler suites,
+which have custom scripts for loading environment variables and setting paths.
+These variables should be specified in the ``environment`` section of the compiler
+specification. The operations available to modify the environment are ``set``, ``unset``,
+``prepend_path``, ``append_path``, and ``remove_path``. For example:
+
+.. code-block:: yaml
+
+   compilers:
+     - compiler:
+         modules: []
+         operating_system: centos6
+         paths:
+           cc: /opt/intel/oneapi/compiler/latest/linux/bin/icx
+           cxx: /opt/intel/oneapi/compiler/latest/linux/bin/icpx
+           f77: /opt/intel/oneapi/compiler/latest/linux/bin/ifx
+           fc: /opt/intel/oneapi/compiler/latest/linux/bin/ifx
+         spec: oneapi@latest
+         environment:
+           set:
+             MKL_ROOT: "/path/to/mkl/root"
+           unset: # A list of environment variables to unset
+             - CC
+           prepend_path: # Similar for append|remove_path
+             LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh
+
+
 ^^^^^^^^^^^^^^^^^^^^^^^
 Build Your Own Compiler
 ^^^^^^^^^^^^^^^^^^^^^^^
@@ -522,8 +642,9 @@ Fortran.
 #. Run ``spack compiler find`` to locate Clang.
 
 #. There are different ways to get ``gfortran`` on macOS. For example, you can
-   install GCC with Spack (``spack install gcc``) or with Homebrew
-   (``brew install gcc``).
+   install GCC with Spack (``spack install gcc``), with Homebrew (``brew install
+   gcc``), or from a `DMG installer
+   <https://github.com/fxcoudert/gfortran-for-macOS/releases>`_.
 
 #. The only thing left to do is to edit ``~/.spack/darwin/compilers.yaml`` to provide
    the path to ``gfortran``:
@@ -544,7 +665,8 @@ Fortran.
    If you used Spack to install GCC, you can get the installation prefix by
    ``spack location -i gcc`` (this will only work if you have a single version
    of GCC installed). Whereas for Homebrew, GCC is installed in
-   ``/usr/local/Cellar/gcc/x.y.z``.
+   ``/usr/local/Cellar/gcc/x.y.z``. With the DMG installer, the correct path
+   will be ``/usr/local/gfortran``.
 
 ^^^^^^^^^^^^^^^^^^^^^
 Compiler Verification
```
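The five environment operations named in the added docs are easy to state precisely. The sketch below applies them to a plain dict standing in for the process environment; ``apply_env_mods`` is a hypothetical helper, not Spack's API, and only the operation names and the example values come from the documentation above.

```python
# Hypothetical sketch of the set/unset/prepend_path/append_path/remove_path
# operations a compiler's `environment` section supports. Not Spack's code;
# a plain dict stands in for os.environ to keep the semantics visible.
def apply_env_mods(env, mods):
    for name, value in mods.get("set", {}).items():
        env[name] = value                                  # set: overwrite
    for name in mods.get("unset", []):
        env.pop(name, None)                                # unset: remove
    for name, path in mods.get("prepend_path", {}).items():
        env[name] = path + (":" + env[name] if env.get(name) else "")
    for name, path in mods.get("append_path", {}).items():
        env[name] = (env[name] + ":" if env.get(name) else "") + path
    for name, path in mods.get("remove_path", {}).items():
        parts = [p for p in env.get(name, "").split(":") if p and p != path]
        env[name] = ":".join(parts)
    return env


env = apply_env_mods(
    {"CC": "gcc", "LD_LIBRARY_PATH": "/usr/lib"},
    {"set": {"MKL_ROOT": "/path/to/mkl/root"},
     "unset": ["CC"],
     "prepend_path": {"LD_LIBRARY_PATH": "/ld/paths/added/by/setvars/sh"}},
)
print(env["LD_LIBRARY_PATH"])  # /ld/paths/added/by/setvars/sh:/usr/lib
print("CC" in env)             # False
```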
```diff
@@ -612,6 +612,7 @@ it executable, then runs it with some arguments.
 
       installer = Executable(self.stage.archive_file)
       installer('--prefix=%s' % prefix, 'arg1', 'arg2', 'etc.')
 
+.. _deprecate:
 
 ^^^^^^^^^^^^^^^^^^^^^^^^
 Deprecating old versions
@@ -4367,9 +4368,9 @@ The signature for ``cache_extra_test_sources`` is:
 
 where ``srcs`` is a string or a list of strings corresponding to
 the paths for the files and or subdirectories, relative to the staged
-source, that are to be copied to the corresponding path relative to
-``self.install_test_root``. All of the contents within each subdirectory
-will be also be copied.
+source, that are to be copied to the corresponding relative test path
+under the prefix. All of the contents within each subdirectory will
+also be copied.
 
 For example, a package method for copying everything in the ``tests``
 subdirectory plus the ``foo.c`` and ``bar.c`` files from ``examples``
@@ -4377,8 +4378,13 @@ can be implemented as shown below.
 
 .. note::
 
-   The ``run_after`` directive ensures associated files are copied
-   **after** the package is installed by the build process.
+   The method name ``copy_test_sources`` here is for illustration
+   purposes. You are free to use a name that is more suited to your
+   package.
+
+   The key to copying the files at build time for stand-alone testing
+   is use of the ``run_after`` directive, which ensures the associated
+   files are copied **after** the provided build stage.
 
 .. code-block:: python
 
@@ -4395,18 +4401,13 @@ can be implemented as shown below.
 In this case, the method copies the associated files from the build
 stage **after** the software is installed to the package's metadata
 directory. The result is the directory and files will be cached in
-paths under ``self.install_test_root`` as follows:
-
-* ``join_path(self.install_test_root, 'tests')`` along with its files
-  and subdirectories
-* ``join_path(self.install_test_root, 'examples', 'foo.c')``
-* ``join_path(self.install_test_root, 'examples', 'bar.c')``
+a special test subdirectory under the installation prefix.
 
 These paths are **automatically copied** to the test stage directory
-where they are available to the package's ``test`` method through the
-``self.test_suite.current_test_cache_dir`` property. In our example,
-the method can access the directory and files using the following
-paths:
+during stand-alone testing. The package's ``test`` method can access
+them using the ``self.test_suite.current_test_cache_dir`` property.
+In our example, the method would use the following paths to reference
+the copy of each entry listed in ``srcs``, respectively:
 
 * ``join_path(self.test_suite.current_test_cache_dir, 'tests')``
 * ``join_path(self.test_suite.current_test_cache_dir, 'examples', 'foo.c')``
@@ -4414,9 +4415,8 @@ paths:
 
 .. note::
 
-   Library developers will want to build the associated tests under
-   the ``self.test_suite.current_test_cache_dir`` and against their
-   **installed** libraries before running them.
+   Library developers will want to build the associated tests
+   against their **installed** libraries before running them.
 
 .. note::
 
@@ -4426,11 +4426,6 @@ paths:
    would be appropriate for ensuring the installed software continues
    to work as the underlying system evolves.
 
-.. note::
-
-   You are free to use a method name that is more suitable for
-   your package.
-
 .. _cache_custom_files:
 
 """""""""""""""""""
@@ -4509,7 +4504,8 @@ can retrieve the expected output from ``examples/foo.out`` using:
 
    def test(self):
       ..
-      filename = join_path(self.install_test_root, 'examples', 'foo.out')
+      filename = join_path(self.test_suite.current_test_cache_dir,
+                           'examples', 'foo.out')
       expected = get_escaped_text_output(filename)
       ..
@@ -4677,9 +4673,6 @@ directory paths are provided in the table below.
    * - Test Suite Stage Files
      - ``self.test_suite.stage``
      - ``join_path(self.test_suite.stage, 'results.txt')``
-   * - Cached Build-time Files
-     - ``self.install_test_root``
-     - ``join_path(self.install_test_root, 'examples', 'foo.c')``
    * - Staged Cached Build-time Files
      - ``self.test_suite.current_test_cache_dir``
      - ``join_path(self.test_suite.current_test_cache_dir, 'examples', 'foo.c')``
```
`lib/spack/docs/tables/system_prerequisites.csv` (new file, 17 lines)

```csv
Name, Supported Versions, Notes, Requirement Reason
Python, 2.6/2.7/3.5-3.9, , Interpreter for Spack
C/C++ Compilers, , , Building software
make, , , Build software
patch, , , Build software
bash, , , Compiler wrappers
tar, , , Extract/create archives
gzip, , , Compress/Decompress archives
unzip, , , Compress/Decompress archives
bzip2, , , Compress/Decompress archives
xz, , , Compress/Decompress archives
zstd, , Optional, Compress/Decompress archives
file, , , Create/Use Buildcaches
gnupg2, , , Sign/Verify Buildcaches
git, , , Manage Software Repositories
svn, , Optional, Manage Software Repositories
hg, , Optional, Manage Software Repositories
```
```diff
@@ -387,7 +387,7 @@ some nice features:
    Spack-built compiler can be given to an IDE without requiring the
    IDE to load that compiler's module.
 
-Unfortunately, Spack's RPATH support does not work in all case. For example:
+Unfortunately, Spack's RPATH support does not work in every case. For example:
 
 #. Software comes in many forms --- not just compiled ELF binaries,
    but also as interpreted code in Python, R, JVM bytecode, etc.
```
`lib/spack/env/cc` (vendored, 7 changes)

```diff
@@ -163,7 +163,7 @@ case "$command" in
         lang_flags=F
         debug_flags="-g"
         ;;
-    ld)
+    ld|ld.gold|ld.lld)
         mode=ld
         ;;
     *)
@@ -247,7 +247,7 @@ export PATH=""
 for dir in "${env_path[@]}"; do
     addpath=true
     for env_dir in "${spack_env_dirs[@]}"; do
-        if [[ "$dir" == "$env_dir" ]]; then
+        if [[ "${dir%%/}" == "$env_dir" ]]; then
            addpath=false
            break
        fi
@@ -616,6 +616,9 @@ if [[ $SPACK_TEST_COMMAND == dump-args ]]; then
     IFS="
 " && echo "${full_command[*]}"
     exit
+elif [[ $SPACK_TEST_COMMAND =~ dump-env-* ]]; then
+    var=${SPACK_TEST_COMMAND#dump-env-}
+    echo "$0: $var: ${!var}"
 elif [[ -n $SPACK_TEST_COMMAND ]]; then
     die "ERROR: Unknown test command"
 fi
```
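The new ``dump-env-*`` branch relies on bash prefix stripping (``${SPACK_TEST_COMMAND#dump-env-}``) and variable indirection (``${!var}``). A Python rendering of the same dispatch, purely illustrative and not part of Spack:

```python
# Illustrative Python analogue of the new SPACK_TEST_COMMAND branch:
# "dump-env-FOO" strips the "dump-env-" prefix and prints variable FOO,
# matching what bash does with ${SPACK_TEST_COMMAND#dump-env-} and ${!var}.
import os
import sys


def handle_test_command(command):
    if command.startswith("dump-env-"):
        var = command[len("dump-env-"):]
        print("{0}: {1}: {2}".format(sys.argv[0], var, os.environ.get(var, "")))
    elif command:
        sys.exit("ERROR: Unknown test command")


os.environ["SPACK_CC"] = "/usr/bin/gcc"          # example variable to dump
handle_test_command("dump-env-SPACK_CC")         # <script>: SPACK_CC: /usr/bin/gcc
```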
`lib/spack/env/ld.gold` (vendored, new symbolic link)

```diff
@@ -0,0 +1 @@
+cc
```

`lib/spack/env/ld.lld` (vendored, new symbolic link)

```diff
@@ -0,0 +1 @@
+cc
```
```diff
@@ -692,7 +692,7 @@ def replace_directory_transaction(directory_name, tmp_root=None):
 
     try:
         yield tmp_dir
-    except (Exception, KeyboardInterrupt, SystemExit) as e:
+    except (Exception, KeyboardInterrupt, SystemExit):
         # Delete what was there, before copying back the original content
         if os.path.exists(directory_name):
             shutil.rmtree(directory_name)
@@ -701,10 +701,7 @@ def replace_directory_transaction(directory_name, tmp_root=None):
             dst=os.path.dirname(directory_name)
         )
         tty.debug('DIRECTORY RECOVERED [{0}]'.format(directory_name))
-
-        msg = 'the transactional move of "{0}" failed.'
-        msg += '\n    ' + str(e)
-        raise RuntimeError(msg.format(directory_name))
+        raise
     else:
         # Otherwise delete the temporary directory
         shutil.rmtree(tmp_dir)
```
@@ -7,7 +7,6 @@

import functools
import inspect
import multiprocessing
import os
import re
import sys
@@ -31,23 +30,6 @@
ignore_modules = [r'^\.#', '~$']


# On macOS, Python 3.8 multiprocessing now defaults to the 'spawn' start
# method. Spack cannot currently handle this, so force the process to start
# using the 'fork' start method.
#
# TODO: This solution is not ideal, as the 'fork' start method can lead to
# crashes of the subprocess. Figure out how to make 'spawn' work.
#
# See:
# * https://github.com/spack/spack/pull/18124
# * https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods  # noqa: E501
# * https://bugs.python.org/issue33725
if sys.version_info >= (3,):  # novm
    fork_context = multiprocessing.get_context('fork')
else:
    fork_context = multiprocessing


def index_by(objects, *funcs):
    """Create a hierarchy of dictionaries by splitting the supplied
    set of objects on unique values of the supplied functions.
@@ -933,3 +915,19 @@ class Devnull(object):
    """
    def write(self, *_):
        pass


def elide_list(line_list, max_num=10):
    """Takes a long list and limits it to a smaller number of elements,
    replacing intervening elements with '...'. For example::

        elide_list([1,2,3,4,5,6], 4)

    gives::

        [1, 2, 3, '...', 6]
    """
    if len(line_list) > max_num:
        return line_list[:max_num - 1] + ['...'] + line_list[-1:]
    else:
        return line_list
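For reference, a minimal sketch of how a fork context like the ``fork_context`` in the removed block above is used; the worker function and queue are stand-ins for illustration, not part of the diff:

    import multiprocessing

    fork_context = multiprocessing.get_context('fork')

    def _worker(q):
        q.put('ran in a forked child')      # executes in the subprocess

    q = fork_context.Queue()
    p = fork_context.Process(target=_worker, args=(q,))
    p.start()
    print(q.get())                          # -> 'ran in a forked child'
    p.join()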
@@ -9,6 +9,7 @@
import socket
import time
from datetime import datetime
from typing import Dict, Tuple  # novm

import llnl.util.tty as tty

@@ -36,6 +37,126 @@
true_fn = lambda: True


class OpenFile(object):
    """Record for keeping track of open lockfiles (with reference counting).

    There's really only one ``OpenFile`` per inode, per process, but we record the
    filehandle here as it's the thing we end up using in python code. You can get
    the file descriptor from the file handle if needed -- or we could make this track
    file descriptors as well in the future.
    """
    def __init__(self, fh):
        self.fh = fh
        self.refs = 0


class OpenFileTracker(object):
    """Track open lockfiles, to minimize number of open file descriptors.

    The ``fcntl`` locks that Spack uses are associated with an inode and a process.
    This is convenient, because if a process exits, it releases its locks.
    Unfortunately, this also means that if you close a file, *all* locks associated
    with that file's inode are released, regardless of whether the process has any
    other open file descriptors on it.

    Because of this, we need to track open lock files so that we only close them when
    a process no longer needs them. We do this by tracking each lockfile by its
    inode and process id. This has several nice properties:

    1. Tracking by pid ensures that, if we fork, we don't inadvertently track the parent
       process's lockfiles. ``fcntl`` locks are not inherited across forks, so we'll
       just track new lockfiles in the child.
    2. Tracking by inode ensures that references are counted per inode, and that we don't
       inadvertently close a file whose inode still has open locks.
    3. Tracking by both pid and inode ensures that we only open lockfiles the minimum
       number of times necessary for the locks we have.

    Note: as mentioned elsewhere, these locks aren't thread safe -- they're designed to
    work in Python and assume the GIL.
    """

    def __init__(self):
        """Create a new ``OpenFileTracker``."""
        self._descriptors = {}  # type: Dict[Tuple[int, int], OpenFile]

    def get_fh(self, path):
        """Get a filehandle for a lockfile.

        This routine will open writable files for read/write even if you're asking
        for a shared (read-only) lock. This is so that we can upgrade to an exclusive
        (write) lock later if requested.

        Arguments:
            path (str): path to lock file we want a filehandle for
        """
        # Open writable files as 'r+' so we can upgrade to write later
        os_mode, fh_mode = (os.O_RDWR | os.O_CREAT), 'r+'

        pid = os.getpid()
        open_file = None  # OpenFile object, if there is one
        stat = None  # stat result for the lockfile, if it exists

        try:
            # see whether we've seen this inode/pid before
            stat = os.stat(path)
            key = (stat.st_ino, pid)
            open_file = self._descriptors.get(key)

        except OSError as e:
            if e.errno != errno.ENOENT:  # only handle file not found
                raise

            # path does not exist -- fail if we won't be able to create it
            parent = os.path.dirname(path) or '.'
            if not os.access(parent, os.W_OK):
                raise CantCreateLockError(path)

        # if there was no already open file, we'll need to open one
        if not open_file:
            if stat and not os.access(path, os.W_OK):
                # we know path exists but not if it's writable. If it's read-only,
                # only open the file for reading (and fail if we're trying to get
                # an exclusive (write) lock on it)
                os_mode, fh_mode = os.O_RDONLY, 'r'

            fd = os.open(path, os_mode)
            fh = os.fdopen(fd, fh_mode)
            open_file = OpenFile(fh)

            # if we just created the file, we'll need to get its inode here
            if not stat:
                inode = os.fstat(fd).st_ino
                key = (inode, pid)

            self._descriptors[key] = open_file

        open_file.refs += 1
        return open_file.fh

    def release_fh(self, path):
        """Release a filehandle, only closing it if there are no more references."""
        try:
            inode = os.stat(path).st_ino
        except OSError as e:
            if e.errno != errno.ENOENT:  # only handle file not found
                raise
            inode = None  # this will not be in self._descriptors

        key = (inode, os.getpid())
        open_file = self._descriptors.get(key)
        assert open_file, "Attempted to close non-existing lock path: %s" % path

        open_file.refs -= 1
        if not open_file.refs:
            del self._descriptors[key]
            open_file.fh.close()


#: Open file descriptors for locks in this process. Used to prevent one process
#: from opening the same file many times for different byte range locks
file_tracker = OpenFileTracker()
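A minimal sketch of the reference counting described in the docstring above; the tracker instance and lock path are hypothetical stand-ins:

    tracker = OpenFileTracker()
    fh1 = tracker.get_fh('/tmp/demo.lock')   # opens the file; refs == 1
    fh2 = tracker.get_fh('/tmp/demo.lock')   # same (inode, pid) key; refs == 2
    assert fh1 is fh2                        # one handle per inode, per process
    tracker.release_fh('/tmp/demo.lock')     # refs == 1; the handle stays open
    tracker.release_fh('/tmp/demo.lock')     # refs == 0; the handle is closed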


def _attempts_str(wait_time, nattempts):
    # Don't print anything if we succeeded on the first try
    if nattempts <= 1:
@@ -56,7 +177,8 @@ class Lock(object):
    Note that this is for managing contention over resources *between*
    processes and not for managing contention between threads in a process: the
    functions of this object are not thread-safe. A process also must not
    maintain multiple locks on the same file.
    maintain multiple locks on the same file (or, more specifically, on
    overlapping byte ranges in the same file).
    """

    def __init__(self, path, start=0, length=0, default_timeout=None,
@@ -161,25 +283,10 @@ def _lock(self, op, timeout=None):

        # Create file and parent directories if they don't exist.
        if self._file is None:
            parent = self._ensure_parent_directory()
            self._ensure_parent_directory()
            self._file = file_tracker.get_fh(self.path)

            # Open writable files as 'r+' so we can upgrade to write later
            os_mode, fd_mode = (os.O_RDWR | os.O_CREAT), 'r+'
            if os.path.exists(self.path):
                if not os.access(self.path, os.W_OK):
                    if op == fcntl.LOCK_SH:
                        # can still lock read-only files if we open 'r'
                        os_mode, fd_mode = os.O_RDONLY, 'r'
                    else:
                        raise LockROFileError(self.path)

            elif not os.access(parent, os.W_OK):
                raise CantCreateLockError(self.path)

            fd = os.open(self.path, os_mode)
            self._file = os.fdopen(fd, fd_mode)

        elif op == fcntl.LOCK_EX and self._file.mode == 'r':
        if op == fcntl.LOCK_EX and self._file.mode == 'r':
            # Attempt to upgrade to write lock w/a read-only file.
            # If the file were writable, we'd have opened it 'r+'
            raise LockROFileError(self.path)
@@ -292,7 +399,8 @@ def _unlock(self):
        """
        fcntl.lockf(self._file, fcntl.LOCK_UN,
                    self._length, self._start, os.SEEK_SET)
        self._file.close()

        file_tracker.release_fh(self.path)
        self._file = None
        self._reads = 0
        self._writes = 0
@@ -2,28 +2,24 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

"""
This module contains all the elements that are required to create an
architecture object. These include, the target processor, the operating system,
and the architecture platform (i.e. cray, darwin, linux, etc) classes.
"""Aggregate the target processor, the operating system and the target
platform into an architecture object.

On a multiple architecture machine, the architecture spec field can be set to
build a package against any target and operating system that is present on the
platform. On Cray platforms or any other architecture that has different front
and back end environments, the operating system will determine the method of
compiler
detection.
compiler detection.

There are two different types of compiler detection:

1. Through the $PATH env variable (front-end detection)
2. Through the tcl module system. (back-end detection)
2. Through the module system. (back-end detection)

Depending on which operating system is specified, the compiler will be detected
using one of those methods.

For platforms such as linux and darwin, the operating system is autodetected
and the target is set to be x86_64.
For platforms such as linux and darwin, the operating system is autodetected.

The command line syntax for specifying an architecture is as follows:

@@ -33,10 +29,8 @@
the command line and Spack will concretize using the default. These defaults
are set in the 'platforms/' directory which contains the different subclasses
for platforms. If the machine has multiple architectures, the user can
also enter front-end, or fe or back-end or be. These settings will concretize
to their respective front-end and back-end targets and operating systems.
Additional platforms can be added by creating a subclass of Platform
and adding it inside the platform directory.
also enter frontend, or fe or backend or be. These settings will concretize
to their respective frontend and backend targets and operating systems.

Platforms are an abstract class that are extended by subclasses. If the user
wants to add a new type of platform (such as cray_xe), they can create a
@@ -47,335 +41,30 @@
new platform is added and the user wants that to be detected first.

Targets are created inside the platform subclasses. Most architecture
(like linux, and darwin) will have only one target (x86_64) but in the case of
(like linux, and darwin) will have only one target family (x86_64) but in the case of
Cray machines, there is both a frontend and backend processor. The user can
specify which targets are present on front-end and back-end architecture

Depending on the platform, operating systems are either auto-detected or are
set. The user can set the front-end and back-end operating setting by the class
Depending on the platform, operating systems are either autodetected or are
set. The user can set the frontend and backend operating setting by the class
attributes front_os and back_os. The operating system as described earlier,
will be responsible for compiler detection.
"""
import contextlib
import functools
import warnings

import six

import archspec.cpu

import llnl.util.lang as lang
import llnl.util.tty as tty

import spack.compiler
import spack.compilers
import spack.config
import spack.error as serr
import spack.paths
import spack.util.classes
import spack.util.executable
import spack.operating_systems
import spack.platforms
import spack.spec
import spack.target
import spack.util.spack_yaml as syaml
import spack.version
from spack.util.spack_yaml import syaml_dict


class NoPlatformError(serr.SpackError):
    def __init__(self):
        super(NoPlatformError, self).__init__(
            "Could not determine a platform for this machine.")


def _ensure_other_is_target(method):
    """Decorator to be used in dunder methods taking a single argument to
    ensure that the argument is an instance of ``Target`` too.
    """
    @functools.wraps(method)
    def _impl(self, other):
        if isinstance(other, six.string_types):
            other = Target(other)

        if not isinstance(other, Target):
            return NotImplemented

        return method(self, other)

    return _impl

class Target(object):
    def __init__(self, name, module_name=None):
        """Target models microarchitectures and their compatibility.

        Args:
            name (str or Microarchitecture): micro-architecture of the
                target
            module_name (str): optional module name to get access to the
                current target. This is typically used on machines
                like Cray (e.g. craype-compiler)
        """
        if not isinstance(name, archspec.cpu.Microarchitecture):
            name = archspec.cpu.TARGETS.get(
                name, archspec.cpu.generic_microarchitecture(name)
            )
        self.microarchitecture = name
        self.module_name = module_name

    @property
    def name(self):
        return self.microarchitecture.name

    @_ensure_other_is_target
    def __eq__(self, other):
        return self.microarchitecture == other.microarchitecture and \
            self.module_name == other.module_name

    def __ne__(self, other):
        # This method is necessary as long as we support Python 2. In Python 3
        # __ne__ defaults to the implementation below
        return not self == other

    @_ensure_other_is_target
    def __lt__(self, other):
        # TODO: In the future it would be convenient to say
        # TODO: `spec.architecture.target < other.architecture.target`
        # TODO: and change the semantic of the comparison operators

        # This is needed to sort deterministically specs in a list.
        # It doesn't implement a total ordering semantic.
        return self.microarchitecture.name < other.microarchitecture.name

    def __hash__(self):
        return hash((self.name, self.module_name))

    @staticmethod
    def from_dict_or_value(dict_or_value):
        # A string here represents a generic target (like x86_64 or ppc64) or
        # a custom micro-architecture
        if isinstance(dict_or_value, six.string_types):
            return Target(dict_or_value)

        # TODO: From a dict we actually retrieve much more information than
        # TODO: just the name. We can use that information to reconstruct an
        # TODO: "old" micro-architecture or check the current definition.
        target_info = dict_or_value
        return Target(target_info['name'])

    def to_dict_or_value(self):
        """Returns a dict or a value representing the current target.

        String values are used to keep backward compatibility with generic
        targets, like e.g. x86_64 or ppc64. More specific micro-architectures
        will return a dictionary which contains information on the name,
        features, vendor, generation and parents of the current target.
        """
        # Generic targets represent either an architecture
        # family (like x86_64) or a custom micro-architecture
        if self.microarchitecture.vendor == 'generic':
            return str(self)

        return syaml_dict(
            self.microarchitecture.to_dict(return_list_of_items=True)
        )

    def __repr__(self):
        cls_name = self.__class__.__name__
        fmt = cls_name + '({0}, {1})'
        return fmt.format(repr(self.microarchitecture),
                          repr(self.module_name))

    def __str__(self):
        return str(self.microarchitecture)

    def __contains__(self, cpu_flag):
        return cpu_flag in self.microarchitecture

    def optimization_flags(self, compiler):
        """Returns the flags needed to optimize for this target using
        the compiler passed as argument.

        Args:
            compiler (spack.spec.CompilerSpec or spack.compiler.Compiler): object that
                contains both the name and the version of the compiler we want to use
        """
        # Mixed toolchains are not supported yet
        import spack.compilers
        if isinstance(compiler, spack.compiler.Compiler):
            if spack.compilers.is_mixed_toolchain(compiler):
                msg = ('microarchitecture specific optimizations are not '
                       'supported yet on mixed compiler toolchains [check'
                       ' {0.name}@{0.version} for further details]')
                warnings.warn(msg.format(compiler))
                return ''

        # Try to check if the current compiler comes with a version number or
        # has an unexpected suffix. If so, treat it as a compiler with a
        # custom spec.
        compiler_version = compiler.version
        version_number, suffix = archspec.cpu.version_components(
            compiler.version
        )
        if not version_number or suffix not in ('', 'apple'):
            # Try to deduce the underlying version of the compiler, regardless
            # of its name in compilers.yaml. Depending on where this function
            # is called we might get either a CompilerSpec or a fully fledged
            # compiler object.
            import spack.spec
            if isinstance(compiler, spack.spec.CompilerSpec):
                compiler = spack.compilers.compilers_for_spec(compiler).pop()
            try:
                compiler_version = compiler.real_version
            except spack.util.executable.ProcessError as e:
                # log this and just return compiler.version instead
                tty.debug(str(e))

        return self.microarchitecture.optimization_flags(
            compiler.name, str(compiler_version)
        )
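A hedged illustration of the archspec call that ``optimization_flags`` delegates to; the target name, compiler, and version are assumptions, and the exact flags depend on the archspec database:

    import archspec.cpu

    uarch = archspec.cpu.TARGETS['haswell']
    flags = uarch.optimization_flags('gcc', '9.3.0')
    # expected to look like '-march=haswell -mtune=haswell'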


@lang.lazy_lexicographic_ordering
class Platform(object):
    """ Abstract class that each type of Platform will subclass.
    Will return an instance of it once it is returned.
    """

    # Subclass sets number. Controls detection order
    priority = None  # type: int

    #: binary formats used on this platform; used by relocation logic
    binary_formats = ['elf']

    front_end = None  # type: str
    back_end = None  # type: str
    default = None  # type: str  # The default back end target.

    front_os = None  # type: str
    back_os = None  # type: str
    default_os = None  # type: str

    reserved_targets = ['default_target', 'frontend', 'fe', 'backend', 'be']
    reserved_oss = ['default_os', 'frontend', 'fe', 'backend', 'be']

    def __init__(self, name):
        self.targets = {}
        self.operating_sys = {}
        self.name = name

    def add_target(self, name, target):
        """Used by the platform specific subclass to list available targets.
        Raises an error if the platform specifies a name
        that is reserved by spack as an alias.
        """
        if name in Platform.reserved_targets:
            raise ValueError(
                "%s is a spack reserved alias "
                "and cannot be the name of a target" % name)
        self.targets[name] = target

    def target(self, name):
        """This is a getter method for the target dictionary
        that handles defaulting based on the values provided by default,
        front-end, and back-end. This can be overwritten
        by a subclass for which we want to provide further aliasing options.
        """
        # TODO: Check if we can avoid using strings here
        name = str(name)
        if name == 'default_target':
            name = self.default
        elif name == 'frontend' or name == 'fe':
            name = self.front_end
        elif name == 'backend' or name == 'be':
            name = self.back_end

        return self.targets.get(name, None)

    def add_operating_system(self, name, os_class):
        """ Add the operating_system class object into the
        platform.operating_sys dictionary
        """
        if name in Platform.reserved_oss:
            raise ValueError(
                "%s is a spack reserved alias "
                "and cannot be the name of an OS" % name)
        self.operating_sys[name] = os_class

    def operating_system(self, name):
        if name == 'default_os':
            name = self.default_os
        if name == 'frontend' or name == "fe":
            name = self.front_os
        if name == 'backend' or name == 'be':
            name = self.back_os

        return self.operating_sys.get(name, None)

    @classmethod
    def setup_platform_environment(cls, pkg, env):
        """ Subclass can override this method if it requires any
        platform-specific build environment modifications.
        """

    @classmethod
    def detect(cls):
        """ Subclass is responsible for implementing this method.
        Returns True if the Platform class detects that
        it is the current platform
        and False if it's not.
        """
        raise NotImplementedError()

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return self.name

    def _cmp_iter(self):
        yield self.name
        yield self.default
        yield self.front_end
        yield self.back_end
        yield self.default_os
        yield self.front_os
        yield self.back_os

        def targets():
            for t in sorted(self.targets.values()):
                yield t._cmp_iter
        yield targets

        def oses():
            for o in sorted(self.operating_sys.values()):
                yield o._cmp_iter
        yield oses

@lang.lazy_lexicographic_ordering
class OperatingSystem(object):
    """ Operating System will be like a class similar to platform extended
    by subclasses for the specifics. Operating System will contain the
    compiler finding logic. Instead of calling two separate methods to
    find compilers we call the find_compilers method for each operating system.
    """

    def __init__(self, name, version):
        self.name = name.replace('-', '_')
        self.version = str(version).replace('-', '_')

    def __str__(self):
        return "%s%s" % (self.name, self.version)

    def __repr__(self):
        return self.__str__()

    def _cmp_iter(self):
        yield self.name
        yield self.version

    def to_dict(self):
        return syaml_dict([
            ('name', self.name),
            ('version', self.version)
        ])


@lang.lazy_lexicographic_ordering
@@ -400,11 +89,13 @@ def __init__(self, plat=None, os=None, target=None):

    @property
    def concrete(self):
        return all((self.platform is not None,
                    isinstance(self.platform, Platform),
                    self.os is not None,
                    isinstance(self.os, OperatingSystem),
                    self.target is not None, isinstance(self.target, Target)))
        return all(
            (self.platform is not None,
             isinstance(self.platform, spack.platforms.Platform),
             self.os is not None,
             isinstance(self.os, spack.operating_systems.OperatingSystem),
             self.target is not None, isinstance(self.target, spack.target.Target))
        )

    def __str__(self):
        if self.platform or self.os or self.target:
@@ -429,28 +120,28 @@ def __nonzero__(self):
    __bool__ = __nonzero__

    def _cmp_iter(self):
        if isinstance(self.platform, Platform):
        if isinstance(self.platform, spack.platforms.Platform):
            yield self.platform.name
        else:
            yield self.platform

        if isinstance(self.os, OperatingSystem):
        if isinstance(self.os, spack.operating_systems.OperatingSystem):
            yield self.os.name
        else:
            yield self.os

        if isinstance(self.target, Target):
        if isinstance(self.target, spack.target.Target):
            yield self.target.microarchitecture
        else:
            yield self.target

    def to_dict(self):
        str_or_none = lambda v: str(v) if v else None
        d = syaml_dict([
        d = syaml.syaml_dict([
            ('platform', str_or_none(self.platform)),
            ('platform_os', str_or_none(self.os)),
            ('target', self.target.to_dict_or_value())])
        return syaml_dict([('arch', d)])
        return syaml.syaml_dict([('arch', d)])

    def to_spec(self):
        """Convert this Arch to an anonymous Spec with architecture defined."""
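A hedged sketch of the mapping ``to_dict`` above produces for a generic target; all field values are illustrative:

    example = {
        'arch': {
            'platform': 'linux',
            'platform_os': 'ubuntu18.04',
            'target': 'x86_64',    # non-generic targets serialize to a dict
        }
    }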
@@ -464,64 +155,25 @@ def from_dict(d):
    return arch_for_spec(spec)


@lang.memoized
def get_platform(platform_name):
    """Returns a platform object that corresponds to the given name."""
    platform_list = all_platforms()
    for p in platform_list:
        if platform_name.replace("_", "").lower() == p.__name__.lower():
            return p()


def verify_platform(platform_name):
    """ Determines whether or not the platform with the given name is supported
    in Spack. For more information, see the 'spack.platforms' submodule.
    """
    platform_name = platform_name.replace("_", "").lower()
    platform_names = [p.__name__.lower() for p in all_platforms()]

    if platform_name not in platform_names:
        tty.die("%s is not a supported platform; supported platforms are %s" %
                (platform_name, platform_names))


def arch_for_spec(arch_spec):
    """Transforms the given architecture spec into an architecture object."""
    arch_spec = spack.spec.ArchSpec(arch_spec)
    assert arch_spec.concrete

    arch_plat = get_platform(arch_spec.platform)
    arch_plat = spack.platforms.by_name(arch_spec.platform)
    if not (arch_plat.operating_system(arch_spec.os) and
            arch_plat.target(arch_spec.target)):
        raise ValueError(
            "Can't recreate arch for spec %s on current arch %s; "
            "spec architecture is too different" % (arch_spec, sys_type()))
        sys_type = str(default_arch())
        msg = ("Can't recreate arch for spec {0} on current arch {1}; "
               "spec architecture is too different")
        raise ValueError(msg.format(arch_spec, sys_type))

    return Arch(arch_plat, arch_spec.os, arch_spec.target)


@lang.memoized
def _all_platforms():
    mod_path = spack.paths.platform_path
    return spack.util.classes.list_classes("spack.platforms", mod_path)


@lang.memoized
def _platform():
    """Detects the platform for this machine.

    Gather a list of all available subclasses of platforms.
    Sorts the list according to their priority. Priority is
    an arbitrarily set number. Detects platform either using uname or
    a file path (/opt/cray...)
    """
    # Try to create a Platform object using the config file FIRST
    platform_list = _all_platforms()
    platform_list.sort(key=lambda a: a.priority)

    for platform_cls in platform_list:
        if platform_cls.detect():
            return platform_cls()
    return spack.platforms.host()


#: The "real" platform of the host running Spack. This should not be changed
@@ -532,44 +184,23 @@ def _platform():
#: context manager.
platform = _platform

#: The list of all platform classes. May be swapped by the use_platform
#: context manager.
all_platforms = _all_platforms


@lang.memoized
def default_arch():
    """Default ``Arch`` object for this machine.

    See ``sys_type()``.
    """
    """Default ``Arch`` object for this machine"""
    return Arch(platform(), 'default_os', 'default_target')


def sys_type():
    """Print out the "default" platform-os-target tuple for this machine.

    On machines with only one target OS/target, prints out the
    platform-os-target for the frontend. For machines with a frontend
    and a backend, prints the default backend.

    TODO: replace with use of more explicit methods to get *all* the
    backends, as client code should really be aware of cross-compiled
    architectures.

    """
    return str(default_arch())


@lang.memoized
def compatible_sys_types():
    """Returns a list of all the systypes compatible with the current host."""
    compatible_archs = []
    """Return a list of all the platform-os-target tuples compatible
    with the current host.
    """
    current_host = archspec.cpu.host()
    compatible_targets = [current_host] + current_host.ancestors
    for target in compatible_targets:
        arch = Arch(platform(), 'default_os', target)
        compatible_archs.append(str(arch))
    compatible_archs = [
        str(Arch(platform(), 'default_os', target)) for target in compatible_targets
    ]
    return compatible_archs


@@ -587,16 +218,15 @@ def __call__(self):

@contextlib.contextmanager
def use_platform(new_platform):
    global platform, all_platforms
    global platform

    msg = '"{0}" must be an instance of Platform'
    assert isinstance(new_platform, Platform), msg.format(new_platform)
    assert isinstance(new_platform, spack.platforms.Platform), msg.format(new_platform)

    original_platform_fn, original_all_platforms_fn = platform, all_platforms
    original_platform_fn = platform

    try:
        platform = _PickleableCallable(new_platform)
        all_platforms = _PickleableCallable([type(new_platform)])

        # Clear configuration and compiler caches
        spack.config.config.clear_caches()
@@ -605,7 +235,7 @@ def use_platform(new_platform):
        yield new_platform

    finally:
        platform, all_platforms = original_platform_fn, original_all_platforms_fn
        platform = original_platform_fn

        # Clear configuration and compiler caches
        spack.config.config.clear_caches()
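A hedged usage sketch of the ``use_platform`` context manager above; ``spack.platforms.Test`` is assumed to exist, as in Spack's test suite:

    import spack.platforms

    with use_platform(spack.platforms.Test()):
        # code here sees the swapped-in platform; configuration and
        # compiler caches are cleared on entry and on exit
        pass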
@@ -37,12 +37,16 @@ def _search_duplicate_compilers(error_cls):
"""
import collections
import itertools
import re

from six.moves.urllib.request import urlopen

try:
    from collections.abc import Sequence  # novm
except ImportError:
    from collections import Sequence


#: Map an audit tag to a list of callables implementing checks
CALLBACKS = {}

@@ -261,6 +265,45 @@ def _search_duplicate_specs_in_externals(error_cls):
    kwargs=('pkgs',)
)

#: Sanity checks on linting
# This can take some time, so it's run separately from packages
package_https_directives = AuditClass(
    group='packages-https',
    tag='PKG-HTTPS-DIRECTIVES',
    description='Sanity checks on https checks of package urls, etc.',
    kwargs=('pkgs',)
)


@package_https_directives
def _linting_package_file(pkgs, error_cls):
    """Check for correctness of links
    """
    import llnl.util.lang

    import spack.repo
    import spack.spec

    errors = []
    for pkg_name in pkgs:
        pkg = spack.repo.get(pkg_name)

        # Does the homepage have http, and if so, does https work?
        if pkg.homepage.startswith('http://'):
            https = re.sub("http", "https", pkg.homepage, 1)
            try:
                response = urlopen(https)
            except Exception as e:
                msg = 'Error with attempting https for "{0}": '
                errors.append(error_cls(msg.format(pkg.name), [str(e)]))
                continue

            if response.getcode() == 200:
                msg = 'Package "{0}" uses http but has a valid https endpoint.'
                errors.append(msg.format(pkg.name))

    return llnl.util.lang.dedupe(errors)


@package_directives
def _unknown_variants_in_directives(pkgs, error_cls):
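An illustration of the homepage rewrite performed by the check above; the count argument of 1 replaces only the scheme, not later occurrences of "http" in the URL:

    import re

    re.sub("http", "https", "http://example.com/http-docs", 1)
    # -> 'https://example.com/http-docs'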
@@ -29,7 +29,9 @@
import spack.database as spack_db
import spack.fetch_strategy as fs
import spack.hash_types as ht
import spack.hooks.sbang
import spack.mirror
import spack.platforms
import spack.relocate as relocate
import spack.util.file_cache as file_cache
import spack.util.gpg
@@ -615,9 +617,8 @@ def write_buildinfo_file(spec, workdir, rel=False):
        prefix_to_hash[str(d.prefix)] = d.dag_hash()

    # Create buildinfo data and write it to disk
    import spack.hooks.sbang as sbang
    buildinfo = {}
    buildinfo['sbang_install_path'] = sbang.sbang_install_path()
    buildinfo['sbang_install_path'] = spack.hooks.sbang.sbang_install_path()
    buildinfo['relative_rpaths'] = rel
    buildinfo['buildpath'] = spack.store.layout.root
    buildinfo['spackprefix'] = spack.paths.prefix
@@ -708,14 +709,14 @@ def generate_package_index(cache_prefix):
    """Create the build cache index page.

    Creates (or replaces) the "index.json" page at the location given in
    cache_prefix. This page contains a link for each binary package (.yaml)
    under cache_prefix.
    cache_prefix. This page contains a link for each binary package (.yaml or
    .json) under cache_prefix.
    """
    try:
        file_list = (
            entry
            for entry in web_util.list_url(cache_prefix)
            if entry.endswith('.yaml'))
            if entry.endswith('.yaml') or entry.endswith('spec.json'))
    except KeyError as inst:
        msg = 'No packages at {0}: {1}'.format(cache_prefix, inst)
        tty.warn(msg)
@@ -729,28 +730,33 @@ def generate_package_index(cache_prefix):
        tty.warn(msg)
        return

    tty.debug('Retrieving spec.yaml files from {0} to build index'.format(
    tty.debug('Retrieving spec descriptor files from {0} to build index'.format(
        cache_prefix))

    all_mirror_specs = {}

    for file_path in file_list:
        try:
            yaml_url = url_util.join(cache_prefix, file_path)
            tty.debug('fetching {0}'.format(yaml_url))
            _, _, yaml_file = web_util.read_from_url(yaml_url)
            yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
            spec_dict = syaml.load(yaml_contents)
            s = Spec.from_yaml(yaml_contents)
            spec_url = url_util.join(cache_prefix, file_path)
            tty.debug('fetching {0}'.format(spec_url))
            _, _, spec_file = web_util.read_from_url(spec_url)
            spec_file_contents = codecs.getreader('utf-8')(spec_file).read()
            # Need full spec.json name or this gets confused with index.json.
            if spec_url.endswith('.json'):
                spec_dict = sjson.load(spec_file_contents)
                s = Spec.from_json(spec_file_contents)
            elif spec_url.endswith('.yaml'):
                spec_dict = syaml.load(spec_file_contents)
                s = Spec.from_yaml(spec_file_contents)
            all_mirror_specs[s.dag_hash()] = {
                'yaml_url': yaml_url,
                'spec_url': spec_url,
                'spec': s,
                'num_deps': len(list(s.traverse(root=False))),
                'binary_cache_checksum': spec_dict['binary_cache_checksum'],
                'buildinfo': spec_dict['buildinfo'],
            }
        except (URLError, web_util.SpackWebError) as url_err:
            tty.error('Error reading spec.yaml: {0}'.format(file_path))
            tty.error('Error reading specfile: {0}'.format(file_path))
            tty.error(url_err)

    sorted_specs = sorted(all_mirror_specs.keys(),
@@ -776,7 +782,7 @@ def generate_package_index(cache_prefix):
            # full hash. If the full hash we have for any deps does not
            # match what those deps have themselves, then we need to splice
            # this spec with those deps, and push this spliced spec
            # (spec.yaml file) back to the mirror, as well as update the
            # (spec.json file) back to the mirror, as well as update the
            # all_mirror_specs dictionary with this spliced spec.
            to_splice = []
            for dep in s.dependencies():
@@ -794,25 +800,25 @@ def generate_package_index(cache_prefix):
                    s = s.splice(true_dep, True)

                # Push this spliced spec back to the mirror
                spliced_yaml = s.to_dict(hash=ht.full_hash)
                spliced_spec_dict = s.to_dict(hash=ht.full_hash)
                for key in ['binary_cache_checksum', 'buildinfo']:
                    spliced_yaml[key] = spec_record[key]
                    spliced_spec_dict[key] = spec_record[key]

                temp_yaml_path = os.path.join(tmpdir, 'spliced.spec.yaml')
                with open(temp_yaml_path, 'w') as fd:
                    fd.write(syaml.dump(spliced_yaml))
                temp_json_path = os.path.join(tmpdir, 'spliced.spec.json')
                with open(temp_json_path, 'w') as fd:
                    fd.write(sjson.dump(spliced_spec_dict))

                spliced_yaml_url = spec_record['yaml_url']
                spliced_spec_url = spec_record['spec_url']
                web_util.push_to_url(
                    temp_yaml_path, spliced_yaml_url, keep_original=False)
                    temp_json_path, spliced_spec_url, keep_original=False)
                tty.debug('  spliced and wrote {0}'.format(
                    spliced_yaml_url))
                    spliced_spec_url))
                spec_record['spec'] = s

            db.add(s, None)
            db.mark(s, 'in_buildcache', True)

    # Now that we have fixed any old spec yamls that might have had the wrong
    # Now that we have fixed any old specfiles that might have had the wrong
    # full hash for their dependencies, we can generate the index, compute
    # the hash, and push those files to the mirror.
    index_json_path = os.path.join(db_root_dir, 'index.json')
@@ -948,19 +954,27 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
    # need to copy the spec file so the build cache can be downloaded
    # without concretizing with the current spack packages
    # and preferences
    spec_file = os.path.join(spec.prefix, ".spack", "spec.yaml")
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.realpath(
        os.path.join(cache_prefix, specfile_name))

    spec_file = spack.store.layout.spec_file_path(spec)
    specfile_name = tarball_name(spec, '.spec.json')
    specfile_path = os.path.realpath(os.path.join(cache_prefix, specfile_name))
    deprecated_specfile_path = specfile_path.replace('.spec.json', '.spec.yaml')

    remote_specfile_path = url_util.join(
        outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir)))
    remote_specfile_path_deprecated = url_util.join(
        outdir, os.path.relpath(deprecated_specfile_path,
                                os.path.realpath(tmpdir)))

    if web_util.url_exists(remote_specfile_path):
        if force:
    # If force and exists, overwrite. Otherwise raise exception on collision.
    if force:
        if web_util.url_exists(remote_specfile_path):
            web_util.remove_url(remote_specfile_path)
        else:
            raise NoOverwriteException(url_util.format(remote_specfile_path))
        if web_util.url_exists(remote_specfile_path_deprecated):
            web_util.remove_url(remote_specfile_path_deprecated)
    elif (web_util.url_exists(remote_specfile_path) or
            web_util.url_exists(remote_specfile_path_deprecated)):
        raise NoOverwriteException(url_util.format(remote_specfile_path))

    # make a copy of the install directory to work with
    workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
@@ -1008,15 +1022,23 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
    # get the sha256 checksum of the tarball
    checksum = checksum_tarball(tarfile_path)

    # add sha256 checksum to spec.yaml
    # add sha256 checksum to spec.json

    with open(spec_file, 'r') as inputfile:
        content = inputfile.read()
        spec_dict = yaml.load(content)
        if spec_file.endswith('.yaml'):
            spec_dict = yaml.load(content)
        elif spec_file.endswith('.json'):
            spec_dict = sjson.load(content)
        else:
            raise ValueError(
                '{0} not a valid spec file type (json or yaml)'.format(
                    spec_file))
    bchecksum = {}
    bchecksum['hash_algorithm'] = 'sha256'
    bchecksum['hash'] = checksum
    spec_dict['binary_cache_checksum'] = bchecksum
    # Add original install prefix relative to layout root to spec.yaml.
    # Add original install prefix relative to layout root to spec.json.
    # This will be used to determine if the directory layout has changed.
    buildinfo = {}
    buildinfo['relative_prefix'] = os.path.relpath(
@@ -1025,7 +1047,7 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
    spec_dict['buildinfo'] = buildinfo

    with open(specfile_path, 'w') as outfile:
        outfile.write(syaml.dump(spec_dict))
        outfile.write(sjson.dump(spec_dict))

    # sign the tarball and spec file with gpg
    if not unsigned:
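A hedged sketch of the checksum record the code above writes into the spec dictionary; the digest shown is illustrative only:

    spec_dict['binary_cache_checksum'] = {
        'hash_algorithm': 'sha256',
        'hash': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
    }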
@@ -1135,7 +1157,7 @@ def make_package_relative(workdir, spec, allow_root):
        orig_path_names.append(os.path.join(prefix, filename))
        cur_path_names.append(os.path.join(workdir, filename))

    platform = spack.architecture.get_platform(spec.platform)
    platform = spack.platforms.by_name(spec.platform)
    if 'macho' in platform.binary_formats:
        relocate.make_macho_binaries_relative(
            cur_path_names, orig_path_names, old_layout_root)
@@ -1169,8 +1191,6 @@ def relocate_package(spec, allow_root):
    """
    Relocate the given package
    """
    import spack.hooks.sbang as sbang

    workdir = str(spec.prefix)
    buildinfo = read_buildinfo_file(workdir)
    new_layout_root = str(spack.store.layout.root)
@@ -1209,7 +1229,8 @@ def relocate_package(spec, allow_root):
    prefix_to_prefix_bin = OrderedDict({})

    if old_sbang_install_path:
        prefix_to_prefix_text[old_sbang_install_path] = sbang.sbang_install_path()
        install_path = spack.hooks.sbang.sbang_install_path()
        prefix_to_prefix_text[old_sbang_install_path] = install_path

    prefix_to_prefix_text[old_prefix] = new_prefix
    prefix_to_prefix_bin[old_prefix] = new_prefix
@@ -1223,7 +1244,7 @@ def relocate_package(spec, allow_root):
    # now a POSIX script that lives in the install prefix. Old packages
    # will have the old sbang location in their shebangs.
    orig_sbang = '#!/bin/bash {0}/bin/sbang'.format(old_spack_prefix)
    new_sbang = sbang.sbang_shebang_line()
    new_sbang = spack.hooks.sbang.sbang_shebang_line()
    prefix_to_prefix_text[orig_sbang] = new_sbang

    tty.debug("Relocating package from",
@@ -1247,7 +1268,7 @@ def is_backup_file(file):
    ]
    # If the buildcache was not created with relativized rpaths
    # do the relocation of path in binaries
    platform = spack.architecture.get_platform(spec.platform)
    platform = spack.platforms.by_name(spec.platform)
    if 'macho' in platform.binary_formats:
        relocate.relocate_macho_binaries(files_to_relocate,
                                         old_layout_root,
@@ -1306,15 +1327,26 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
    spackfile_path = os.path.join(stagepath, spackfile_name)
    tarfile_name = tarball_name(spec, '.tar.gz')
    tarfile_path = os.path.join(tmpdir, tarfile_name)
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.join(tmpdir, specfile_name)

    specfile_is_json = True
    deprecated_yaml_name = tarball_name(spec, '.spec.yaml')
    deprecated_yaml_path = os.path.join(tmpdir, deprecated_yaml_name)
    json_name = tarball_name(spec, '.spec.json')
    json_path = os.path.join(tmpdir, json_name)
    with closing(tarfile.open(spackfile_path, 'r')) as tar:
        tar.extractall(tmpdir)
    # some buildcache tarfiles use bzip2 compression
    if not os.path.exists(tarfile_path):
        tarfile_name = tarball_name(spec, '.tar.bz2')
        tarfile_path = os.path.join(tmpdir, tarfile_name)

    if os.path.exists(json_path):
        specfile_path = json_path
    elif os.path.exists(deprecated_yaml_path):
        specfile_is_json = False
        specfile_path = deprecated_yaml_path
    else:
        raise ValueError('Cannot find spec file for {0}.'.format(tmpdir))

    if not unsigned:
        if os.path.exists('%s.asc' % specfile_path):
            try:
@@ -1337,7 +1369,10 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
    spec_dict = {}
    with open(specfile_path, 'r') as inputfile:
        content = inputfile.read()
        spec_dict = syaml.load(content)
        if specfile_is_json:
            spec_dict = sjson.load(content)
        else:
            spec_dict = syaml.load(content)
    bchecksum = spec_dict['binary_cache_checksum']

    # if the checksums don't match don't install
@@ -1414,27 +1449,39 @@ def try_direct_fetch(spec, full_hash_match=False, mirrors=None):
    """
    Try to find the spec directly on the configured mirrors
    """
    specfile_name = tarball_name(spec, '.spec.yaml')
    deprecated_specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_name = tarball_name(spec, '.spec.json')
    specfile_is_json = True
    lenient = not full_hash_match
    found_specs = []
    spec_full_hash = spec.full_hash()

    for mirror in spack.mirror.MirrorCollection(mirrors=mirrors).values():
        buildcache_fetch_url = url_util.join(
        buildcache_fetch_url_yaml = url_util.join(
            mirror.fetch_url, _build_cache_relative_path, deprecated_specfile_name)
        buildcache_fetch_url_json = url_util.join(
            mirror.fetch_url, _build_cache_relative_path, specfile_name)

        try:
            _, _, fs = web_util.read_from_url(buildcache_fetch_url)
            fetched_spec_yaml = codecs.getreader('utf-8')(fs).read()
            _, _, fs = web_util.read_from_url(buildcache_fetch_url_json)
        except (URLError, web_util.SpackWebError, HTTPError) as url_err:
            tty.debug('Did not find {0} on {1}'.format(
                specfile_name, buildcache_fetch_url), url_err)
            continue
            try:
                _, _, fs = web_util.read_from_url(buildcache_fetch_url_yaml)
                specfile_is_json = False
            except (URLError, web_util.SpackWebError, HTTPError) as url_err_y:
                tty.debug('Did not find {0} on {1}'.format(
                    specfile_name, buildcache_fetch_url_json), url_err)
                tty.debug('Did not find {0} on {1}'.format(
                    specfile_name, buildcache_fetch_url_yaml), url_err_y)
                continue
        specfile_contents = codecs.getreader('utf-8')(fs).read()

        # read the spec from the build cache file. All specs in build caches
        # are concrete (as they are built) so we need to mark this spec
        # concrete on read-in.
        fetched_spec = Spec.from_yaml(fetched_spec_yaml)
        if specfile_is_json:
            fetched_spec = Spec.from_json(specfile_contents)
        else:
            fetched_spec = Spec.from_yaml(specfile_contents)
        fetched_spec._mark_concrete()

        # Do not recompute the full hash for the fetched spec, instead just
@@ -1462,7 +1509,7 @@ def get_mirrors_for_spec(spec=None, full_hash_match=False,
        is included in the results.
        mirrors_to_check (dict): Optionally override the configured mirrors
            with the mirrors in this dictionary.
        index_only (bool): Do not attempt direct fetching of ``spec.yaml``
        index_only (bool): Do not attempt direct fetching of ``spec.json``
            files from remote mirrors, only consider the indices.

    Return:
@@ -1659,57 +1706,91 @@ def needs_rebuild(spec, mirror_url, rebuild_on_errors=False):
        pkg_name, pkg_version, pkg_hash, pkg_full_hash))
    tty.debug(spec.tree())

    # Try to retrieve the .spec.yaml directly, based on the known
    # Try to retrieve the specfile directly, based on the known
    # format of the name, in order to determine if the package
    # needs to be rebuilt.
    cache_prefix = build_cache_prefix(mirror_url)
    spec_yaml_file_name = tarball_name(spec, '.spec.yaml')
    file_path = os.path.join(cache_prefix, spec_yaml_file_name)
    specfile_is_json = True
    specfile_name = tarball_name(spec, '.spec.json')
    deprecated_specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.join(cache_prefix, specfile_name)
    deprecated_specfile_path = os.path.join(cache_prefix,
                                            deprecated_specfile_name)

    result_of_error = 'Package ({0}) will {1}be rebuilt'.format(
        spec.short_spec, '' if rebuild_on_errors else 'not ')

    try:
        _, _, yaml_file = web_util.read_from_url(file_path)
        yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
        _, _, spec_file = web_util.read_from_url(specfile_path)
    except (URLError, web_util.SpackWebError) as url_err:
        err_msg = [
            'Unable to determine whether {0} needs rebuilding,',
            ' caught exception attempting to read from {1}.',
        ]
        tty.error(''.join(err_msg).format(spec.short_spec, file_path))
        tty.debug(url_err)
        try:
            _, _, spec_file = web_util.read_from_url(deprecated_specfile_path)
            specfile_is_json = False
        except (URLError, web_util.SpackWebError) as url_err_y:
            err_msg = [
                'Unable to determine whether {0} needs rebuilding,',
                ' caught exception attempting to read from {1} or {2}.',
            ]
            tty.error(''.join(err_msg).format(
                spec.short_spec,
                specfile_path,
                deprecated_specfile_path))
            tty.debug(url_err)
            tty.debug(url_err_y)
            tty.warn(result_of_error)
            return rebuild_on_errors

    spec_file_contents = codecs.getreader('utf-8')(spec_file).read()
    if not spec_file_contents:
        tty.error('Reading {0} returned nothing'.format(
            specfile_path if specfile_is_json else deprecated_specfile_path))
        tty.warn(result_of_error)
        return rebuild_on_errors

    if not yaml_contents:
        tty.error('Reading {0} returned nothing'.format(file_path))
        tty.warn(result_of_error)
        return rebuild_on_errors
    spec_dict = (sjson.load(spec_file_contents)
                 if specfile_is_json else syaml.load(spec_file_contents))

    spec_yaml = syaml.load(yaml_contents)

    yaml_spec = spec_yaml['spec']
    try:
        nodes = spec_dict['spec']['nodes']
    except KeyError:
        # Prior node dict format omitted 'nodes' key
        nodes = spec_dict['spec']
    name = spec.name

    # The "spec" key in the yaml is a list of objects, each with a single
    # In the old format:
    # The "spec" key represents a list of objects, each with a single
    # key that is the package name. While the list usually just contains
    # a single object, we iterate over the list looking for the object
    # with the name of this concrete spec as a key, out of an abundance
    # of caution.
    cached_pkg_specs = [item[name] for item in yaml_spec if name in item]
    # In format version 2:
    # ['spec']['nodes'] is still a list of objects, but with a
    # multitude of keys. The list will commonly contain many objects, and in the
    # case of build specs, it is highly likely that the same name will occur
    # once as the actual package, and then again as the build provenance of that
    # same package. Hence format version 2 matches on the dag hash, not name.
    if nodes and 'name' not in nodes[0]:
        # old style
        cached_pkg_specs = [item[name] for item in nodes if name in item]
    elif nodes and spec_dict['spec']['_meta']['version'] == 2:
        cached_pkg_specs = [item for item in nodes
                            if item[ht.dag_hash.name] == spec.dag_hash()]
    cached_target = cached_pkg_specs[0] if cached_pkg_specs else None

    # If either the full_hash didn't exist in the .spec.yaml file, or it
    # If either the full_hash didn't exist in the specfile, or it
    # did, but didn't match the one we computed locally, then we should
    # just rebuild. This can be simplified once the dag_hash and the
    # full_hash become the same thing.
    rebuild = False
    if not cached_target or 'full_hash' not in cached_target:
        reason = 'full_hash was missing from remote spec.yaml'

    if not cached_target:
        reason = 'did not find spec in specfile contents'
        rebuild = True
    elif ht.full_hash.name not in cached_target:
        reason = 'full_hash was missing from remote specfile'
        rebuild = True
    else:
        full_hash = cached_target['full_hash']
        full_hash = cached_target[ht.full_hash.name]
        if full_hash != pkg_full_hash:
            reason = 'hash mismatch, remote = {0}, local = {1}'.format(
                full_hash, pkg_full_hash)
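A hedged sketch of the two specfile layouts the code above distinguishes; the package name and hashes are illustrative, and the dag hash key is assumed to be ``ht.dag_hash.name`` (i.e. 'hash'):

    # Old format: a list keyed by package name, no '_meta' or 'nodes'.
    old_format = {'spec': [{'zlib': {'full_hash': 'aaaa...'}}]}

    # Format version 2: a 'nodes' list with 'name' keys, matched by dag hash.
    new_format = {'spec': {'_meta': {'version': 2},
                           'nodes': [{'name': 'zlib',
                                      'hash': 'bbbb...',
                                      'full_hash': 'aaaa...'}]}}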
@@ -1771,24 +1852,23 @@ def check_specs_against_mirrors(mirrors, specs, output_file=None,

def _download_buildcache_entry(mirror_root, descriptions):
    for description in descriptions:
        description_url = os.path.join(mirror_root, description['url'])
        path = description['path']
        fail_if_missing = description['required']

        mkdirp(path)

        stage = Stage(
            description_url, name="build_cache", path=path, keep=True)

        try:
            stage.fetch()
        except fs.FetchError as e:
            tty.debug(e)
        fail_if_missing = description['required']
        for url in description['url']:
            description_url = os.path.join(mirror_root, url)
            stage = Stage(
                description_url, name="build_cache", path=path, keep=True)
            try:
                stage.fetch()
                break
            except fs.FetchError as e:
                tty.debug(e)
        else:
            if fail_if_missing:
                tty.error('Failed to download required url {0}'.format(
                    description_url))
                return False

    return True
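The rewritten loop above relies on Python's for/else: the ``else`` suite runs only when the loop finishes without hitting ``break``. A minimal stand-alone illustration, where the function and its arguments are hypothetical stand-ins:

    def first_success(urls, fetch):
        for url in urls:
            try:
                result = fetch(url)
                break              # success: the else clause is skipped
            except IOError:
                continue           # try the next candidate
        else:
            raise RuntimeError('all candidate urls failed')  # no break ran
        return result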
|
@@ -2,8 +2,14 @@
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
from __future__ import print_function
|
||||
|
||||
import contextlib
|
||||
import fnmatch
|
||||
import json
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import sys
|
||||
|
||||
try:
|
||||
@@ -18,8 +24,13 @@
|
||||
import llnl.util.tty as tty
|
||||
|
||||
import spack.architecture
|
||||
import spack.binary_distribution
|
||||
import spack.config
|
||||
import spack.environment
|
||||
import spack.main
|
||||
import spack.modules
|
||||
import spack.paths
|
||||
import spack.platforms
|
||||
import spack.repo
|
||||
import spack.spec
|
||||
import spack.store
|
||||
@@ -28,6 +39,277 @@
|
||||
import spack.util.path
|
||||
from spack.util.environment import EnvironmentModifications
|
||||
|
||||
#: Map a bootstrapper type to the corresponding class
|
||||
_bootstrap_methods = {}
|
||||
|
||||
|
||||
def _bootstrapper(type):
|
||||
"""Decorator to register classes implementing bootstrapping
|
||||
methods.
|
||||
|
||||
Args:
|
||||
type (str): string identifying the class
|
||||
"""
|
||||
def _register(cls):
|
||||
_bootstrap_methods[type] = cls
|
||||
return cls
|
||||
return _register
|
||||
|
||||
|
||||
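`_bootstrapper` is a small plugin registry: each decorated class records itself in `_bootstrap_methods` under its type string, so configuration can later be turned into the right class by a dict lookup. A runnable sketch of the pattern in isolation (illustrative names only):

```python
_registry = {}

def register(kind):
    """Parametrized class decorator: store cls under `kind`, return it unchanged."""
    def _wrap(cls):
        _registry[kind] = cls
        return cls
    return _wrap

@register('buildcache')
class FromBuildcache:
    pass

@register('install')
class FromSources:
    pass

assert _registry['install'] is FromSources
```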
def _try_import_from_store(module, abstract_spec_str):
    """Return True if the module can be imported from an already
    installed spec, False otherwise.

    Args:
        module: Python module to be imported
        abstract_spec_str: abstract spec that may provide the module
    """
    bincache_platform = spack.architecture.real_platform()
    if str(bincache_platform) == 'cray':
        bincache_platform = spack.platforms.linux.Linux()
        with spack.architecture.use_platform(bincache_platform):
            abstract_spec_str = str(spack.spec.Spec(abstract_spec_str))

    # We have to run as part of this python interpreter
    abstract_spec_str += ' ^' + spec_for_current_python()

    installed_specs = spack.store.db.query(abstract_spec_str, installed=True)

    for candidate_spec in installed_specs:
        lib_spd = candidate_spec['python'].package.default_site_packages_dir
        lib64_spd = lib_spd.replace('lib/', 'lib64/')
        module_paths = [
            os.path.join(candidate_spec.prefix, lib_spd),
            os.path.join(candidate_spec.prefix, lib64_spd)
        ]
        sys.path.extend(module_paths)

        try:
            _fix_ext_suffix(candidate_spec)
            if _python_import(module):
                msg = ('[BOOTSTRAP MODULE {0}] The installed spec "{1}/{2}" '
                       'provides the "{0}" Python module').format(
                    module, abstract_spec_str, candidate_spec.dag_hash()
                )
                tty.debug(msg)
                return True
        except Exception as e:
            msg = ('unexpected error while trying to import module '
                   '"{0}" from spec "{1}" [error="{2}"]')
            tty.warn(msg.format(module, candidate_spec, str(e)))
        else:
            msg = "Spec {0} did not provide module {1}"
            tty.warn(msg.format(candidate_spec, module))

        sys.path = sys.path[:-2]

    return False

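The probe-and-rollback idiom above extends `sys.path` with the candidate's site-packages directories, attempts the import, and pops the two added entries off again when the spec did not provide the module. A self-contained sketch of the same idiom (generic paths, no Spack involved):

```python
import sys

def try_import_from(module, candidate_paths):
    """Extend sys.path, try the import, and undo the extension on failure."""
    sys.path.extend(candidate_paths)
    try:
        __import__(module)
        return True
    except ImportError:
        # roll back exactly the entries we added
        del sys.path[-len(candidate_paths):]
        return False

assert try_import_from('json', ['/nonexistent/lib', '/nonexistent/lib64'])
assert not try_import_from('no_such_module_xyz', ['/nonexistent'])
```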
def _fix_ext_suffix(candidate_spec):
    """Fix the external suffixes of Python extensions on the fly for
    platforms that may need it

    Args:
        candidate_spec (Spec): installed spec with a Python module
            to be checked.
    """
    # Here we map target families to the patterns expected
    # by pristine CPython. Only architectures with known issues
    # are included. Known issues:
    #
    # [RHEL + ppc64le]: https://github.com/spack/spack/issues/25734
    #
    _suffix_to_be_checked = {
        'ppc64le': {
            'glob': '*.cpython-*-powerpc64le-linux-gnu.so',
            're': r'.cpython-[\w]*-powerpc64le-linux-gnu.so',
            'fmt': r'{module}.cpython-{major}{minor}m-powerpc64le-linux-gnu.so'
        }
    }

    # If the current architecture is not problematic return
    generic_target = archspec.cpu.host().family
    if str(generic_target) not in _suffix_to_be_checked:
        return

    # If there's no EXT_SUFFIX (Python < 3.5) or the suffix matches
    # the expectations, return since the package is surely good
    ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
    if ext_suffix is None:
        return

    expected = _suffix_to_be_checked[str(generic_target)]
    if fnmatch.fnmatch(ext_suffix, expected['glob']):
        return

    # If we are here it means the current interpreter expects different names
    # than pristine CPython. So:
    # 1. Find what we have installed
    # 2. Create symbolic links for the other names, if they're not there already

    # Check if standard names are installed and if we have to create
    # links for this interpreter
    standard_extensions = fs.find(candidate_spec.prefix, expected['glob'])
    link_names = [re.sub(expected['re'], ext_suffix, s) for s in standard_extensions]
    for file_name, link_name in zip(standard_extensions, link_names):
        if os.path.exists(link_name):
            continue
        os.symlink(file_name, link_name)

    # Check if this interpreter installed something and we have to create
    # links for a standard CPython interpreter
    non_standard_extensions = fs.find(candidate_spec.prefix, '*' + ext_suffix)
    for abs_path in non_standard_extensions:
        directory, filename = os.path.split(abs_path)
        module = filename.split('.')[0]
        link_name = os.path.join(directory, expected['fmt'].format(
            module=module, major=sys.version_info[0], minor=sys.version_info[1])
        )
        if os.path.exists(link_name):
            continue
        os.symlink(abs_path, link_name)

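The symlinking above papers over interpreters whose `EXT_SUFFIX` differs from pristine CPython: for every extension installed under one suffix, it derives the sibling name under the other suffix and links the two. A minimal sketch of the renaming step, with hypothetical suffix values and file names:

```python
import re

# Suffix the running interpreter reports (hypothetical RHEL-style value)
ext_suffix = '.cpython-38-ppc64le-redhat-linux-gnu.so'
# Pattern pristine CPython would have used on the same platform
standard_re = r'\.cpython-[\w-]*-powerpc64le-linux-gnu\.so'

installed = '_clingo.cpython-38-powerpc64le-linux-gnu.so'
link_name = re.sub(standard_re, ext_suffix, installed)
print(link_name)   # _clingo.cpython-38-ppc64le-redhat-linux-gnu.so
# os.symlink(installed, link_name) would then make both names resolvable.
```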
@_bootstrapper(type='buildcache')
class _BuildcacheBootstrapper(object):
    """Install the software needed during bootstrapping from a buildcache."""
    def __init__(self, conf):
        self.name = conf['name']
        self.url = conf['info']['url']

    def try_import(self, module, abstract_spec_str):
        if _try_import_from_store(module, abstract_spec_str):
            return True

        # Try to install from an unsigned binary cache
        abstract_spec = spack.spec.Spec(
            abstract_spec_str + ' ^' + spec_for_current_python()
        )

        # On Cray we want to use Linux binaries if available from mirrors
        bincache_platform = spack.architecture.real_platform()
        if str(bincache_platform) == 'cray':
            bincache_platform = spack.platforms.Linux()
            with spack.architecture.use_platform(bincache_platform):
                abstract_spec = spack.spec.Spec(
                    abstract_spec_str + ' ^' + spec_for_current_python()
                )

        # Read information on verified clingo binaries
        json_filename = '{0}.json'.format(module)
        json_path = os.path.join(
            spack.paths.share_path, 'bootstrap', self.name, json_filename
        )
        with open(json_path) as f:
            data = json.load(f)

        buildcache = spack.main.SpackCommand('buildcache')
        # Ensure we see only the buildcache being used to bootstrap
        mirror_scope = spack.config.InternalConfigScope(
            'bootstrap', {'mirrors:': {self.name: self.url}}
        )
        with spack.config.override(mirror_scope):
            # This index is currently needed to get the compiler used to build some
            # specs that we know by dag hash.
            spack.binary_distribution.binary_index.regenerate_spec_cache()
            index = spack.binary_distribution.update_cache_and_get_specs()
            for item in data['verified']:
                candidate_spec = item['spec']
                python_spec = item['python']
                # Skip specs which are not compatible
                if not abstract_spec.satisfies(candidate_spec):
                    continue

                if python_spec not in abstract_spec:
                    continue

                for pkg_name, pkg_hash, pkg_sha256 in item['binaries']:
                    msg = ('[BOOTSTRAP MODULE {0}] Try installing "{1}" from binary '
                           'cache at "{2}"')
                    tty.debug(msg.format(module, pkg_name, self.url))
                    index_spec = next(x for x in index if x.dag_hash() == pkg_hash)
                    # Reconstruct the compiler that we need to use for bootstrapping
                    compiler_entry = {
                        "modules": [],
                        "operating_system": str(index_spec.os),
                        "paths": {
                            "cc": "/dev/null",
                            "cxx": "/dev/null",
                            "f77": "/dev/null",
                            "fc": "/dev/null"
                        },
                        "spec": str(index_spec.compiler),
                        "target": str(index_spec.target.family)
                    }
                    with spack.architecture.use_platform(bincache_platform):
                        with spack.config.override(
                                'compilers', [{'compiler': compiler_entry}]
                        ):
                            spec_str = '/' + pkg_hash
                            install_args = [
                                'install',
                                '--sha256', pkg_sha256,
                                '-a', '-u', '-o', '-f', spec_str
                            ]
                            buildcache(*install_args, fail_on_error=False)
                    # TODO: undo installations that didn't complete?

        if _try_import_from_store(module, abstract_spec_str):
            return True
        return False

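`next(x for x in index if x.dag_hash() == pkg_hash)` raises StopIteration when the hash is absent from the mirror index. A small sketch of the lookup, including the guarded variant that yields a default instead (toy `Record` class, not Spack's Spec):

```python
class Record(object):
    def __init__(self, h):
        self._h = h

    def dag_hash(self):
        return self._h

index = [Record('abc123'), Record('def456')]

hit = next(r for r in index if r.dag_hash() == 'def456')
assert hit.dag_hash() == 'def456'

# With a default, a missing hash yields None instead of StopIteration
miss = next((r for r in index if r.dag_hash() == 'zzz'), None)
assert miss is None
```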
@_bootstrapper(type='install')
class _SourceBootstrapper(object):
    """Install the software needed during bootstrapping from sources."""
    def __init__(self, conf):
        self.conf = conf

    @staticmethod
    def try_import(module, abstract_spec_str):
        if _try_import_from_store(module, abstract_spec_str):
            return True

        # Try to build and install from sources
        with spack_python_interpreter():
            # Add hint to use frontend operating system on Cray
            if str(spack.architecture.platform()) == 'cray':
                abstract_spec_str += ' os=fe'

            concrete_spec = spack.spec.Spec(
                abstract_spec_str + ' ^' + spec_for_current_python()
            )

            if module == 'clingo':
                # TODO: remove when the old concretizer is deprecated
                concrete_spec._old_concretize()
            else:
                concrete_spec.concretize()

        msg = "[BOOTSTRAP MODULE {0}] Try installing '{1}' from sources"
        tty.debug(msg.format(module, abstract_spec_str))

        # Install the spec that should make the module importable
        concrete_spec.package.do_install()

        return _try_import_from_store(module, abstract_spec_str=abstract_spec_str)


def _make_bootstrapper(conf):
    """Return a bootstrap object built according to the
    configuration argument
    """
    btype = conf['type']
    return _bootstrap_methods[btype](conf)


def _source_is_trusted(conf):
    trusted, name = spack.config.get('bootstrap:trusted'), conf['name']
    if name not in trusted:
        return False
    return trusted[name]

def spec_for_current_python():
    """For bootstrapping purposes we are just interested in the Python
@@ -54,7 +336,7 @@ def spack_python_interpreter():
    which Spack is currently running as the only Python external spec
    available.
    """
    python_prefix = os.path.dirname(os.path.dirname(sys.executable))
    python_prefix = sys.exec_prefix
    external_python = spec_for_current_python()

    entry = {
@@ -68,72 +350,58 @@ def spack_python_interpreter():
    yield


def make_module_available(module, spec=None, install=False):
    """Ensure module is importable"""
    # If we already can import it, that's great
    try:
        __import__(module)
def ensure_module_importable_or_raise(module, abstract_spec=None):
    """Make the requested module available for import, or raise.

    This function tries to import a Python module in the current interpreter
    using, in order, the methods configured in bootstrap.yaml.

    If none of the methods succeed, an exception is raised. The function exits
    on first success.

    Args:
        module (str): module to be imported in the current interpreter
        abstract_spec (str): abstract spec that might provide the module. If not
            given it defaults to "module"

    Raises:
        ImportError: if the module couldn't be imported
    """
    # If we can import it already, that's great
    tty.debug("[BOOTSTRAP MODULE {0}] Try importing from Python".format(module))
    if _python_import(module):
        return
    except ImportError:
        pass

    # If it's already installed, use it
    # Search by spec
    spec = spack.spec.Spec(spec or module)
    abstract_spec = abstract_spec or module
    source_configs = spack.config.get('bootstrap:sources', [])
    for current_config in source_configs:
        if not _source_is_trusted(current_config):
            msg = ('[BOOTSTRAP MODULE {0}] Skipping source "{1}" since it is '
                   'not trusted').format(module, current_config['name'])
            tty.debug(msg)
            continue

    # We have to run as part of this python
    # We can constrain by a shortened version in place of a version range
    # because this spec is only used for querying or as a placeholder to be
    # replaced by an external that already has a concrete version. This syntax
    # is not sufficient when concretizing without an external, as it will
    # concretize to python@X.Y instead of python@X.Y.Z
    python_requirement = '^' + spec_for_current_python()
    spec.constrain(python_requirement)
    installed_specs = spack.store.db.query(spec, installed=True)

    for ispec in installed_specs:
        lib_spd = ispec['python'].package.default_site_packages_dir
        lib64_spd = lib_spd.replace('lib/', 'lib64/')
        module_paths = [
            os.path.join(ispec.prefix, lib_spd),
            os.path.join(ispec.prefix, lib64_spd)
        ]
        b = _make_bootstrapper(current_config)
        try:
            sys.path.extend(module_paths)
            __import__(module)
            return
        except ImportError:
            tty.warn("Spec %s did not provide module %s" % (ispec, module))
            sys.path = sys.path[:-2]
            if b.try_import(module, abstract_spec):
                return
        except Exception as e:
            msg = '[BOOTSTRAP MODULE {0}] Unexpected error "{1}"'
            tty.debug(msg.format(module, str(e)))

    def _raise_error(module_name, module_spec):
        error_msg = 'cannot import module "{0}"'.format(module_name)
        if module_spec:
            error_msg += ' from spec "{0}'.format(module_spec)
        raise ImportError(error_msg)
    # We couldn't import in any way, so raise an import error
    msg = 'cannot bootstrap the "{0}" Python module'.format(module)
    if abstract_spec:
        msg += ' from spec "{0}"'.format(abstract_spec)
    raise ImportError(msg)

    if not install:
        _raise_error(module, spec)

    with spack_python_interpreter():
        # We will install for ourselves, using this python if needed
        # Concretize the spec
        spec.concretize()
        spec.package.do_install()

    lib_spd = spec['python'].package.default_site_packages_dir
    lib64_spd = lib_spd.replace('lib/', 'lib64/')
    module_paths = [
        os.path.join(spec.prefix, lib_spd),
        os.path.join(spec.prefix, lib64_spd)
    ]
def _python_import(module):
    try:
        sys.path.extend(module_paths)
        __import__(module)
        return
    except ImportError:
        sys.path = sys.path[:-2]
        _raise_error(module, spec)
        return False
    return True

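The new `ensure_module_importable_or_raise` walks the configured sources in order, skips untrusted ones, and returns on the first bootstrapper whose `try_import` succeeds; only if every method fails does it raise. A compact sketch of that control flow (stand-in bootstrappers, not the real classes):

```python
def ensure_importable(module, sources):
    """Each source is a (trusted, try_import) pair; first success wins."""
    for trusted, try_import in sources:
        if not trusted:
            continue   # skip untrusted methods entirely
        try:
            if try_import(module):
                return
        except Exception as err:
            print('unexpected error: {0}'.format(err))
    raise ImportError('cannot bootstrap the "{0}" module'.format(module))

ensure_importable('clingo', [
    (False, lambda m: True),   # untrusted: never consulted
    (True, lambda m: False),   # fails, fall through
    (True, lambda m: True),    # succeeds, function returns
])
```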
def get_executable(exe, spec=None, install=False):
@@ -147,7 +415,8 @@ def get_executable(exe, spec=None, install=False):
    When ``install`` is True, Spack will use the python used to run Spack as an
    external. The ``install`` option should only be used with packages that
    install quickly (when using external python) or are guaranteed by Spack
    organization to be in a binary mirror (clingo)."""
    organization to be in a binary mirror (clingo).
    """
    # Search the system first
    runner = spack.util.executable.which(exe)
    if runner:
@@ -221,15 +490,17 @@ def _bootstrap_config_scopes():
@contextlib.contextmanager
def ensure_bootstrap_configuration():
    bootstrap_store_path = store_path()
    with spack.architecture.use_platform(spack.architecture.real_platform()):
        with spack.repo.use_repositories(spack.paths.packages_path):
            with spack.store.use_store(bootstrap_store_path):
                # Default configuration scopes excluding command line
                # and builtin but accounting for platform specific scopes
                config_scopes = _bootstrap_config_scopes()
                with spack.config.use_configuration(*config_scopes):
                    with spack_python_interpreter():
                        yield
    with spack.environment.deactivate_environment():
        with spack.architecture.use_platform(spack.architecture.real_platform()):
            with spack.repo.use_repositories(spack.paths.packages_path):
                with spack.store.use_store(bootstrap_store_path):
                    # Default configuration scopes excluding command line
                    # and builtin but accounting for platform specific scopes
                    config_scopes = _bootstrap_config_scopes()
                    with spack.config.use_configuration(*config_scopes):
                        with spack.modules.disable_modules():
                            with spack_python_interpreter():
                                yield


def store_path():
@@ -260,14 +531,17 @@ def clingo_root_spec():
    else:
        spec_str += ' %gcc'

    # Add hint to use frontend operating system on Cray
    if str(spack.architecture.platform()) == 'cray':
        spec_str += ' os=fe'

    # Add the generic target
    generic_target = archspec.cpu.host().family
    spec_str += ' target={0}'.format(str(generic_target))

    tty.debug('[BOOTSTRAP ROOT SPEC] clingo: {0}'.format(spec_str))

    return spack.spec.Spec(spec_str)
    return spec_str


def ensure_clingo_importable_or_raise():
    """Ensure that the clingo module is available for import."""
    ensure_module_importable_or_raise(
        module='clingo', abstract_spec=clingo_root_spec()
    )
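The deeply nested `with` blocks in `ensure_bootstrap_configuration` compose several temporary states (platform, repositories, store, configuration, modules, interpreter) around a single `yield`. A sketch of the same composition flattened with `contextlib.ExitStack`, using generic context managers rather than Spack's:

```python
import contextlib

@contextlib.contextmanager
def use(name):
    print('enter', name)
    try:
        yield
    finally:
        print('exit', name)

@contextlib.contextmanager
def bootstrap_configuration():
    # Enter every scope in order; ExitStack unwinds them in reverse on exit
    with contextlib.ExitStack() as stack:
        for scope in ('platform', 'repositories', 'store', 'config'):
            stack.enter_context(use(scope))
        yield

with bootstrap_configuration():
    print('bootstrapping here')
```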
@@ -61,6 +61,7 @@
import spack.schema.environment
import spack.store
import spack.subprocess_context
import spack.user_environment
import spack.util.path
from spack.error import NoHeadersError, NoLibrariesError
from spack.util.cpus import cpus_available
@@ -69,6 +70,7 @@
    env_flag,
    filter_system_paths,
    get_path,
    inspect_path,
    is_system_path,
    preserve_environment,
    system_dirs,
@@ -781,6 +783,13 @@ def setup_package(pkg, dirty, context='build'):
                "config to assume that the package is part of the system"
                " includes and omit it when invoked with '--cflags'.")
    elif context == 'test':
        env.extend(
            inspect_path(
                pkg.spec.prefix,
                spack.user_environment.prefix_inspections(pkg.spec.platform),
                exclude=is_system_path
            )
        )
        pkg.setup_run_environment(env)
        env.prepend_path('PATH', '.')
@@ -254,9 +254,9 @@ def define_from_variant(self, cmake_var, variant=None):

        .. code-block:: python

           [define_from_variant('BUILD_SHARED_LIBS', 'shared'),
            define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'),
            define_from_variant('SWR')]
           [self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
            self.define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'),
            self.define_from_variant('SWR')]

        will generate the following configuration options:

@@ -5,6 +5,7 @@

import spack.variant
from spack.directives import conflicts, depends_on, variant
from spack.multimethod import when
from spack.package import PackageBase


@@ -85,92 +86,104 @@ def cuda_flags(arch_list):
    # apply to platform=darwin. We currently do not provide conflicts for
    # platform=darwin with %apple-clang.

    # GCC
    # According to
    # https://github.com/spack/spack/pull/25054#issuecomment-886531664
    # these conflicts are valid independently from the architecture

    # minimum supported versions
    conflicts('%gcc@:4', when='+cuda ^cuda@11.0:')
    conflicts('%gcc@:5', when='+cuda ^cuda@11.4:')

    # maximum supported version
    # NOTE:
    # in order to not constrain future cuda version to old gcc versions,
    # it has been decided to use an upper bound for the latest version.
    # This implies that the last one in the list has to be updated at
    # each release of a new cuda minor version.
    conflicts('%gcc@10:', when='+cuda ^cuda@:11.0')
    conflicts('%gcc@11:', when='+cuda ^cuda@:11.4')

    # https://gist.github.com/ax3l/9489132#gistcomment-3860114
    conflicts('%gcc@10', when='+cuda ^cuda@:11.4.0')

    # Linux x86_64 compiler conflicts from here:
    # https://gist.github.com/ax3l/9489132
    conflicts('%gcc@5:', when='+cuda ^cuda@:7.5 target=x86_64:')
    conflicts('%gcc@6:', when='+cuda ^cuda@:8 target=x86_64:')
    conflicts('%gcc@7:', when='+cuda ^cuda@:9.1 target=x86_64:')
    conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=x86_64:')
    conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89 target=x86_64:')
    conflicts('%gcc@:4', when='+cuda ^cuda@11.0.2: target=x86_64:')
    conflicts('%gcc@10:', when='+cuda ^cuda@:11.0.3 target=x86_64:')
    conflicts('%gcc@11:', when='+cuda ^cuda@:11.1.0 target=x86_64:')
    conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27 target=x86_64:')
    conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5 target=x86_64:')
    conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8 target=x86_64:')
    conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1 target=x86_64:')
    conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10 target=x86_64:')
    conflicts('%pgi@:17,20:',
              when='+cuda ^cuda@10.1.105:10.2.89 target=x86_64:')
    conflicts('%pgi@:17,21:',
              when='+cuda ^cuda@11.0.2:11.1.0 target=x86_64:')
    conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5 target=x86_64:')
    conflicts('%clang@:3.7,4:',
              when='+cuda ^cuda@8.0:9.0 target=x86_64:')
    conflicts('%clang@:3.7,4.1:',
              when='+cuda ^cuda@9.1 target=x86_64:')
    conflicts('%clang@:3.7,5.1:', when='+cuda ^cuda@9.2 target=x86_64:')
    conflicts('%clang@:3.7,6.1:', when='+cuda ^cuda@10.0.130 target=x86_64:')
    conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105 target=x86_64:')
    conflicts('%clang@:3.7,8.1:',
              when='+cuda ^cuda@10.1.105:10.1.243 target=x86_64:')
    conflicts('%clang@:3.2,9:', when='+cuda ^cuda@10.2.89 target=x86_64:')
    conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=x86_64:')
    conflicts('%clang@10:', when='+cuda ^cuda@:11.0.3 target=x86_64:')
    conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=x86_64:')
    with when('~allow-unsupported-compilers'):
        conflicts('%gcc@5:', when='+cuda ^cuda@:7.5 target=x86_64:')
        conflicts('%gcc@6:', when='+cuda ^cuda@:8 target=x86_64:')
        conflicts('%gcc@7:', when='+cuda ^cuda@:9.1 target=x86_64:')
        conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=x86_64:')
        conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89 target=x86_64:')
        conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27 target=x86_64:')
        conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5 target=x86_64:')
        conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8 target=x86_64:')
        conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1 target=x86_64:')
        conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10 target=x86_64:')
        conflicts('%pgi@:17,20:', when='+cuda ^cuda@10.1.105:10.2.89 target=x86_64:')
        conflicts('%pgi@:17,21:', when='+cuda ^cuda@11.0.2:11.1.0 target=x86_64:')
        conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5 target=x86_64:')
        conflicts('%clang@:3.7,4:', when='+cuda ^cuda@8.0:9.0 target=x86_64:')
        conflicts('%clang@:3.7,4.1:', when='+cuda ^cuda@9.1 target=x86_64:')
        conflicts('%clang@:3.7,5.1:', when='+cuda ^cuda@9.2 target=x86_64:')
        conflicts('%clang@:3.7,6.1:', when='+cuda ^cuda@10.0.130 target=x86_64:')
        conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105 target=x86_64:')
        conflicts('%clang@:3.7,8.1:',
                  when='+cuda ^cuda@10.1.105:10.1.243 target=x86_64:')
        conflicts('%clang@:3.2,9:', when='+cuda ^cuda@10.2.89 target=x86_64:')
        conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=x86_64:')
        conflicts('%clang@10:', when='+cuda ^cuda@:11.0.3 target=x86_64:')
        conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=x86_64:')

    # x86_64 vs. ppc64le differ according to NVidia docs
    # Linux ppc64le compiler conflicts from Table from the docs below:
    # https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html
    # https://docs.nvidia.com/cuda/archive/9.2/cuda-installation-guide-linux/index.html
    # https://docs.nvidia.com/cuda/archive/9.1/cuda-installation-guide-linux/index.html
    # https://docs.nvidia.com/cuda/archive/9.0/cuda-installation-guide-linux/index.html
    # https://docs.nvidia.com/cuda/archive/8.0/cuda-installation-guide-linux/index.html
        # x86_64 vs. ppc64le differ according to NVidia docs
        # Linux ppc64le compiler conflicts from Table from the docs below:
        # https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html
        # https://docs.nvidia.com/cuda/archive/9.2/cuda-installation-guide-linux/index.html
        # https://docs.nvidia.com/cuda/archive/9.1/cuda-installation-guide-linux/index.html
        # https://docs.nvidia.com/cuda/archive/9.0/cuda-installation-guide-linux/index.html
        # https://docs.nvidia.com/cuda/archive/8.0/cuda-installation-guide-linux/index.html

    # information prior to CUDA 9 difficult to find
    conflicts('%gcc@6:', when='+cuda ^cuda@:9 target=ppc64le:')
    conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=ppc64le:')
    conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243 target=ppc64le:')
    # officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le
    conflicts('%gcc@:4', when='+cuda ^cuda@11.0.2: target=ppc64le:')
    conflicts('%gcc@10:', when='+cuda ^cuda@:11.0.3 target=ppc64le:')
    conflicts('%gcc@11:', when='+cuda ^cuda@:11.1.0 target=ppc64le:')
    conflicts('%pgi', when='+cuda ^cuda@:8 target=ppc64le:')
    conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185 target=ppc64le:')
    conflicts('%pgi@:17', when='+cuda ^cuda@:10 target=ppc64le:')
    conflicts('%clang@4:', when='+cuda ^cuda@:9.0.176 target=ppc64le:')
    conflicts('%clang@5:', when='+cuda ^cuda@:9.1 target=ppc64le:')
    conflicts('%clang@6:', when='+cuda ^cuda@:9.2 target=ppc64le:')
    conflicts('%clang@7:', when='+cuda ^cuda@10.0.130 target=ppc64le:')
    conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105 target=ppc64le:')
    conflicts('%clang@8.1:', when='+cuda ^cuda@:10.2.89 target=ppc64le:')
    conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=ppc64le:')
    conflicts('%clang@10:', when='+cuda ^cuda@:11.0.3 target=ppc64le:')
    conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=ppc64le:')
        # information prior to CUDA 9 difficult to find
        conflicts('%gcc@6:', when='+cuda ^cuda@:9 target=ppc64le:')
        conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=ppc64le:')
        conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243 target=ppc64le:')
        # officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le
        conflicts('%pgi', when='+cuda ^cuda@:8 target=ppc64le:')
        conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185 target=ppc64le:')
        conflicts('%pgi@:17', when='+cuda ^cuda@:10 target=ppc64le:')
        conflicts('%clang@4:', when='+cuda ^cuda@:9.0.176 target=ppc64le:')
        conflicts('%clang@5:', when='+cuda ^cuda@:9.1 target=ppc64le:')
        conflicts('%clang@6:', when='+cuda ^cuda@:9.2 target=ppc64le:')
        conflicts('%clang@7:', when='+cuda ^cuda@10.0.130 target=ppc64le:')
        conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105 target=ppc64le:')
        conflicts('%clang@8.1:', when='+cuda ^cuda@:10.2.89 target=ppc64le:')
        conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=ppc64le:')
        conflicts('%clang@10:', when='+cuda ^cuda@:11.0.2 target=ppc64le:')
        conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=ppc64le:')

    # Intel is mostly relevant for x86_64 Linux, even though it also
    # exists for Mac OS X. No information prior to CUDA 3.2 or Intel 11.1
    conflicts('%intel@:11.0', when='+cuda ^cuda@:3.1')
    conflicts('%intel@:12.0', when='+cuda ^cuda@5.5:')
    conflicts('%intel@:13.0', when='+cuda ^cuda@6.0:')
    conflicts('%intel@:13.2', when='+cuda ^cuda@6.5:')
    conflicts('%intel@:14.9', when='+cuda ^cuda@7:')
    # Intel 15.x is compatible with CUDA 7 thru current CUDA
    conflicts('%intel@16.0:', when='+cuda ^cuda@:8.0.43')
    conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60')
    conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9')
    conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0')
    conflicts('%intel@19.1:', when='+cuda ^cuda@:10.1')
    conflicts('%intel@19.2:', when='+cuda ^cuda@:11.1.0')
        # Intel is mostly relevant for x86_64 Linux, even though it also
        # exists for Mac OS X. No information prior to CUDA 3.2 or Intel 11.1
        conflicts('%intel@:11.0', when='+cuda ^cuda@:3.1')
        conflicts('%intel@:12.0', when='+cuda ^cuda@5.5:')
        conflicts('%intel@:13.0', when='+cuda ^cuda@6.0:')
        conflicts('%intel@:13.2', when='+cuda ^cuda@6.5:')
        conflicts('%intel@:14.9', when='+cuda ^cuda@7:')
        # Intel 15.x is compatible with CUDA 7 thru current CUDA
        conflicts('%intel@16.0:', when='+cuda ^cuda@:8.0.43')
        conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60')
        conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9')
        conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0')
        conflicts('%intel@19.1:', when='+cuda ^cuda@:10.1')
        conflicts('%intel@19.2:', when='+cuda ^cuda@:11.1.0')

    # XL is mostly relevant for ppc64le Linux
    conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1')
    conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2')
    conflicts('%xl@:12,17:', when='+cuda ^cuda@:11.1.0')
        # XL is mostly relevant for ppc64le Linux
        conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1')
        conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2')
        conflicts('%xl@:12,17:', when='+cuda ^cuda@:11.1.0')

    # Darwin.
    # TODO: add missing conflicts for %apple-clang cuda@:10
    conflicts('platform=darwin', when='+cuda ^cuda@11.0.2:')
        # Darwin.
        # TODO: add missing conflicts for %apple-clang cuda@:10
        conflicts('platform=darwin', when='+cuda ^cuda@11.0.2: ')

    # Make sure cuda_arch can not be used without +cuda
    for value in cuda_arch_values:
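The NOTE in the hunk above describes an upper-bound policy: each CUDA series pins the newest supported host GCC, and the last entry must be bumped with every new CUDA minor release so future CUDA versions are not needlessly constrained. A hypothetical illustration of the bookkeeping behind those `conflicts` lines (made-up table, not the authoritative support matrix):

```python
supported = [   # (newest cuda series covered, newest supported gcc major)
    ('11.0', 9),
    ('11.4', 10),
]

def gcc_conflicts(table):
    for cuda_upper, gcc_max in table:
        yield ('%gcc@{0}:'.format(gcc_max + 1),
               '+cuda ^cuda@:{0}'.format(cuda_upper))

for compiler, when_spec in gcc_conflicts(supported):
    print('conflicts({0!r}, when={1!r})'.format(compiler, when_spec))
# conflicts('%gcc@10:', when='+cuda ^cuda@:11.0')
# conflicts('%gcc@11:', when='+cuda ^cuda@:11.4')
```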
@@ -127,7 +127,10 @@ def import_modules(self):
            list: list of strings of module names
        """
        modules = []
        root = self.spec['python'].package.get_python_lib(prefix=self.prefix)
        root = os.path.join(
            self.prefix,
            self.spec['python'].package.config_vars['python_lib']['false']['false'],
        )

        # Some Python libraries are packages: collections of modules
        # distributed in directories containing __init__.py files
@@ -252,12 +255,11 @@ def install_args(self, spec, prefix):
        # Get all relative paths since we set the root to `prefix`
        # We query the python with which these will be used for the lib and inc
        # directories. This ensures we use `lib`/`lib64` as expected by python.
        pure_site_packages_dir = spec['python'].package.get_python_lib(
            plat_specific=False, prefix='')
        plat_site_packages_dir = spec['python'].package.get_python_lib(
            plat_specific=True, prefix='')
        inc_dir = spec['python'].package.get_python_inc(
            plat_specific=True, prefix='')
        pure_site_packages_dir = spec['python'].package.config_vars[
            'python_lib']['false']['false']
        plat_site_packages_dir = spec['python'].package.config_vars[
            'python_lib']['true']['false']
        inc_dir = spec['python'].package.config_vars['python_inc']['true']

        args += ['--root=%s' % prefix,
                 '--install-purelib=%s' % pure_site_packages_dir,
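The `config_vars[...]['false']['false']` lookups above index a nested dict whose keys are the *strings* 'true'/'false' (stringified plat_specific/standard_lib flags). A tiny sketch of that shape, with made-up paths:

```python
config_vars = {
    'python_lib': {
        'false': {'false': 'lib/python3.8/site-packages'},    # pure, non-plat
        'true':  {'false': 'lib64/python3.8/site-packages'},  # plat-specific
    },
    'python_inc': {'true': 'include/python3.8'},
}

pure = config_vars['python_lib']['false']['false']
plat = config_vars['python_lib']['true']['false']
inc = config_vars['python_inc']['true']
assert pure.startswith('lib/') and plat.startswith('lib64/')
```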
@@ -64,7 +64,10 @@ def import_modules(self):
            list: list of strings of module names
        """
        modules = []
        root = self.spec['python'].package.get_python_lib(prefix=self.prefix)
        root = os.path.join(
            self.prefix,
            self.spec['python'].package.config_vars['python_lib']['false']['false'],
        )

        # Some Python libraries are packages: collections of modules
        # distributed in directories containing __init__.py files
@@ -45,6 +45,8 @@
]

SPACK_PR_MIRRORS_ROOT_URL = 's3://spack-binaries-prs'
SPACK_SHARED_PR_MIRROR_URL = url_util.join(SPACK_PR_MIRRORS_ROOT_URL,
                                           'shared_pr_mirror')
TEMP_STORAGE_MIRROR_NAME = 'ci_temporary_mirror'

spack_gpg = spack.main.SpackCommand('gpg')
@@ -612,11 +614,14 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
            'strip-compilers': False,
        })

    # Add this mirror if it's enabled, as some specs might be up to date
    # here and thus not need to be rebuilt.
    # Add per-PR mirror (and shared PR mirror) if enabled, as some specs might
    # be up to date in one of those and thus not need to be rebuilt.
    if pr_mirror_url:
        spack.mirror.add(
            'ci_pr_mirror', pr_mirror_url, cfg.default_modify_scope())
        spack.mirror.add('ci_shared_pr_mirror',
                         SPACK_SHARED_PR_MIRROR_URL,
                         cfg.default_modify_scope())

    pipeline_artifacts_dir = artifacts_root
    if not pipeline_artifacts_dir:
@@ -871,6 +876,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
                    tty.debug(debug_msg)

                if prune_dag and not rebuild_spec:
                    tty.debug('Pruning spec that does not need to be rebuilt.')
                    continue

                # Check if this spec is in our list of known failures, now that
@@ -917,7 +923,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
                    bc_root = os.path.join(
                        local_mirror_dir, 'build_cache')
                    artifact_paths.extend([os.path.join(bc_root, p) for p in [
                        bindist.tarball_name(release_spec, '.spec.yaml'),
                        bindist.tarball_name(release_spec, '.spec.json'),
                        bindist.tarball_name(release_spec, '.cdashid'),
                        bindist.tarball_directory_name(release_spec),
                    ]])
@@ -1376,13 +1382,13 @@ def read_cdashid_from_mirror(spec, mirror_url):
    return int(contents)


def push_mirror_contents(env, spec, yaml_path, mirror_url, sign_binaries):
def push_mirror_contents(env, spec, specfile_path, mirror_url, sign_binaries):
    try:
        unsigned = not sign_binaries
        tty.debug('Creating buildcache ({0})'.format(
            'unsigned' if unsigned else 'signed'))
        spack.cmd.buildcache._createtarball(
            env, spec_yaml=yaml_path, add_deps=False,
            env, spec_file=specfile_path, add_deps=False,
            output_location=mirror_url, force=True, allow_root=True,
            unsigned=unsigned)
    except Exception as inst:
@@ -21,6 +21,7 @@
from llnl.util.tty.color import colorize

import spack.config
import spack.environment as ev
import spack.error
import spack.extensions
import spack.paths
@@ -186,29 +187,13 @@ def matching_spec_from_env(spec):
    If no matching spec is found in the environment (or if no environment is
    active), this will return the given spec but concretized.
    """
    env = spack.environment.get_env({}, cmd_name)
    env = ev.active_environment()
    if env:
        return env.matching_spec(spec) or spec.concretized()
    else:
        return spec.concretized()


def elide_list(line_list, max_num=10):
    """Takes a long list and limits it to a smaller number of elements,
    replacing intervening elements with '...'. For example::

        elide_list([1,2,3,4,5,6], 4)

    gives::

        [1, 2, 3, '...', 6]
    """
    if len(line_list) > max_num:
        return line_list[:max_num - 1] + ['...'] + line_list[-1:]
    else:
        return line_list


def disambiguate_spec(spec, env, local=False, installed=True, first=False):
    """Given a spec, figure out which installed package it refers to.

@@ -277,14 +262,14 @@ def display_specs_as_json(specs, deps=False):
        if spec.dag_hash() in seen:
            continue
        seen.add(spec.dag_hash())
        records.append(spec.to_record_dict())
        records.append(spec.to_node_dict())

        if deps:
            for dep in spec.traverse():
                if dep.dag_hash() in seen:
                    continue
                seen.add(dep.dag_hash())
                records.append(dep.to_record_dict())
                records.append(dep.to_node_dict())

    sjson.dump(records, sys.stdout)

@@ -501,3 +486,71 @@ def extant_file(f):
    if not os.path.isfile(f):
        raise argparse.ArgumentTypeError('%s does not exist' % f)
    return f


def require_active_env(cmd_name):
    """Used by commands to get the active environment.

    If an environment is not found, print an error message that says the
    calling command *needs* an active environment.

    Arguments:
        cmd_name (str): name of calling command

    Returns:
        (spack.environment.Environment): the active environment
    """
    env = ev.active_environment()

    if env:
        return env
    else:
        tty.die(
            '`spack %s` requires an environment' % cmd_name,
            'activate an environment first:',
            '    spack env activate ENV',
            'or use:',
            '    spack -e ENV %s ...' % cmd_name)


def find_environment(args):
    """Find active environment from args or environment variable.

    Check for an environment in this order:
        1. via ``spack -e ENV`` or ``spack -D DIR`` (arguments)
        2. via a path in the spack.environment.spack_env_var environment variable.

    If an environment is found, read it in. If not, return None.

    Arguments:
        args (argparse.Namespace): argparse namespace with command arguments

    Returns:
        (spack.environment.Environment): a found environment, or ``None``
    """

    # treat env as a name
    env = args.env
    if env:
        if ev.exists(env):
            return ev.read(env)

    else:
        # if env was specified, see if it is a directory; otherwise, look
        # at env_dir (env and env_dir are mutually exclusive)
        env = args.env_dir

    # if no argument, look for the environment variable
    if not env:
        env = os.environ.get(ev.spack_env_var)

    # nothing was set; there's no active environment
    if not env:
        return None

    # if we get here, env isn't the name of a spack environment; it has
    # to be a path to an environment, or there is something wrong.
    if ev.is_env_dir(env):
        return ev.Environment(env)

    raise ev.SpackEnvironmentError('no environment in %s' % env)
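`find_environment` resolves the active environment with a fixed precedence: explicit CLI name, then CLI directory, then the environment variable, then None. A self-contained sketch of that precedence (the variable name `SPACK_ENV` here is a stand-in for whatever `ev.spack_env_var` holds):

```python
import os

def resolve_env(cli_name=None, cli_dir=None, env_var='SPACK_ENV'):
    """First non-empty source wins; None means 'no active environment'."""
    return cli_name or cli_dir or os.environ.get(env_var)

os.environ['SPACK_ENV'] = '/tmp/my-env'
assert resolve_env(cli_name='dev') == 'dev'       # CLI arguments win
assert resolve_env() == '/tmp/my-env'             # fall back to the variable
del os.environ['SPACK_ENV']
assert resolve_env() is None                      # nothing set anywhere
```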
@@ -30,8 +30,7 @@ def activate(parser, args):
    if len(specs) != 1:
        tty.die("activate requires one spec. %d given." % len(specs))

    env = ev.get_env(args, 'activate')
    spec = spack.cmd.disambiguate_spec(specs[0], env)
    spec = spack.cmd.disambiguate_spec(specs[0], ev.active_environment())
    if not spec.package.is_extension:
        tty.die("%s is not an extension." % spec.name)

@@ -7,7 +7,6 @@

import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev

description = 'add a spec to an environment'
section = "environments"
@@ -22,7 +21,7 @@ def setup_parser(subparser):


def add(parser, args):
    env = ev.get_env(args, 'add', required=True)
    env = spack.cmd.require_active_env(cmd_name='add')

    with env.write_transaction():
        for spec in spack.cmd.parse_specs(args.specs):
@@ -95,7 +95,7 @@ def analyze(parser, args, **kwargs):
        sys.exit(0)

    # handle active environment, if any
    env = ev.get_env(args, 'analyze')
    env = ev.active_environment()

    # Get and disambiguate the spec (we should only have one)
    specs = spack.cmd.parse_specs(args.spec)
@@ -2,6 +2,7 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.tty as tty
import llnl.util.tty.color as cl

import spack.audit
@@ -19,12 +20,24 @@ def setup_parser(subparser):
    # Audit configuration files
    sp.add_parser('configs', help='audit configuration files')

    # Https and other linting
    https_parser = sp.add_parser('packages-https', help='check https in packages')
    https_parser.add_argument(
        '--all',
        action='store_true',
        default=False,
        dest='check_all',
        help="audit all packages"
    )

    # Audit package recipes
    pkg_parser = sp.add_parser('packages', help='audit package recipes')
    pkg_parser.add_argument(
        'name', metavar='PKG', nargs='*',
        help='package to be analyzed (if none all packages will be processed)',
    )

    for group in [pkg_parser, https_parser]:
        group.add_argument(
            'name', metavar='PKG', nargs='*',
            help='package to be analyzed (if none all packages will be processed)',
        )

    # List all checks
    sp.add_parser('list', help='list available checks and exits')
@@ -41,6 +54,17 @@ def packages(parser, args):
    _process_reports(reports)


def packages_https(parser, args):

    # Since auditing all packages takes a long time, --all is required
    # when no package name is given
    if not args.check_all and not args.name:
        tty.die("Please specify one or more packages to audit, or --all.")

    pkgs = args.name or spack.repo.path.all_package_names()
    reports = spack.audit.run_group(args.subcommand, pkgs=pkgs)
    _process_reports(reports)


def list(parser, args):
    for subcommand, check_tags in spack.audit.GROUPS.items():
        print(cl.colorize('@*b{' + subcommand + '}:'))
@@ -58,6 +82,7 @@ def audit(parser, args):
    subcommands = {
        'configs': configs,
        'packages': packages,
        'packages-https': packages_https,
        'list': list
    }
    subcommands[args.subcommand](parser, args)
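The loop over `[pkg_parser, https_parser]` registers the same positional on several subparsers so its definition is written once. A runnable sketch of that argparse pattern (toy parser, illustrative names):

```python
import argparse

parser = argparse.ArgumentParser(prog='audit')
sub = parser.add_subparsers(dest='subcommand')
pkg = sub.add_parser('packages')
https = sub.add_parser('packages-https')
https.add_argument('--all', action='store_true', dest='check_all')

for group in (pkg, https):
    # identical argument definition, shared by both subcommands
    group.add_argument('name', metavar='PKG', nargs='*')

args = parser.parse_args(['packages-https', '--all'])
assert args.check_all and args.name == []
```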
@@ -2,10 +2,13 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function

import os.path
import shutil

import llnl.util.tty
import llnl.util.tty.color

import spack.cmd.common.arguments
import spack.config
@@ -51,6 +54,27 @@ def setup_parser(subparser):
        help='set the bootstrap directory to this value'
    )

    list = sp.add_parser(
        'list', help='list the methods available for bootstrapping'
    )
    _add_scope_option(list)

    trust = sp.add_parser(
        'trust', help='trust a bootstrapping method'
    )
    _add_scope_option(trust)
    trust.add_argument(
        'name', help='name of the method to be trusted'
    )

    untrust = sp.add_parser(
        'untrust', help='untrust a bootstrapping method'
    )
    _add_scope_option(untrust)
    untrust.add_argument(
        'name', help='name of the method to be untrusted'
    )


def _enable_or_disable(args):
    # Set to True if we called "enable", otherwise set to false
@@ -100,11 +124,97 @@ def _root(args):
    print(root)


def _list(args):
    sources = spack.config.get(
        'bootstrap:sources', default=None, scope=args.scope
    )

    if not sources:
        llnl.util.tty.msg(
            "No method available for bootstrapping Spack's dependencies"
        )
        return

    def _print_method(source, trusted):
        color = llnl.util.tty.color

        def fmt(header, content):
            header_fmt = "@*b{{{0}:}} {1}"
            color.cprint(header_fmt.format(header, content))

        trust_str = "@*y{UNKNOWN}"
        if trusted is True:
            trust_str = "@*g{TRUSTED}"
        elif trusted is False:
            trust_str = "@*r{UNTRUSTED}"

        fmt("Name", source['name'] + ' ' + trust_str)
        print()
        fmt("  Type", source['type'])
        print()

        info_lines = ['\n']
        for key, value in source.get('info', {}).items():
            info_lines.append(' ' * 4 + '@*{{{0}}}: {1}\n'.format(key, value))
        if len(info_lines) > 1:
            fmt("  Info", ''.join(info_lines))

        description_lines = ['\n']
        for line in source['description'].split('\n'):
            description_lines.append(' ' * 4 + line + '\n')

        fmt("  Description", ''.join(description_lines))

    trusted = spack.config.get('bootstrap:trusted', {})
    for s in sources:
        _print_method(s, trusted.get(s['name'], None))


def _write_trust_state(args, value):
    name = args.name
    sources = spack.config.get('bootstrap:sources')

    matches = [s for s in sources if s['name'] == name]
    if not matches:
        names = [s['name'] for s in sources]
        msg = ('there is no bootstrapping method named "{0}". Valid '
               'method names are: {1}'.format(name, ', '.join(names)))
        raise RuntimeError(msg)

    if len(matches) > 1:
        msg = ('there is more than one bootstrapping method named "{0}". '
               'Please delete all methods but one from bootstrap.yaml '
               'before proceeding').format(name)
        raise RuntimeError(msg)

    # Setting the scope explicitly is needed to not copy over to a new scope
    # the entire default configuration for bootstrap.yaml
    scope = args.scope or spack.config.default_modify_scope('bootstrap')
    spack.config.add(
        'bootstrap:trusted:{0}:{1}'.format(name, str(value)), scope=scope
    )


def _trust(args):
    _write_trust_state(args, value=True)
    msg = '"{0}" is now trusted for bootstrapping'
    llnl.util.tty.msg(msg.format(args.name))


def _untrust(args):
    _write_trust_state(args, value=False)
    msg = '"{0}" is now untrusted and will not be used for bootstrapping'
    llnl.util.tty.msg(msg.format(args.name))


def bootstrap(parser, args):
    callbacks = {
        'enable': _enable_or_disable,
        'disable': _enable_or_disable,
        'reset': _reset,
        'root': _root
        'root': _root,
        'list': _list,
        'trust': _trust,
        'untrust': _untrust
    }
    callbacks[args.subcommand](args)
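The `bootstrap` entry point dispatches subcommands through a plain dict of callbacks, so adding `list`/`trust`/`untrust` is just three new entries. A compact, runnable sketch of the same idea (hypothetical method name and args shape):

```python
def _trust(args):
    print('trusting', args['name'])

def _untrust(args):
    print('untrusting', args['name'])

callbacks = {'trust': _trust, 'untrust': _untrust}

def bootstrap(subcommand, args):
    callbacks[subcommand](args)   # a KeyError means an unknown subcommand

bootstrap('trust', {'name': 'github-actions'})
```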
@@ -2,10 +2,11 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import argparse
import os
import shutil
import sys
import tempfile

import llnl.util.tty as tty

@@ -15,16 +16,20 @@
import spack.cmd.common.arguments as arguments
import spack.config
import spack.environment as ev
import spack.fetch_strategy as fs
import spack.hash_types as ht
import spack.mirror
import spack.relocate
import spack.repo
import spack.spec
import spack.store
import spack.util.crypto
import spack.util.url as url_util
import spack.util.web as web_util
from spack.cmd import display_specs
from spack.error import SpecError
from spack.spec import Spec, save_dependency_spec_yamls
from spack.spec import Spec, save_dependency_specfiles
from spack.stage import Stage
from spack.util.string import plural

description = "create, download and install binary packages"
@@ -70,8 +75,9 @@ def setup_parser(subparser):
    create.add_argument('--rebuild-index', action='store_true',
                        default=False, help="Regenerate buildcache index " +
                                            "after building package(s)")
    create.add_argument('-y', '--spec-yaml', default=None,
                        help='Create buildcache entry for spec from yaml file')
    create.add_argument('--spec-file', default=None,
                        help=('Create buildcache entry for spec from json or ' +
                              'yaml file'))
    create.add_argument('--only', default='package,dependencies',
                        dest='things_to_install',
                        choices=['package', 'dependencies'],
@@ -97,6 +103,8 @@ def setup_parser(subparser):
    install.add_argument('-o', '--otherarch', action='store_true',
                         help="install specs from other architectures" +
                              " instead of default platform and OS")
    # This argument is needed by the bootstrapping logic to verify checksums
    install.add_argument('--sha256', help=argparse.SUPPRESS)

    arguments.add_common_arguments(install, ['specs'])
    install.set_defaults(func=installtarball)
@@ -156,8 +164,9 @@ def setup_parser(subparser):
        help='Check single spec instead of release specs file')

    check.add_argument(
        '-y', '--spec-yaml', default=None,
        help='Check single spec from yaml file instead of release specs file')
        '--spec-file', default=None,
        help=('Check single spec from json or yaml file instead of release ' +
              'specs file'))

    check.add_argument(
        '--rebuild-on-error', default=False, action='store_true',
@@ -166,14 +175,15 @@ def setup_parser(subparser):

    check.set_defaults(func=check_binaries)

    # Download tarball and spec.yaml
    # Download tarball and specfile
    dltarball = subparsers.add_parser('download', help=get_tarball.__doc__)
    dltarball.add_argument(
        '-s', '--spec', default=None,
        help="Download built tarball for spec from mirror")
    dltarball.add_argument(
        '-y', '--spec-yaml', default=None,
        help="Download built tarball for spec (from yaml file) from mirror")
        '--spec-file', default=None,
        help=("Download built tarball for spec (from json or yaml file) " +
              "from mirror"))
    dltarball.add_argument(
        '-p', '--path', default=None,
        help="Path to directory where tarball should be downloaded")
@@ -189,26 +199,27 @@ def setup_parser(subparser):
        '-s', '--spec', default=None,
        help='Spec string for which buildcache name is desired')
    getbuildcachename.add_argument(
        '-y', '--spec-yaml', default=None,
        help='Path to spec yaml file for which buildcache name is desired')
        '--spec-file', default=None,
        help=('Path to spec json or yaml file for which buildcache name is ' +
              'desired'))
    getbuildcachename.set_defaults(func=get_buildcache_name)

    # Given the root spec, save the yaml of the dependent spec to a file
    saveyaml = subparsers.add_parser('save-yaml',
                                     help=save_spec_yamls.__doc__)
    saveyaml.add_argument(
    savespecfile = subparsers.add_parser('save-specfile',
                                         help=save_specfiles.__doc__)
    savespecfile.add_argument(
        '--root-spec', default=None,
        help='Root spec of dependent spec')
    saveyaml.add_argument(
        '--root-spec-yaml', default=None,
        help='Path to yaml file containing root spec of dependent spec')
    saveyaml.add_argument(
    savespecfile.add_argument(
        '--root-specfile', default=None,
        help='Path to json or yaml file containing root spec of dependent spec')
    savespecfile.add_argument(
        '-s', '--specs', default=None,
        help='List of dependent specs for which saved yaml is desired')
    saveyaml.add_argument(
        '-y', '--yaml-dir', default=None,
    savespecfile.add_argument(
        '--specfile-dir', default=None,
        help='Path to directory where spec yamls should be saved')
    saveyaml.set_defaults(func=save_spec_yamls)
    savespecfile.set_defaults(func=save_specfiles)

    # Copy buildcache from some directory to another mirror url
    copy = subparsers.add_parser('copy', help=buildcache_copy.__doc__)
@@ -216,13 +227,44 @@ def setup_parser(subparser):
        '--base-dir', default=None,
        help='Path to mirror directory (root of existing buildcache)')
    copy.add_argument(
        '--spec-yaml', default=None,
        help='Path to spec yaml file representing buildcache entry to copy')
        '--spec-file', default=None,
        help=('Path to spec json or yaml file representing buildcache entry to' +
              ' copy'))
    copy.add_argument(
        '--destination-url', default=None,
        help='Destination mirror url')
    copy.set_defaults(func=buildcache_copy)

    # Sync buildcache entries from one mirror to another
    sync = subparsers.add_parser('sync', help=buildcache_sync.__doc__)
    source = sync.add_mutually_exclusive_group(required=True)
    source.add_argument('--src-directory',
                        metavar='DIRECTORY',
                        type=str,
                        help="Source mirror as a local file path")
    source.add_argument('--src-mirror-name',
                        metavar='MIRROR_NAME',
                        type=str,
                        help="Name of the source mirror")
    source.add_argument('--src-mirror-url',
                        metavar='MIRROR_URL',
                        type=str,
                        help="URL of the source mirror")
    dest = sync.add_mutually_exclusive_group(required=True)
    dest.add_argument('--dest-directory',
                      metavar='DIRECTORY',
                      type=str,
                      help="Destination mirror as a local file path")
    dest.add_argument('--dest-mirror-name',
                      metavar='MIRROR_NAME',
                      type=str,
                      help="Name of the destination mirror")
    dest.add_argument('--dest-mirror-url',
                      metavar='MIRROR_URL',
                      type=str,
                      help="URL of the destination mirror")
    sync.set_defaults(func=buildcache_sync)

    # Update buildcache index without copying any additional packages
    update_index = subparsers.add_parser(
        'update-index', help=buildcache_update_index.__doc__)
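The new `sync` subcommand uses argparse's mutually exclusive groups with `required=True`: exactly one way of naming each mirror must be given. A runnable sketch of that feature in isolation (toy program name and URL):

```python
import argparse

parser = argparse.ArgumentParser(prog='buildcache-sync')
source = parser.add_mutually_exclusive_group(required=True)
source.add_argument('--src-directory')
source.add_argument('--src-mirror-name')
source.add_argument('--src-mirror-url')

args = parser.parse_args(['--src-mirror-url', 's3://example-mirror'])
assert args.src_mirror_url == 's3://example-mirror'
# Passing two of the options at once makes parse_args() exit with an error,
# and omitting all three fails too because the group is required.
```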
@@ -329,16 +371,19 @@ def match_downloaded_specs(pkgs, allow_multiple_matches=False, force=False,
    return specs_from_cli


def _createtarball(env, spec_yaml=None, packages=None, add_spec=True,
def _createtarball(env, spec_file=None, packages=None, add_spec=True,
                   add_deps=True, output_location=os.getcwd(),
                   signing_key=None, force=False, make_relative=False,
                   unsigned=False, allow_root=False, rebuild_index=False):
    if spec_yaml:
        with open(spec_yaml, 'r') as fd:
            yaml_text = fd.read()
            tty.debug('createtarball read spec yaml:')
            tty.debug(yaml_text)
            s = Spec.from_yaml(yaml_text)
    if spec_file:
        with open(spec_file, 'r') as fd:
            specfile_contents = fd.read()
            tty.debug('createtarball read specfile contents:')
            tty.debug(specfile_contents)
            if spec_file.endswith('.json'):
                s = Spec.from_json(specfile_contents)
            else:
                s = Spec.from_yaml(specfile_contents)
        package = '/{0}'.format(s.dag_hash())
        matches = find_matching_specs(package, env=env)

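The specfile format is sniffed from the file extension: `.json` parses as JSON, anything else falls back to YAML. A minimal sketch of the same dispatch with plain parsers (PyYAML is an assumed dependency for the YAML branch):

```python
import json

def load_spec_dict(path, contents):
    """Parse a specfile as JSON when the name says so, else as YAML."""
    if path.endswith('.json'):
        return json.loads(contents)
    import yaml   # assumed available via PyYAML
    return yaml.safe_load(contents)

assert load_spec_dict('pkg.spec.json', '{"spec": []}') == {'spec': []}
```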
@@ -351,7 +396,7 @@ def _createtarball(env, spec_yaml=None, packages=None, add_spec=True,
    else:
        tty.die("build cache file creation requires at least one" +
                " installed package spec, an active environment," +
                " or else a path to a yaml file containing a spec" +
                " or else a path to a json or yaml file containing a spec" +
                " to install")
    specs = set()

@@ -420,7 +465,7 @@ def createtarball(args):
    """create a binary package from an existing install"""

    # restrict matching to current environment if one is active
    env = ev.get_env(args, 'buildcache create')
    env = ev.active_environment()

    output_location = None
    if args.directory:
@@ -460,7 +505,7 @@ def createtarball(args):
    add_spec = ('package' in args.things_to_install)
    add_deps = ('dependencies' in args.things_to_install)

    _createtarball(env, spec_yaml=args.spec_yaml, packages=args.specs,
    _createtarball(env, spec_file=args.spec_file, packages=args.specs,
                   add_spec=add_spec, add_deps=add_deps,
                   output_location=output_location, signing_key=args.key,
                   force=args.force, make_relative=args.rel,
@@ -495,6 +540,15 @@ def install_tarball(spec, args):
    else:
        tarball = bindist.download_tarball(spec)
        if tarball:
            if args.sha256:
                checker = spack.util.crypto.Checker(args.sha256)
                msg = ('cannot verify checksum for "{0}"'
                       ' [expected={1}]')
                msg = msg.format(tarball, args.sha256)
                if not checker.check(tarball):
                    raise spack.binary_distribution.NoChecksumException(msg)
                tty.debug('Verified SHA256 checksum of the build cache')

            tty.msg('Installing buildcache for spec %s' % spec.format())
            bindist.extract_tarball(spec, tarball, args.allow_root,
                                    args.unsigned, args.force)
@@ -552,10 +606,10 @@ def check_binaries(args):
|
||||
its result, specifically, if the exit code is non-zero, then at least
|
||||
one of the indicated specs needs to be rebuilt.
|
||||
"""
|
||||
if args.spec or args.spec_yaml:
|
||||
if args.spec or args.spec_file:
|
||||
specs = [get_concrete_spec(args)]
|
||||
else:
|
||||
env = ev.get_env(args, 'buildcache', required=True)
|
||||
env = spack.cmd.require_active_env(cmd_name='buildcache')
|
||||
env.concretize()
|
||||
specs = env.all_specs()
|
||||
|
||||
@@ -589,15 +643,16 @@ def download_buildcache_files(concrete_spec, local_dest, require_cdashid,
|
||||
|
||||
files_to_fetch = [
|
||||
{
|
||||
'url': tarball_path_name,
|
||||
'url': [tarball_path_name],
|
||||
'path': local_tarball_path,
|
||||
'required': True,
|
||||
}, {
|
||||
'url': bindist.tarball_name(concrete_spec, '.spec.yaml'),
|
||||
'url': [bindist.tarball_name(concrete_spec, '.spec.json'),
|
||||
bindist.tarball_name(concrete_spec, '.spec.yaml')],
|
||||
'path': local_dest,
|
||||
'required': True,
|
||||
}, {
|
||||
'url': bindist.tarball_name(concrete_spec, '.cdashid'),
|
||||
'url': [bindist.tarball_name(concrete_spec, '.cdashid')],
|
||||
'path': local_dest,
|
||||
'required': require_cdashid,
|
||||
},
|
||||
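Turning each `'url'` entry into a list lets the downloader try the new `.spec.json` name first and fall back to the legacy `.spec.yaml`. A simplified sketch of that try-in-order loop (`fetch_one` is a hypothetical callable, not a function from the diff):

```python
def fetch_first_available(candidate_urls, fetch_one):
    """Try candidate URLs in order; return the first successful fetch."""
    errors = []
    for url in candidate_urls:
        try:
            return fetch_one(url)
        except Exception as e:  # whatever the injected fetcher raises
            errors.append((url, e))
    raise IOError('all candidates failed: {0}'.format(errors))
```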
@@ -611,9 +666,9 @@ def get_tarball(args):
command uses the process exit code to indicate its result, specifically,
a non-zero exit code indicates that the command failed to download at
least one of the required buildcache components. Normally, just the
tarball and .spec.yaml files are required, but if the --require-cdashid
tarball and .spec.json files are required, but if the --require-cdashid
argument was provided, then a .cdashid file is also required."""
if not args.spec and not args.spec_yaml:
if not args.spec and not args.spec_file:
tty.msg('No specs provided, exiting.')
sys.exit(0)

@@ -630,7 +685,7 @@ def get_tarball(args):

def get_concrete_spec(args):
spec_str = args.spec
spec_yaml_path = args.spec_yaml
spec_yaml_path = args.spec_file

if not spec_str and not spec_yaml_path:
tty.msg('Must provide either spec string or path to ' +
@@ -662,14 +717,14 @@ def get_buildcache_name(args):
sys.exit(0)


def save_spec_yamls(args):
def save_specfiles(args):
"""Get full spec for dependencies, relative to root spec, and write them
to files in the specified output directory. Uses exit code to signal
success or failure. An exit code of zero means the command was likely
successful. If any errors or exceptions are encountered, or if expected
command-line arguments are not provided, then the exit code will be
non-zero."""
if not args.root_spec and not args.root_spec_yaml:
if not args.root_spec and not args.root_specfile:
tty.msg('No root spec provided, exiting.')
sys.exit(1)

@@ -677,20 +732,20 @@ def save_spec_yamls(args):
tty.msg('No dependent specs provided, exiting.')
sys.exit(1)

if not args.yaml_dir:
if not args.specfile_dir:
tty.msg('No yaml directory provided, exiting.')
sys.exit(1)

if args.root_spec_yaml:
with open(args.root_spec_yaml) as fd:
root_spec_as_yaml = fd.read()
if args.root_specfile:
with open(args.root_specfile) as fd:
root_spec_as_json = fd.read()
else:
root_spec = Spec(args.root_spec)
root_spec.concretize()
root_spec_as_yaml = root_spec.to_yaml(hash=ht.build_hash)

save_dependency_spec_yamls(
root_spec_as_yaml, args.yaml_dir, args.specs.split())
root_spec_as_json = root_spec.to_json(hash=ht.build_hash)
spec_format = 'yaml' if args.root_specfile.endswith('yaml') else 'json'
save_dependency_specfiles(
root_spec_as_json, args.specfile_dir, args.specs.split(), spec_format)

sys.exit(0)

@@ -699,10 +754,10 @@ def buildcache_copy(args):
"""Copy a buildcache entry and all its files from one mirror, given as
'--base-dir', to some other mirror, specified as '--destination-url'.
The specific buildcache entry to be copied from one location to the
other is identified using the '--spec-yaml' argument."""
other is identified using the '--spec-file' argument."""
# TODO: This sub-command should go away once #11117 is merged

if not args.spec_yaml:
if not args.spec_file:
tty.msg('No spec yaml provided, exiting.')
sys.exit(1)

@@ -722,12 +777,12 @@ def buildcache_copy(args):
sys.exit(1)

try:
with open(args.spec_yaml, 'r') as fd:
with open(args.spec_file, 'r') as fd:
spec = Spec.from_yaml(fd.read())
except Exception as e:
tty.debug(e)
tty.error('Unable to concrectize spec from yaml {0}'.format(
args.spec_yaml))
args.spec_file))
sys.exit(1)

dest_root_path = dest_url
@@ -742,10 +797,15 @@ def buildcache_copy(args):
tarball_dest_path = os.path.join(dest_root_path, tarball_rel_path)

specfile_rel_path = os.path.join(
build_cache_dir, bindist.tarball_name(spec, '.spec.yaml'))
build_cache_dir, bindist.tarball_name(spec, '.spec.json'))
specfile_src_path = os.path.join(args.base_dir, specfile_rel_path)
specfile_dest_path = os.path.join(dest_root_path, specfile_rel_path)

specfile_rel_path_yaml = os.path.join(
build_cache_dir, bindist.tarball_name(spec, '.spec.yaml'))
specfile_src_path_yaml = os.path.join(args.base_dir, specfile_rel_path)
specfile_dest_path_yaml = os.path.join(dest_root_path, specfile_rel_path)

cdashidfile_rel_path = os.path.join(
build_cache_dir, bindist.tarball_name(spec, '.cdashid'))
cdashid_src_path = os.path.join(args.base_dir, cdashidfile_rel_path)
@@ -761,12 +821,134 @@ def buildcache_copy(args):
tty.msg('Copying {0}'.format(specfile_rel_path))
shutil.copyfile(specfile_src_path, specfile_dest_path)

tty.msg('Copying {0}'.format(specfile_rel_path_yaml))
shutil.copyfile(specfile_src_path_yaml, specfile_dest_path_yaml)

# Copy the cdashid file (if exists) to the destination mirror
if os.path.exists(cdashid_src_path):
tty.msg('Copying {0}'.format(cdashidfile_rel_path))
shutil.copyfile(cdashid_src_path, cdashid_dest_path)

def buildcache_sync(args):
""" Syncs binaries (and associated metadata) from one mirror to another.
Requires an active environment in order to know which specs to sync.

Args:
src (str): Source mirror URL
dest (str): Destination mirror URL
"""
# Figure out the source mirror
source_location = None
if args.src_directory:
source_location = args.src_directory
scheme = url_util.parse(source_location, scheme='<missing>').scheme
if scheme != '<missing>':
raise ValueError(
'"--src-directory" expected a local path; got a URL, instead')
# Ensure that the mirror lookup does not mistake this for named mirror
source_location = 'file://' + source_location
elif args.src_mirror_name:
source_location = args.src_mirror_name
result = spack.mirror.MirrorCollection().lookup(source_location)
if result.name == "<unnamed>":
raise ValueError(
'no configured mirror named "{name}"'.format(
name=source_location))
elif args.src_mirror_url:
source_location = args.src_mirror_url
scheme = url_util.parse(source_location, scheme='<missing>').scheme
if scheme == '<missing>':
raise ValueError(
'"{url}" is not a valid URL'.format(url=source_location))

src_mirror = spack.mirror.MirrorCollection().lookup(source_location)
src_mirror_url = url_util.format(src_mirror.fetch_url)

# Figure out the destination mirror
dest_location = None
if args.dest_directory:
dest_location = args.dest_directory
scheme = url_util.parse(dest_location, scheme='<missing>').scheme
if scheme != '<missing>':
raise ValueError(
'"--dest-directory" expected a local path; got a URL, instead')
# Ensure that the mirror lookup does not mistake this for named mirror
dest_location = 'file://' + dest_location
elif args.dest_mirror_name:
dest_location = args.dest_mirror_name
result = spack.mirror.MirrorCollection().lookup(dest_location)
if result.name == "<unnamed>":
raise ValueError(
'no configured mirror named "{name}"'.format(
name=dest_location))
elif args.dest_mirror_url:
dest_location = args.dest_mirror_url
scheme = url_util.parse(dest_location, scheme='<missing>').scheme
if scheme == '<missing>':
raise ValueError(
'"{url}" is not a valid URL'.format(url=dest_location))

dest_mirror = spack.mirror.MirrorCollection().lookup(dest_location)
dest_mirror_url = url_util.format(dest_mirror.fetch_url)

# Get the active environment
env = spack.cmd.require_active_env(cmd_name='buildcache sync')

tty.msg('Syncing environment buildcache files from {0} to {1}'.format(
src_mirror_url, dest_mirror_url))

build_cache_dir = bindist.build_cache_relative_path()
buildcache_rel_paths = []

tty.debug('Syncing the following specs:')
for s in env.all_specs():
tty.debug('  {0}{1}: {2}'.format(
'* ' if s in env.roots() else '  ', s.name, s.dag_hash()))

buildcache_rel_paths.extend([
os.path.join(
build_cache_dir, bindist.tarball_path_name(s, '.spack')),
os.path.join(
build_cache_dir, bindist.tarball_name(s, '.spec.yaml')),
os.path.join(
build_cache_dir, bindist.tarball_name(s, '.spec.json')),
os.path.join(
build_cache_dir, bindist.tarball_name(s, '.cdashid'))
])

tmpdir = tempfile.mkdtemp()

try:
for rel_path in buildcache_rel_paths:
src_url = url_util.join(src_mirror_url, rel_path)
local_path = os.path.join(tmpdir, rel_path)
dest_url = url_util.join(dest_mirror_url, rel_path)

tty.debug('Copying {0} to {1} via {2}'.format(
src_url, dest_url, local_path))

stage = Stage(src_url,
name="temporary_file",
path=os.path.dirname(local_path),
keep=True)

try:
stage.create()
stage.fetch()
web_util.push_to_url(
local_path,
dest_url,
keep_original=True)
except fs.FetchError as e:
tty.debug('spack buildcache unable to sync {0}'.format(rel_path))
tty.debug(e)
finally:
stage.destroy()
finally:
shutil.rmtree(tmpdir)

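`buildcache_sync` stages each file from the source mirror into a temp directory, pushes it to the destination, and always cleans up, skipping files that fail to fetch. A local-filesystem sketch of the same fetch-then-push shape, with plain `shutil` standing in for Spack's `Stage` and `web_util.push_to_url` (all names below are illustrative):

```python
import os
import shutil
import tempfile

def _mkdirp(path):
    if path and not os.path.isdir(path):
        os.makedirs(path)

def sync_paths(src_root, dest_root, rel_paths):
    """Copy each relative path from src_root to dest_root via a temp dir."""
    tmpdir = tempfile.mkdtemp()
    try:
        for rel_path in rel_paths:
            src = os.path.join(src_root, rel_path)
            local = os.path.join(tmpdir, rel_path)
            dest = os.path.join(dest_root, rel_path)
            if not os.path.exists(src):
                continue  # a missing file is skipped, as in the diff
            _mkdirp(os.path.dirname(local))
            shutil.copyfile(src, local)   # stands in for stage.fetch()
            _mkdirp(os.path.dirname(dest))
            shutil.copyfile(local, dest)  # stands in for push_to_url()
    finally:
        shutil.rmtree(tmpdir)             # temp dir is always cleaned up
```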
def update_index(mirror_url, update_keys=False):
mirror = spack.mirror.MirrorCollection().lookup(mirror_url)
outdir = url_util.format(mirror.push_url)

@@ -63,6 +63,16 @@ def checksum(parser, args):
if not url_dict:
tty.die("Could not find any versions for {0}".format(pkg.name))

# And ensure the specified version URLs take precedence, if available
try:
explicit_dict = {}
for v in pkg.versions:
if not v.isdevelop():
explicit_dict[v] = pkg.url_for_version(v)
url_dict.update(explicit_dict)
except spack.package.NoURLError:
pass

version_lines = spack.stage.get_checksums_for_versions(
url_dict, pkg.name, keep_stage=args.keep_stage,
batch=(args.batch or len(args.versions) > 0 or len(url_dict) == 1),

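The checksum hunk gives explicitly declared version URLs precedence over scraped ones by building a second dict and merging it last. `dict.update` overwrites on key collision, so whatever is merged last wins (the URLs below are made up):

```python
scraped = {'1.0': 'https://example.com/guess-1.0.tar.gz',
           '1.1': 'https://example.com/guess-1.1.tar.gz'}
explicit = {'1.0': 'https://example.com/real-1.0.tar.gz'}

url_dict = dict(scraped)
url_dict.update(explicit)  # explicit entries win on collision
assert url_dict['1.0'].endswith('real-1.0.tar.gz')
assert url_dict['1.1'].endswith('guess-1.1.tar.gz')
```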
@@ -78,8 +78,8 @@ def setup_parser(subparser):
default=False, help="""Spack always check specs against configured
binary mirrors when generating the pipeline, regardless of whether or not
DAG pruning is enabled. This flag controls whether it might attempt to
fetch remote spec.yaml files directly (ensuring no spec is rebuilt if it is
present on the mirror), or whether it should reduce pipeline generation time
fetch remote spec files directly (ensuring no spec is rebuilt if it
is present on the mirror), or whether it should reduce pipeline generation time
by assuming all remote buildcache indices are up to date and only use those
to determine whether a given spec is up to date on mirrors. In the latter
case, specs might be needlessly rebuilt if remote buildcache indices are out
@@ -118,7 +118,7 @@ def ci_generate(args):
for creating a build group for the generated workload and registering
all generated jobs under that build group. If this environment
variable is not set, no build group will be created on CDash."""
env = ev.get_env(args, 'ci generate', required=True)
env = spack.cmd.require_active_env(cmd_name='ci generate')

output_file = args.output_file
copy_yaml_to = args.copy_to
@@ -152,7 +152,7 @@ def ci_generate(args):
def ci_reindex(args):
"""Rebuild the buildcache index associated with the mirror in the
active, gitlab-enabled environment. """
env = ev.get_env(args, 'ci rebuild-index', required=True)
env = spack.cmd.require_active_env(cmd_name='ci rebuild-index')
yaml_root = ev.config_dict(env.yaml)

if 'mirrors' not in yaml_root or len(yaml_root['mirrors'].values()) < 1:
@@ -169,7 +169,7 @@ def ci_rebuild(args):
"""Check a single spec against the remote mirror, and rebuild it from
source if the mirror does not contain the full hash match of the spec
as computed locally. """
env = ev.get_env(args, 'ci rebuild', required=True)
env = spack.cmd.require_active_env(cmd_name='ci rebuild')

# Make sure the environment is "gitlab-enabled", or else there's nothing
# to do.

@@ -104,6 +104,6 @@ def clean(parser, args):
if args.bootstrap:
msg = 'Removing software in "{0}"'
tty.msg(msg.format(spack.bootstrap.store_path()))
with spack.store.use_store(spack.bootstrap.store_path()):
with spack.bootstrap.ensure_bootstrap_configuration():
uninstall = spack.main.SpackCommand('uninstall')
uninstall('-a', '-y')
@@ -3,6 +3,7 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import spack.cmd
import spack.environment as ev

description = 'concretize an environment and write a lockfile'
@@ -23,7 +24,7 @@ def setup_parser(subparser):


def concretize(parser, args):
env = ev.get_env(args, 'concretize', required=True)
env = spack.cmd.require_active_env(cmd_name='concretize')

if args.test == 'all':
tests = True

@@ -118,7 +118,7 @@ def _get_scope_and_section(args):

# w/no args and an active environment, point to env manifest
if not section:
env = ev.get_env(args, 'config edit')
env = ev.active_environment()
if env:
scope = env.env_file_config_scope_name()


@@ -36,7 +36,7 @@ def deactivate(parser, args):
if len(specs) != 1:
tty.die("deactivate requires one spec.  %d given." % len(specs))

env = ev.get_env(args, 'deactivate')
env = ev.active_environment()
spec = spack.cmd.disambiguate_spec(specs[0], env)
pkg = spec.package


@@ -74,6 +74,7 @@ def create_db_tarball(args):
wd = os.path.dirname(str(spack.store.root))
with working_dir(wd):
files = [spack.store.db._index_path]
files += glob('%s/*/*/*/.spack/spec.json' % base)
files += glob('%s/*/*/*/.spack/spec.yaml' % base)
files = [os.path.relpath(f) for f in files]


@@ -41,7 +41,7 @@ def dependencies(parser, args):
tty.die("spack dependencies takes only one spec.")

if args.installed:
env = ev.get_env(args, 'dependencies')
env = ev.active_environment()
spec = spack.cmd.disambiguate_spec(specs[0], env)

format_string = '{name}{@version}{%compiler}{/hash:7}'

@@ -82,7 +82,7 @@ def dependents(parser, args):
tty.die("spack dependents takes only one spec.")

if args.installed:
env = ev.get_env(args, 'dependents')
env = ev.active_environment()
spec = spack.cmd.disambiguate_spec(specs[0], env)

format_string = '{name}{@version}{%compiler}{/hash:7}'

@@ -71,7 +71,7 @@ def setup_parser(sp):

def deprecate(parser, args):
"""Deprecate one spec in favor of another"""
env = ev.get_env(args, 'deprecate')
env = ev.active_environment()
specs = spack.cmd.parse_specs(args.specs)

if len(specs) != 2:

@@ -9,7 +9,6 @@

import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev
from spack.error import SpackError

description = "add a spec to an environment's dev-build information"
@@ -37,7 +36,7 @@ def setup_parser(subparser):


def develop(parser, args):
env = ev.get_env(args, 'develop', required=True)
env = spack.cmd.require_active_env(cmd_name='develop')

if not args.spec:
if args.clone is False:

@@ -175,7 +175,7 @@ def group_by_type(diffset):


def diff(parser, args):
env = ev.get_env(args, 'diff')
env = ev.active_environment()

if len(args.specs) != 2:
tty.die("You must provide two specs to diff.")
@@ -6,7 +6,6 @@
import os
import shutil
import sys
from collections import namedtuple

import llnl.util.filesystem as fs
import llnl.util.tty as tty
@@ -89,6 +88,11 @@ def env_activate(args):
)
return 1

# Error out when -e, -E, -D flags are given, cause they are ambiguous.
if args.env or args.no_env or args.env_dir:
tty.die('Calling spack env activate with --env, --env-dir and --no-env '
'is ambiguous')

if ev.exists(env) and not args.dir:
spack_env = ev.root(env)
short_name = env
@@ -106,10 +110,8 @@ def env_activate(args):
tty.debug("Environment %s is already active" % args.activate_env)
return

active_env = ev.get_env(namedtuple('args', ['env'])(env),
'activate')
cmds = ev.activate(
active_env, add_view=args.with_view, shell=args.shell,
ev.Environment(spack_env), add_view=args.with_view, shell=args.shell,
prompt=env_prompt if args.prompt else None
)
sys.stdout.write(cmds)
@@ -140,6 +142,11 @@ def env_deactivate(args):
)
return 1

# Error out when -e, -E, -D flags are given, cause they are ambiguous.
if args.env or args.no_env or args.env_dir:
tty.die('Calling spack env deactivate with --env, --env-dir and --no-env '
'is ambiguous')

if 'SPACK_ENV' not in os.environ:
tty.die('No environment is currently active.')

@@ -315,7 +322,7 @@ def env_view_setup_parser(subparser):


def env_view(args):
env = ev.get_env(args, 'env view')
env = ev.active_environment()

if env:
if args.action == ViewAction.regenerate:
@@ -342,7 +349,7 @@ def env_status_setup_parser(subparser):


def env_status(args):
env = ev.get_env(args, 'env status')
env = ev.active_environment()
if env:
if env.path == os.getcwd():
tty.msg('Using %s in current directory: %s'
@@ -373,7 +380,7 @@ def env_loads_setup_parser(subparser):


def env_loads(args):
env = ev.get_env(args, 'env loads', required=True)
env = spack.cmd.require_active_env(cmd_name='env loads')

# Set the module types that have been selected
module_type = args.module_type

@@ -67,7 +67,7 @@ def extensions(parser, args):
if not spec[0].package.extendable:
tty.die("%s is not an extendable package." % spec[0].name)

env = ev.get_env(args, 'extensions')
env = ev.active_environment()
spec = cmd.disambiguate_spec(spec[0], env)

if not spec.package.extendable:

@@ -47,7 +47,7 @@ def fetch(parser, args):
# fetch all uninstalled specs from it otherwise fetch all.
# If we are also not in an environment, complain to the
# user that we don't know what to do.
env = ev.get_env(args, "fetch")
env = ev.active_environment()
if env:
if args.missing:
specs = env.uninstalled_specs()

@@ -205,24 +205,24 @@ def display_env(env, args, decorator):


def find(parser, args):
q_args = query_arguments(args)
# Query the current store or the internal bootstrap store if required
if args.bootstrap:
bootstrap_store_path = spack.bootstrap.store_path()
msg = 'Showing internal bootstrap store at "{0}"'
tty.msg(msg.format(bootstrap_store_path))
with spack.store.use_store(bootstrap_store_path):
results = args.specs(**q_args)
else:
results = args.specs(**q_args)
with spack.bootstrap.ensure_bootstrap_configuration():
msg = 'Showing internal bootstrap store at "{0}"'
tty.msg(msg.format(bootstrap_store_path))
_find(parser, args)
return
_find(parser, args)


def _find(parser, args):
q_args = query_arguments(args)
results = args.specs(**q_args)

env = ev.active_environment()
decorator = lambda s, f: f
added = set()
removed = set()

env = ev.get_env(args, 'find')
if env:
decorator, added, roots, removed = setup_env(env)
decorator, _, roots, _ = setup_env(env)

# use groups by default except with format.
if args.groups is None:
@@ -233,7 +233,7 @@ def find(parser, args):
msg = "No package matches the query: {0}"
msg = msg.format(' '.join(args.constraint))
tty.msg(msg)
return 1
raise SystemExit(1)

# If tags have been specified on the command line, filter by tags
if args.tags:
@@ -7,7 +7,7 @@

import spack.cmd.common.arguments
import spack.cmd.uninstall
import spack.environment
import spack.environment as ev
import spack.store

description = "remove specs that are now no longer needed"
@@ -24,7 +24,7 @@ def gc(parser, args):

# Restrict garbage collection to the active environment
# speculating over roots that are yet to be installed
env = spack.environment.get_env(args=None, cmd_name='gc')
env = ev.active_environment()
if env:
msg = 'Restricting the garbage collection to the "{0}" environment'
tty.msg(msg.format(env.name))

@@ -10,6 +10,7 @@
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.config
import spack.environment as ev
import spack.store
from spack.graph import graph_ascii, graph_dot

@@ -35,7 +36,7 @@ def setup_parser(subparser):

subparser.add_argument(
'-i', '--installed', action='store_true',
help="graph all installed specs in dot format (implies --dot)")
help="graph installed specs, or specs in the active env (implies --dot)")

arguments.add_common_arguments(subparser, ['deptype', 'specs'])

@@ -45,7 +46,12 @@ def graph(parser, args):
if args.specs:
tty.die("Can't specify specs with --installed")
args.dot = True
specs = spack.store.db.query()

env = ev.active_environment()
if env:
specs = env.all_specs()
else:
specs = spack.store.db.query()

else:
specs = spack.cmd.parse_specs(args.specs, concretize=not args.static)

@@ -204,7 +204,7 @@ def install_specs(cli_args, kwargs, specs):
"""

# handle active environment, if any
env = ev.get_env(cli_args, 'install')
env = ev.active_environment()

try:
if env:
@@ -219,7 +219,7 @@ def install_specs(cli_args, kwargs, specs):

# If there is any ambiguity in the above call to matching_spec
# (i.e. if more than one spec in the environment matches), then
# SpackEnvironmentError is rasied, with a message listing the
# SpackEnvironmentError is raised, with a message listing the
# the matches.  Getting to this point means there were either
# no matches or exactly one match.

@@ -243,7 +243,7 @@ def install_specs(cli_args, kwargs, specs):

if m_spec in env.roots() or cli_args.no_add:
# either the single match is a root spec (and --no-add is
# the default for roots) or --no-add was stated explictly
# the default for roots) or --no-add was stated explicitly
tty.debug('just install {0}'.format(m_spec.name))
specs_to_install.append(m_spec)
else:
@@ -324,10 +324,14 @@ def get_tests(specs):
else:
return False

# Parse cli arguments and construct a dictionary
# that will be passed to the package installer
update_kwargs_from_args(args, kwargs)

if not args.spec and not args.specfiles:
# if there are no args but an active environment
# then install the packages from it.
env = ev.get_env(args, 'install')
env = ev.active_environment()
if env:
tests = get_tests(env.user_specs)
kwargs['tests'] = tests
@@ -352,7 +356,7 @@ def get_tests(specs):

tty.msg("Installing environment {0}".format(env.name))
with reporter('build'):
env.install_all(args, **kwargs)
env.install_all(**kwargs)

tty.debug("Regenerating environment views for {0}"
.format(env.name))
@@ -381,10 +385,6 @@ def get_tests(specs):
if args.deprecated:
spack.config.set('config:deprecated', True, scope='command_line')

# Parse cli arguments and construct a dictionary
# that will be passed to the package installer
update_kwargs_from_args(args, kwargs)

# 1. Abstract specs from cli
abstract_specs = spack.cmd.parse_specs(args.spec)
tests = get_tests(abstract_specs)
@@ -401,7 +401,10 @@ def get_tests(specs):
# 2. Concrete specs from yaml files
for file in args.specfiles:
with open(file, 'r') as f:
s = spack.spec.Spec.from_yaml(f)
if file.endswith('yaml') or file.endswith('yml'):
s = spack.spec.Spec.from_yaml(f)
else:
s = spack.spec.Spec.from_json(f)

concretized = s.concretized()
if concretized.dag_hash() != s.dag_hash():
@@ -55,7 +55,7 @@ def setup_parser(subparser):


def load(parser, args):
env = ev.get_env(args, 'load')
env = ev.active_environment()
specs = [spack.cmd.disambiguate_spec(spec, env, first=args.load_first)
for spec in spack.cmd.parse_specs(args.specs)]


@@ -11,7 +11,6 @@

import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment
import spack.environment as ev
import spack.paths
import spack.repo
@@ -73,7 +72,7 @@ def location(parser, args):
return

if args.location_env:
path = spack.environment.root(args.location_env)
path = ev.root(args.location_env)
if not os.path.isdir(path):
tty.die("no such environment: '%s'" % args.location_env)
print(path)
@@ -97,7 +96,7 @@ def location(parser, args):

# install_dir command matches against installed specs.
if args.install_dir:
env = ev.get_env(args, 'location')
env = ev.active_environment()
spec = spack.cmd.disambiguate_spec(specs[0], env)
print(spec.prefix)
return

@@ -253,7 +253,7 @@ def _determine_specs_to_mirror(args):
"To mirror all packages, use the '--all' option"
" (this will require significant time and space).")

env = ev.get_env(args, 'mirror')
env = ev.active_environment()
if env:
env_specs = env.all_specs()
else:

@@ -7,7 +7,6 @@

import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev

description = 'remove specs from an environment'
section = "environments"
@@ -28,7 +27,7 @@ def setup_parser(subparser):


def remove(parser, args):
env = ev.get_env(args, 'remove', required=True)
env = spack.cmd.require_active_env(cmd_name='remove')

with env.write_transaction():
if args.all:

@@ -34,7 +34,7 @@ def stage(parser, args):
spack.stage.create_stage_root(custom_path)

if not args.specs:
env = ev.get_env(args, 'stage')
env = ev.active_environment()
if env:
tty.msg("Staging specs from environment %s" % env.name)
for spec in env.specs_by_hash.values():

@@ -155,7 +155,7 @@ def test_run(args):
spack.config.set('config:fail_fast', True, scope='command_line')

# Get specs to test
env = ev.get_env(args, 'test')
env = ev.active_environment()
hashes = env.all_hashes() if env else None

specs = spack.cmd.parse_specs(args.specs) if args.specs else [None]
@@ -221,7 +221,7 @@ def test_list(args):

# TODO: This can be extended to have all of the output formatting options
# from `spack find`.
env = ev.get_env(args, 'test')
env = ev.active_environment()
hashes = env.all_hashes() if env else None

specs = spack.store.db.query(hashes=hashes)

@@ -7,7 +7,6 @@

import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev

description = 'remove specs from an environment'
section = "environments"
@@ -22,7 +21,7 @@ def setup_parser(subparser):


def undevelop(parser, args):
env = ev.get_env(args, 'undevelop', required=True)
env = spack.cmd.require_active_env(cmd_name='undevelop')

if args.all:
specs = env.dev_specs.keys()

@@ -311,7 +311,7 @@ def get_uninstall_list(args, specs, env):


def uninstall_specs(args, specs):
env = ev.get_env(args, 'uninstall')
env = ev.active_environment()

uninstall_list, remove_list = get_uninstall_list(args, specs, env)
anything_to_do = set(uninstall_list).union(set(remove_list))

@@ -9,6 +9,7 @@

import six.moves.urllib.parse as urllib_parse

import llnl.util.tty.color as color
from llnl.util import tty

import spack.fetch_strategy as fs
@@ -80,9 +81,13 @@ def setup_parser(subparser):
help='print a summary of how well we are parsing package urls')

# Stats
sp.add_parser(
stats_parser = sp.add_parser(
'stats',
help='print statistics on versions and checksums for all packages')
stats_parser.add_argument(
"--show-issues", action="store_true",
help="show packages with issues (md5 hashes, http urls)"
)


def url(parser, args):
@@ -262,6 +267,9 @@ def url_summary(args):


def url_stats(args):
# dictionary of issue type -> package -> descriptions
issues = defaultdict(lambda: defaultdict(lambda: []))

class UrlStats(object):
def __init__(self):
self.total = 0
@@ -270,7 +278,7 @@ def __init__(self):
self.url_type = defaultdict(lambda: 0)
self.git_type = defaultdict(lambda: 0)

def add(self, fetcher):
def add(self, pkg_name, fetcher):
self.total += 1

url_type = fetcher.url_attr
@@ -284,10 +292,18 @@ def add(self, fetcher):
algo = 'no checksum'
self.checksums[algo] += 1

if algo == "md5":
md5_hashes = issues["md5 hashes"]
md5_hashes[pkg_name].append(fetcher.url)

# parse out the URL scheme (https/http/ftp/etc.)
urlinfo = urllib_parse.urlparse(fetcher.url)
self.schemes[urlinfo.scheme] += 1

if urlinfo.scheme == "http":
http_urls = issues["http urls"]
http_urls[pkg_name].append(fetcher.url)

elif url_type == 'git':
if getattr(fetcher, 'commit', None):
self.git_type['commit'] += 1
@@ -305,13 +321,16 @@ def add(self, fetcher):
for pkg in spack.repo.path.all_packages():
npkgs += 1

for v, args in pkg.versions.items():
fetcher = fs.for_package_version(pkg, v)
version_stats.add(fetcher)
for v in pkg.versions:
try:
fetcher = fs.for_package_version(pkg, v)
except (fs.InvalidArgsError, fs.FetcherConflict):
continue
version_stats.add(pkg.name, fetcher)

for _, resources in pkg.resources.items():
for resource in resources:
resource_stats.add(resource.fetcher)
resource_stats.add(pkg.name, resource.fetcher)

# print a nice summary table
tty.msg("URL stats for %d packages:" % npkgs)
@@ -361,6 +380,21 @@ def print_stat(indent, name, stat_name=None):
print_stat(4, git_type, "git_type")
print_line()

if args.show_issues:
total_issues = sum(
len(issues)
for _, pkg_issues in issues.items()
for _, issues in pkg_issues.items()
)
print()
tty.msg("Found %d issues." % total_issues)
for issue_type, pkgs in issues.items():
tty.msg("Package URLs with %s" % issue_type)
for pkg, pkg_issues in pkgs.items():
color.cprint("  @*C{%s}" % pkg)
for issue in pkg_issues:
print("    %s" % issue)

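The `issues` structure above is a two-level mapping (issue type → package → list of offending URLs) built from nested `defaultdict`s so the stats collector can append without pre-creating keys. A minimal demonstration of the same shape (`defaultdict(list)` is equivalent to the diff's `defaultdict(lambda: [])`; the package names and URLs are made up):

```python
from collections import defaultdict

# issue type -> package name -> list of offending URLs
issues = defaultdict(lambda: defaultdict(list))

issues['md5 hashes']['pkg-a'].append('http://example.com/a-1.0.tar.gz')
issues['http urls']['pkg-a'].append('http://example.com/a-1.0.tar.gz')

total = sum(len(urls)
            for pkg_map in issues.values()
            for urls in pkg_map.values())
assert total == 2
```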
def print_name_and_version(url):
"""Prints a URL. Underlines the detected name with dashes and

@@ -74,7 +74,7 @@ def verify(parser, args):

elif args.specs_or_files:
# construct disambiguated spec list
env = ev.get_env(args, 'verify')
env = ev.active_environment()
specs = list(map(lambda x: spack.cmd.disambiguate_spec(x, env,
local=local),
spec_args))

@@ -202,7 +202,7 @@ def view(parser, args):

elif args.action in actions_link:
# only link commands need to disambiguate specs
env = ev.get_env(args, 'view link')
env = ev.active_environment()
specs = [spack.cmd.disambiguate_spec(s, env) for s in specs]

elif args.action in actions_status:

@@ -544,7 +544,7 @@ def arguments_to_detect_version_fn(operating_system, paths):
function by providing a method called with the same name.

Args:
operating_system (spack.architecture.OperatingSystem): the operating system
operating_system (spack.operating_systems.OperatingSystem): the operating system
on which we are looking for compilers
paths: paths to search for compilers


@@ -33,9 +33,11 @@
import spack.abi
import spack.architecture
import spack.compilers
import spack.environment
import spack.error
import spack.repo
import spack.spec
import spack.target
import spack.tengine
import spack.variant as vt
from spack.config import config
@@ -66,7 +68,7 @@ def concretize_develop(self, spec):
"""
Add ``dev_path=*`` variant to packages built from local source.
"""
env = spack.environment.get_env(None, None)
env = spack.environment.active_environment()
dev_info = env.dev_specs.get(spec.name, {}) if env else {}
if not dev_info:
return False
@@ -255,8 +257,7 @@ def concretize_architecture(self, spec):
# Get platform of nearest spec with a platform, including spec
# If spec has a platform, easy
if spec.architecture.platform:
new_plat = spack.architecture.get_platform(
spec.architecture.platform)
new_plat = spack.platforms.by_name(spec.architecture.platform)
else:
# Else if anyone else has a platform, take the closest one
# Search up, then down, along build/link deps first
@@ -265,8 +266,7 @@ def concretize_architecture(self, spec):
spec, lambda x: x.architecture and x.architecture.platform
)
if platform_spec:
new_plat = spack.architecture.get_platform(
platform_spec.architecture.platform)
new_plat = spack.platforms.by_name(platform_spec.architecture.platform)
else:
# If no platform anywhere in this spec, grab the default
new_plat = spack.architecture.platform()
@@ -611,9 +611,7 @@ def _adjust_target(self, spec):
# Try to adjust the target only if it is the default
# target for this platform
current_target = spec.architecture.target
current_platform = spack.architecture.get_platform(
spec.architecture.platform
)
current_platform = spack.platforms.by_name(spec.architecture.platform)

default_target = current_platform.target('default_target')
if PackagePrefs.has_preferred_targets(spec.name):
@@ -632,7 +630,7 @@ def _adjust_target(self, spec):
for ancestor in microarchitecture.ancestors:
candidate = None
try:
candidate = spack.architecture.Target(ancestor)
candidate = spack.target.Target(ancestor)
candidate.optimization_flags(spec.compiler)
except archspec.cpu.UnsupportedMicroarchitecture:
continue

@@ -442,7 +442,8 @@ def pop_scope(self):

@_config_mutator
def remove_scope(self, scope_name):
return self.scopes.pop(scope_name)
"""Remove scope by name; has no effect when ``scope_name`` does not exist"""
return self.scopes.pop(scope_name, None)

@property
def file_scopes(self):
@@ -749,10 +750,11 @@ def override(path_or_scope, value=None):
config.push_scope(overrides)
config.set(path_or_scope, value, scope=scope_name)

yield config

scope = config.remove_scope(overrides.name)
assert scope is overrides
try:
yield config
finally:
scope = config.remove_scope(overrides.name)
assert scope is overrides


#: configuration scopes added on the command line
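Wrapping the `yield` in `try`/`finally` is what makes the `override` hunk exception-safe: the scope is removed even when the caller's `with` body raises. The same shape, as a tiny self-contained sketch (the helper name and dict-based "scopes" are illustrative, not Spack's API):

```python
from contextlib import contextmanager

@contextmanager
def override_scope(scopes, name, value):
    scopes[name] = value
    try:
        yield scopes
    finally:
        scopes.pop(name, None)  # runs on success *and* on exceptions

scopes = {}
try:
    with override_scope(scopes, 'overrides', 42):
        raise RuntimeError('boom')
except RuntimeError:
    pass
assert 'overrides' not in scopes  # cleaned up despite the exception
```

Note that `dict.pop(key, None)` also mirrors the `remove_scope` change above: with a default supplied, removal of a missing key is a no-op instead of a `KeyError`.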

@@ -7,7 +7,7 @@
"""
import warnings

import spack.environment
import spack.environment as ev
import spack.schema.env as env
import spack.util.spack_yaml as syaml

@@ -36,7 +36,7 @@ def validate(configuration_file):
config = syaml.load(f)

# Ensure we have a "container" attribute with sensible defaults set
env_dict = spack.environment.config_dict(config)
env_dict = ev.config_dict(config)
env_dict.setdefault('container', {
'format': 'docker',
'images': {'os': 'ubuntu:18.04', 'spack': 'develop'}

@@ -8,7 +8,7 @@
import collections
import copy

import spack.environment
import spack.environment as ev
import spack.schema.env
import spack.tengine as tengine
import spack.util.spack_yaml as syaml
@@ -37,7 +37,7 @@ def create(configuration):
Args:
configuration: how to generate the current recipe
"""
name = spack.environment.config_dict(configuration)['container']['format']
name = ev.config_dict(configuration)['container']['format']
return _writer_factory[name](configuration)


@@ -56,7 +56,7 @@ class PathContext(tengine.Context):
directly via PATH.
"""
def __init__(self, config):
self.config = spack.environment.config_dict(config)
self.config = ev.config_dict(config)
self.container_config = self.config['container']

@tengine.context_property

@@ -40,6 +40,7 @@
import llnl.util.filesystem as fs
import llnl.util.tty as tty

import spack.hash_types as ht
import spack.repo
import spack.spec
import spack.store
@@ -66,7 +67,7 @@ def nullcontext(*args, **kwargs):
# DB version.  This is stuck in the DB file to track changes in format.
# Increment by one when the database format changes.
# Versions before 5 were not integers.
_db_version = Version('5')
_db_version = Version('6')

# For any version combinations here, skip reindex when upgrading.
# Reindexing can take considerable time and is not always necessary.
@@ -77,6 +78,7 @@ def nullcontext(*args, **kwargs):
# fields.  So, skip the reindex for this transition. The new
# version is saved to disk the first time the DB is written.
(Version('0.9.3'), Version('5')),
(Version('5'), Version('6'))
]

# Default timeout for spack database locks in seconds or None (no timeout).
@@ -645,7 +647,7 @@ def _write_to_file(self, stream):
except (TypeError, ValueError) as e:
raise sjson.SpackJSONError("error writing JSON database:", str(e))

def _read_spec_from_dict(self, hash_key, installs):
def _read_spec_from_dict(self, hash_key, installs, hash=ht.dag_hash):
"""Recursively construct a spec from a hash in a YAML database.

Does not do any locking.
@@ -654,8 +656,13 @@ def _read_spec_from_dict(self, hash_key, installs):

# Install records don't include hash with spec, so we add it in here
# to ensure it is read properly.
for name in spec_dict:
spec_dict[name]['hash'] = hash_key
if 'name' not in spec_dict.keys():
# old format, can't update format here
for name in spec_dict:
spec_dict[name]['hash'] = hash_key
else:
# new format, already a singleton
spec_dict[hash.name] = hash_key

# Build spec from dict first.
spec = spack.spec.Spec.from_node_dict(spec_dict)
@@ -686,10 +693,13 @@ def _assign_dependencies(self, hash_key, installs, data):
# Add dependencies from other records in the install DB to
# form a full spec.
spec = data[hash_key].spec
spec_dict = installs[hash_key]['spec']
if 'dependencies' in spec_dict[spec.name]:
yaml_deps = spec_dict[spec.name]['dependencies']
for dname, dhash, dtypes in spack.spec.Spec.read_yaml_dep_specs(
spec_node_dict = installs[hash_key]['spec']
if 'name' not in spec_node_dict:
# old format
spec_node_dict = spec_node_dict[spec.name]
if 'dependencies' in spec_node_dict:
yaml_deps = spec_node_dict['dependencies']
for dname, dhash, dtypes, _ in spack.spec.Spec.read_yaml_dep_specs(
yaml_deps):
# It is important that we always check upstream installations
# in the same order, and that we always check the local
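Both database hunks key old/new detection off whether the stored dict has a top-level `'name'`: the legacy format nests the node under the spec's name, while the new format stores the node flat. A sketch of that sniffing (field names follow the diff; the sample dicts are made up):

```python
def normalize_spec_node(spec_dict, spec_name):
    """Return the node dict whether stored in old or new layout."""
    if 'name' not in spec_dict:
        # old format: {<spec name>: {...node fields...}}
        return spec_dict[spec_name]
    # new format: the dict already is the node
    return spec_dict

old = {'zlib': {'version': '1.2.11', 'dependencies': {}}}
new = {'name': 'zlib', 'version': '1.2.11'}
assert normalize_spec_node(old, 'zlib')['version'] == '1.2.11'
assert normalize_spec_node(new, 'zlib')['version'] == '1.2.11'
```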

@@ -12,7 +12,8 @@

import ruamel.yaml as yaml

from llnl.util.filesystem import mkdirp
import llnl.util.filesystem as fs
import llnl.util.tty as tty

import spack.config
import spack.hash_types as ht
@@ -33,50 +34,282 @@ def _check_concrete(spec):

class DirectoryLayout(object):
"""A directory layout is used to associate unique paths with specs.
Different installations are going to want differnet layouts for their
install, and they can use this to customize the nesting structure of
spack installs.
Different installations are going to want different layouts for their
install, and they can use this to customize the nesting structure of
spack installs. The default layout is:

* <install root>/

* <platform-os-target>/

* <compiler>-<compiler version>/

* <name>-<version>-<hash>

The hash here is a SHA-1 hash for the full DAG plus the build
spec.

The installation directory projections can be modified with the
projections argument.
"""

def __init__(self, root):
def __init__(self, root, **kwargs):
self.root = root
self.check_upstream = True
projections = kwargs.get('projections') or default_projections
self.projections = dict((key, projection.lower())
for key, projection in projections.items())

# apply hash length as appropriate
self.hash_length = kwargs.get('hash_length', None)
if self.hash_length is not None:
for when_spec, projection in self.projections.items():
if '{hash}' not in projection:
if '{hash' in projection:
raise InvalidDirectoryLayoutParametersError(
"Conflicting options for installation layout hash"
" length")
else:
raise InvalidDirectoryLayoutParametersError(
"Cannot specify hash length when the hash is not"
" part of all install_tree projections")
self.projections[when_spec] = projection.replace(
"{hash}", "{hash:%d}" % self.hash_length)

# If any of these paths change, downstream databases may not be able to
# locate files in older upstream databases
self.metadata_dir = '.spack'
self.deprecated_dir = 'deprecated'
self.spec_file_name = 'spec.json'
# Use for checking yaml and deprecated types
self._spec_file_name_yaml = 'spec.yaml'
self.extension_file_name = 'extensions.yaml'
self.packages_dir = 'repos'  # archive of package.py files
self.manifest_file_name = 'install_manifest.json'

@property
def hidden_file_paths(self):
"""Return a list of hidden files used by the directory layout.

Paths are relative to the root of an install directory.

If the directory layout uses no hidden files to maintain
state, this should return an empty container, e.g. [] or (,).

"""
raise NotImplementedError()

def all_specs(self):
"""To be implemented by subclasses to traverse all specs for which there is
a directory within the root.
"""
raise NotImplementedError()
return (self.metadata_dir,)

def relative_path_for_spec(self, spec):
"""Implemented by subclasses to return a relative path from the install
root to a unique location for the provided spec."""
raise NotImplementedError()
_check_concrete(spec)

projection = spack.projections.get_projection(self.projections, spec)
path = spec.format(projection)
return path

def write_spec(self, spec, path):
"""Write a spec out to a file."""
_check_concrete(spec)
with open(path, 'w') as f:
# The hash the the projection is the DAG hash but we write out the
# full provenance by full hash so it's availabe if we want it later
# extension = os.path.splitext(path)[-1].lower()
# if 'json' in extension:
spec.to_json(f, hash=ht.full_hash)
# elif 'yaml' in extension:
#     spec.to_yaml(f, hash=ht.full_hash)

def write_host_environment(self, spec):
"""The host environment is a json file with os, kernel, and spack
versioning. We use it in the case that an analysis later needs to
easily access this information.
"""
from spack.util.environment import get_host_environment_metadata
env_file = self.env_metadata_path(spec)
environ = get_host_environment_metadata()
with open(env_file, 'w') as fd:
sjson.dump(environ, fd)

def read_spec(self, path):
"""Read the contents of a file and parse them as a spec"""
try:
with open(path) as f:
extension = os.path.splitext(path)[-1].lower()
if extension == '.json':
spec = spack.spec.Spec.from_json(f)
elif extension == '.yaml':
# Too late for conversion; spec_file_path() already called.
spec = spack.spec.Spec.from_yaml(f)
else:
raise SpecReadError('Did not recognize spec file extension:'
' {0}'.format(extension))
except Exception as e:
if spack.config.get('config:debug'):
raise
raise SpecReadError(
'Unable to read file: %s' % path, 'Cause: ' + str(e))

# Specs read from actual installations are always concrete
spec._mark_concrete()
return spec

def spec_file_path(self, spec):
"""Gets full path to spec file"""
_check_concrete(spec)
# Attempts to convert to JSON if possible.
# Otherwise just returns the YAML.
yaml_path = os.path.join(
self.metadata_path(spec), self._spec_file_name_yaml)
json_path = os.path.join(self.metadata_path(spec), self.spec_file_name)
if os.path.exists(yaml_path) and fs.can_write_to_dir(yaml_path):
self.write_spec(spec, json_path)
try:
os.remove(yaml_path)
except OSError as err:
tty.debug('Could not remove deprecated {0}'.format(yaml_path))
tty.debug(err)
elif os.path.exists(yaml_path):
return yaml_path
return json_path
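`spec_file_path` migrates metadata opportunistically: if a legacy `spec.yaml` exists and its directory is writable, it writes `spec.json` and removes the YAML; on a read-only tree (e.g. an upstream install) it keeps serving the YAML path. A standalone sketch of that policy, with the stock `json` module and PyYAML standing in for Spack's spec serialization:

```python
import json
import os
import yaml  # PyYAML; assumed available for this sketch

def migrate_specfile(yaml_path, json_path):
    """Prefer JSON; convert legacy YAML in place only when writable."""
    dirname = os.path.dirname(yaml_path)
    if os.path.exists(yaml_path) and os.access(dirname, os.W_OK):
        with open(yaml_path) as f:
            data = yaml.safe_load(f)
        with open(json_path, 'w') as f:
            json.dump(data, f)
        try:
            os.remove(yaml_path)
        except OSError:
            pass  # best effort, mirroring the diff's debug-and-continue
        return json_path
    elif os.path.exists(yaml_path):
        return yaml_path  # read-only tree: keep the legacy file
    return json_path
```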

def deprecated_file_path(self, deprecated_spec, deprecator_spec=None):
"""Gets full path to spec file for deprecated spec

If the deprecator_spec is provided, use that. Otherwise, assume
deprecated_spec is already deprecated and its prefix links to the
prefix of its deprecator."""
_check_concrete(deprecated_spec)
if deprecator_spec:
_check_concrete(deprecator_spec)

# If deprecator spec is None, assume deprecated_spec already deprecated
# and use its link to find the file.
base_dir = self.path_for_spec(
deprecator_spec
) if deprecator_spec else os.readlink(deprecated_spec.prefix)

yaml_path = os.path.join(base_dir, self.metadata_dir,
self.deprecated_dir, deprecated_spec.dag_hash()
+ '_' + self._spec_file_name_yaml)

json_path = os.path.join(base_dir, self.metadata_dir,
self.deprecated_dir, deprecated_spec.dag_hash()
+ '_' + self.spec_file_name)

if (os.path.exists(yaml_path) and fs.can_write_to_dir(yaml_path)):
self.write_spec(deprecated_spec, json_path)
try:
os.remove(yaml_path)
except (IOError, OSError) as err:
tty.debug('Could not remove deprecated {0}'.format(yaml_path))
tty.debug(err)
elif os.path.exists(yaml_path):
return yaml_path

return json_path

@contextmanager
def disable_upstream_check(self):
self.check_upstream = False
yield
self.check_upstream = True

def metadata_path(self, spec):
return os.path.join(spec.prefix, self.metadata_dir)

def env_metadata_path(self, spec):
return os.path.join(self.metadata_path(spec), "install_environment.json")

def build_packages_path(self, spec):
return os.path.join(self.metadata_path(spec), self.packages_dir)

def create_install_directory(self, spec):
"""Creates the installation directory for a spec."""
raise NotImplementedError()
_check_concrete(spec)

# Create install directory with properly configured permissions
# Cannot import at top of file
from spack.package_prefs import get_package_dir_permissions, get_package_group

# Each package folder can have its own specific permissions, while
# intermediate folders (arch/compiler) are set with access permissions
# equivalent to the root permissions of the layout.
group = get_package_group(spec)
perms = get_package_dir_permissions(spec)

fs.mkdirp(spec.prefix, mode=perms, group=group, default_perms='parents')
fs.mkdirp(self.metadata_path(spec), mode=perms, group=group)  # in prefix

self.write_spec(spec, self.spec_file_path(spec))

def check_installed(self, spec):
"""Checks whether a spec is installed.
_check_concrete(spec)
path = self.path_for_spec(spec)
spec_file_path = self.spec_file_path(spec)

Return the spec's prefix, if it is installed, None otherwise.
if not os.path.isdir(path):
return None

Raise an exception if the install is inconsistent or corrupt.
"""
raise NotImplementedError()
if not os.path.isfile(spec_file_path):
raise InconsistentInstallDirectoryError(
'Install prefix exists but contains no spec.json:',
"  " + path)

installed_spec = self.read_spec(spec_file_path)
if installed_spec == spec:
return path

# DAG hashes currently do not include build dependencies.
#
# TODO: remove this when we do better concretization and don't
# ignore build-only deps in hashes.
elif (installed_spec.copy(deps=('link', 'run')) ==
spec.copy(deps=('link', 'run'))):
# The directory layout prefix is based on the dag hash, so among
# specs with differing full-hash but matching dag-hash, only one
# may be installed. This means for example that for two instances
# that differ only in CMake version used to build, only one will
# be installed.
return path

if spec.dag_hash() == installed_spec.dag_hash():
raise SpecHashCollisionError(spec, installed_spec)
else:
raise InconsistentInstallDirectoryError(
'Spec file in %s does not match hash!' % spec_file_path)

def all_specs(self):
if not os.path.isdir(self.root):
return []

specs = []
for _, path_scheme in self.projections.items():
path_elems = ["*"] * len(path_scheme.split(os.sep))
# NOTE: Does not validate filename extension; should happen later
path_elems += [self.metadata_dir, 'spec.json']
pattern = os.path.join(self.root, *path_elems)
spec_files = glob.glob(pattern)
if not spec_files:  # we're probably looking at legacy yaml...
path_elems += [self.metadata_dir, 'spec.yaml']
pattern = os.path.join(self.root, *path_elems)
spec_files = glob.glob(pattern)
specs.extend([self.read_spec(s) for s in spec_files])
return specs
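`all_specs` derives a glob from each projection by replacing every path component with `*` and appending the metadata filename, falling back to `spec.yaml` when no JSON files match. The pattern-building step in isolation (the root and projection below are illustrative):

```python
import glob
import os

def spec_file_pattern(root, projection, filename):
    """Build a glob matching metadata files under a projection layout."""
    path_elems = ['*'] * len(projection.split(os.sep))
    path_elems += ['.spack', filename]
    return os.path.join(root, *path_elems)

pattern = spec_file_pattern(
    '/opt/spack',
    '{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}',
    'spec.json')
# pattern == '/opt/spack/*/*/*/.spack/spec.json'
matches = glob.glob(pattern)
```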
|
||||
    def all_deprecated_specs(self):
        if not os.path.isdir(self.root):
            return []

        deprecated_specs = set()
        for _, path_scheme in self.projections.items():
            path_elems = ["*"] * len(path_scheme.split(os.sep))
            # NOTE: Does not validate filename extension; should happen later
            path_elems += [self.metadata_dir, self.deprecated_dir,
                           '*_spec.*']  # + self.spec_file_name]
            pattern = os.path.join(self.root, *path_elems)
            spec_files = glob.glob(pattern)
            get_depr_spec_file = lambda x: os.path.join(
                os.path.dirname(os.path.dirname(x)), self.spec_file_name)
            deprecated_specs |= set((self.read_spec(s),
                                     self.read_spec(get_depr_spec_file(s)))
                                    for s in spec_files)
        return deprecated_specs

    def specs_by_hash(self):
        by_hash = {}
        for spec in self.all_specs():
            by_hash[spec.dag_hash()] = spec
        return by_hash

    def path_for_spec(self, spec):
        """Return absolute path from the root to a directory for the spec."""

@@ -183,236 +416,6 @@ def remove_extension(self, spec, ext_spec):
        raise NotImplementedError()


class YamlDirectoryLayout(DirectoryLayout):
    """By default lays out installation directories like this::
           <install root>/
               <platform-os-target>/
                   <compiler>-<compiler version>/
                       <name>-<version>-<hash>

    The hash here is a SHA-1 hash for the full DAG plus the build
    spec. TODO: implement the build spec.

    The installation directory projections can be modified with the
    projections argument.
    """

    def __init__(self, root, **kwargs):
        super(YamlDirectoryLayout, self).__init__(root)
        projections = kwargs.get('projections') or default_projections
        self.projections = dict((key, projection.lower())
                                for key, projection in projections.items())

        # apply hash length as appropriate
        self.hash_length = kwargs.get('hash_length', None)
        if self.hash_length is not None:
            for when_spec, projection in self.projections.items():
                if '{hash}' not in projection:
                    if '{hash' in projection:
                        raise InvalidDirectoryLayoutParametersError(
                            "Conflicting options for installation layout hash"
                            " length")
                    else:
                        raise InvalidDirectoryLayoutParametersError(
                            "Cannot specify hash length when the hash is not"
                            " part of all install_tree projections")
                self.projections[when_spec] = projection.replace(
                    "{hash}", "{hash:%d}" % self.hash_length)

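The hash-length rewrite above is a plain string substitution on the projection template; a minimal sketch with a hypothetical projection and a seven-character hash:

projection = '{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}'
hash_length = 7
# The same replace() call the loop above applies to each projection.
print(projection.replace('{hash}', '{hash:%d}' % hash_length))
# -> {architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash:7}
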
        # If any of these paths change, downstream databases may not be able to
        # locate files in older upstream databases
        self.metadata_dir = '.spack'
        self.deprecated_dir = 'deprecated'
        self.spec_file_name = 'spec.yaml'
        self.extension_file_name = 'extensions.yaml'
        self.packages_dir = 'repos'  # archive of package.py files
        self.manifest_file_name = 'install_manifest.json'

    @property
    def hidden_file_paths(self):
        return (self.metadata_dir,)

    def relative_path_for_spec(self, spec):
        _check_concrete(spec)

        projection = spack.projections.get_projection(self.projections, spec)
        path = spec.format(projection)
        return path

    def write_spec(self, spec, path):
        """Write a spec out to a file."""
        _check_concrete(spec)
        with open(path, 'w') as f:
            # The hash in the projection is the DAG hash, but we write out the
            # full provenance by full hash so it's available if we want it later
            spec.to_yaml(f, hash=ht.full_hash)

    def write_host_environment(self, spec):
        """The host environment is a json file with os, kernel, and spack
        versioning. We write it out in case an analysis later needs easy
        access to this information.
        """
        from spack.util.environment import get_host_environment_metadata
        env_file = self.env_metadata_path(spec)
        environ = get_host_environment_metadata()
        with open(env_file, 'w') as fd:
            sjson.dump(environ, fd)

    def read_spec(self, path):
        """Read the contents of a file and parse them as a spec."""
        try:
            with open(path) as f:
                spec = spack.spec.Spec.from_yaml(f)
        except Exception as e:
            if spack.config.get('config:debug'):
                raise
            raise SpecReadError(
                'Unable to read file: %s' % path, 'Cause: ' + str(e))

        # Specs read from actual installations are always concrete
        spec._mark_concrete()
        return spec

    def spec_file_path(self, spec):
        """Gets full path to spec file."""
        _check_concrete(spec)
        return os.path.join(self.metadata_path(spec), self.spec_file_name)

    def deprecated_file_name(self, spec):
        """Gets name of deprecated spec file in deprecated dir."""
        _check_concrete(spec)
        return spec.dag_hash() + '_' + self.spec_file_name

    def deprecated_file_path(self, deprecated_spec, deprecator_spec=None):
        """Gets full path to spec file for deprecated spec.

        If the deprecator_spec is provided, use that. Otherwise, assume
        deprecated_spec is already deprecated and its prefix links to the
        prefix of its deprecator."""
        _check_concrete(deprecated_spec)
        if deprecator_spec:
            _check_concrete(deprecator_spec)

        # If deprecator spec is None, assume deprecated_spec already deprecated
        # and use its link to find the file.
        base_dir = self.path_for_spec(
            deprecator_spec
        ) if deprecator_spec else os.readlink(deprecated_spec.prefix)

        return os.path.join(base_dir, self.metadata_dir, self.deprecated_dir,
                            self.deprecated_file_name(deprecated_spec))

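To make the resulting path concrete, a sketch with hypothetical stand-ins for the layout attributes used above:

import os

# All values below are invented; the real ones come from the layout and spec.
base_dir = '/opt/spack/linux-x86_64/gcc-9.3.0/zlib-1.2.11-abc1234'
metadata_dir = '.spack'
deprecated_dir = 'deprecated'
deprecated_file_name = 'def5678_spec.yaml'  # <dag hash>_<spec file name>

print(os.path.join(base_dir, metadata_dir, deprecated_dir, deprecated_file_name))
# -> /opt/spack/linux-x86_64/gcc-9.3.0/zlib-1.2.11-abc1234/.spack/deprecated/def5678_spec.yaml
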
    @contextmanager
    def disable_upstream_check(self):
        self.check_upstream = False
        try:
            yield
        finally:
            # restore the flag even if the body raised
            self.check_upstream = True

    def metadata_path(self, spec):
        return os.path.join(spec.prefix, self.metadata_dir)

    def env_metadata_path(self, spec):
        return os.path.join(self.metadata_path(spec), "install_environment.json")

    def build_packages_path(self, spec):
        return os.path.join(self.metadata_path(spec), self.packages_dir)

    def create_install_directory(self, spec):
        _check_concrete(spec)

        prefix = self.check_installed(spec)
        if prefix:
            raise InstallDirectoryAlreadyExistsError(prefix)

        # Create install directory with properly configured permissions
        # Cannot import at top of file
        from spack.package_prefs import get_package_dir_permissions, get_package_group

        # Each package folder can have its own specific permissions, while
        # intermediate folders (arch/compiler) are set with access permissions
        # equivalent to the root permissions of the layout.
        group = get_package_group(spec)
        perms = get_package_dir_permissions(spec)

        mkdirp(spec.prefix, mode=perms, group=group, default_perms='parents')
        mkdirp(self.metadata_path(spec), mode=perms, group=group)  # in prefix

        self.write_spec(spec, self.spec_file_path(spec))

    def check_installed(self, spec):
        _check_concrete(spec)
        path = self.path_for_spec(spec)
        spec_file_path = self.spec_file_path(spec)

        if not os.path.isdir(path):
            return None

        if not os.path.isfile(spec_file_path):
            raise InconsistentInstallDirectoryError(
                'Install prefix exists but contains no spec.yaml:',
                " " + path)

        installed_spec = self.read_spec(spec_file_path)
        if installed_spec == spec:
            return path

        # DAG hashes currently do not include build dependencies.
        #
        # TODO: remove this when we do better concretization and don't
        # ignore build-only deps in hashes.
        elif (installed_spec.copy(deps=('link', 'run')) ==
              spec.copy(deps=('link', 'run'))):
            # The directory layout prefix is based on the dag hash, so among
            # specs with differing full-hash but matching dag-hash, only one
            # may be installed. This means for example that for two instances
            # that differ only in CMake version used to build, only one will
            # be installed.
            return path

        if spec.dag_hash() == installed_spec.dag_hash():
            raise SpecHashCollisionError(spec, installed_spec)
        else:
            raise InconsistentInstallDirectoryError(
                'Spec file in %s does not match hash!' % spec_file_path)

    def all_specs(self):
        if not os.path.isdir(self.root):
            return []

        specs = []
        for _, path_scheme in self.projections.items():
            path_elems = ["*"] * len(path_scheme.split(os.sep))
            path_elems += [self.metadata_dir, self.spec_file_name]
            pattern = os.path.join(self.root, *path_elems)
            spec_files = glob.glob(pattern)
            specs.extend([self.read_spec(s) for s in spec_files])
        return specs

    def all_deprecated_specs(self):
        if not os.path.isdir(self.root):
            return []

        deprecated_specs = set()
        for _, path_scheme in self.projections.items():
            path_elems = ["*"] * len(path_scheme.split(os.sep))
            path_elems += [self.metadata_dir, self.deprecated_dir,
                           '*_' + self.spec_file_name]
            pattern = os.path.join(self.root, *path_elems)
            spec_files = glob.glob(pattern)
            get_depr_spec_file = lambda x: os.path.join(
                os.path.dirname(os.path.dirname(x)), self.spec_file_name)
            deprecated_specs |= set((self.read_spec(s),
                                     self.read_spec(get_depr_spec_file(s)))
                                    for s in spec_files)
        return deprecated_specs

    def specs_by_hash(self):
        by_hash = {}
        for spec in self.all_specs():
            by_hash[spec.dag_hash()] = spec
        return by_hash


class YamlViewExtensionsLayout(ExtensionsLayout):
    """Maintain extensions within a view.
    """

@@ -537,7 +540,7 @@ def _write_extensions(self, spec, extensions):

        # Create a temp file in the same directory as the actual file.
        dirname, basename = os.path.split(path)
        mkdirp(dirname)
        fs.mkdirp(dirname)

        tmp = tempfile.NamedTemporaryFile(
            prefix=basename, dir=dirname, delete=False)

@@ -590,14 +593,6 @@ def __init__(self, message, long_msg=None):
            message, long_msg)


class InstallDirectoryAlreadyExistsError(DirectoryLayoutError):
    """Raised when create_install_directory is called unnecessarily."""

    def __init__(self, path):
        super(InstallDirectoryAlreadyExistsError, self).__init__(
            "Install path %s already exists!" % path)


class SpecReadError(DirectoryLayoutError):
    """Raised when directory layout can't read a spec."""

@@ -3,6 +3,7 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import contextlib
import copy
import os
import re
@@ -39,6 +40,7 @@
    inverse_view_func_parser,
    view_func_parser,
)
from spack.installer import PackageInstaller
from spack.spec import Spec
from spack.spec_list import InvalidSpecConstraintError, SpecList
from spack.util.path import substitute_path_variables

@@ -257,106 +259,15 @@ def deactivate(shell='sh'):
        tty.warn('Could not fully deactivate view due to missing package '
                 'or repo, shell environment may be corrupt.')

    tty.debug("Deactivated environmennt '%s'" % _active_environment.name)
    tty.debug("Deactivated environment '%s'" % _active_environment.name)
    _active_environment = None

    return cmds


def find_environment(args):
    """Find active environment from args, spack.yaml, or environment variable.

    This is called in ``spack.main`` to figure out which environment to
    activate.

    Check for an environment in this order:
        1. via ``spack -e ENV`` or ``spack -D DIR`` (arguments)
        2. as a spack.yaml file in the current directory, or
        3. via a path in the SPACK_ENV environment variable.

    If an environment is found, read it in. If not, return None.

    Arguments:
        args (argparse.Namespace): argparse namespace with command arguments

    Returns:
        (Environment): a found environment, or ``None``
    """
    # try arguments
    env = getattr(args, 'env', None)

    # treat env as a name
    if env:
        if exists(env):
            return read(env)

    else:
        # if env was specified, see if it is a directory; otherwise, look
        # at env_dir (env and env_dir are mutually exclusive)
        env = getattr(args, 'env_dir', None)

        # if no argument, look for the environment variable
        if not env:
            env = os.environ.get(spack_env_var)

            # nothing was set; there's no active environment
            if not env:
                return None

    # if we get here, env isn't the name of a spack environment; it has
    # to be a path to an environment, or there is something wrong.
    if is_env_dir(env):
        return Environment(env)

    raise SpackEnvironmentError('no environment in %s' % env)

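A hypothetical call site for the lookup order documented above, assuming find_environment is importable and that no -e/-D flags, no ./spack.yaml, and no SPACK_ENV are set:

import argparse

# With both fields unset, the function falls through to ./spack.yaml,
# then to the SPACK_ENV variable, and returns None if neither is present.
args = argparse.Namespace(env=None, env_dir=None)
env = find_environment(args)
if env is None:
    print('no environment found')
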
def get_env(args, cmd_name, required=False):
    """Used by commands to get the active environment.

    This first checks for an ``env`` argument, then looks at the
    ``active`` environment. We check args first because Spack's
    subcommand arguments are parsed *after* the ``-e`` and ``-D``
    arguments to ``spack``. So there may be an ``env`` argument that is
    *not* the active environment, and we give it precedence.

    This is used by a number of commands for determining whether there is
    an active environment.

    If an environment is not found *and* is required, print an error
    message that says the calling command *needs* an active environment.

    Arguments:
        args (argparse.Namespace): argparse namespace with command arguments
        cmd_name (str): name of calling command
        required (bool): if ``True``, raise an exception when no environment
            is found; if ``False``, just return ``None``

    Returns:
        (Environment): if there is an arg or active environment
    """
    # try argument first
    env = getattr(args, 'env', None)
    if env:
        if exists(env):
            return read(env)
        elif is_env_dir(env):
            return Environment(env)
        else:
            raise SpackEnvironmentError('no environment in %s' % env)

    # try the active environment. This is set by find_environment() (above)
    if _active_environment:
        return _active_environment
    elif not required:
        return None
    else:
        tty.die(
            '`spack %s` requires an environment' % cmd_name,
            'activate an environment first:',
            '    spack env activate ENV',
            'or use:',
            '    spack -e ENV %s ...' % cmd_name)

def active_environment():
    """Returns the active environment when there is any."""
    return _active_environment


def _root(name):

@@ -1560,21 +1471,18 @@ def uninstalled_specs(self):
            uninstalled_specs.append(spec)
        return uninstalled_specs

    def install_all(self, args=None, **install_args):
    def install_all(self, **install_args):
        """Install all concretized specs in an environment.

        Note: this does not regenerate the views for the environment;
        that needs to be done separately with a call to write().

        Args:
            args (argparse.Namespace): argparse namespace with command arguments
            install_args (dict): keyword install arguments
        """
        self.install_specs(None, args=args, **install_args)

    def install_specs(self, specs=None, args=None, **install_args):
        from spack.installer import PackageInstaller
        self.install_specs(None, **install_args)

    def install_specs(self, specs=None, **install_args):
        tty.debug('Assessing installation status of environment packages')
        # If "spack install" is invoked repeatedly for a large environment
        # where all specs are already installed, the operation can take

@@ -1608,15 +1516,7 @@ def install_specs(self, specs=None, args=None, **install_args):

        installs = []
        for spec in specs_to_install:
            # Parse cli arguments and construct a dictionary
            # that will be passed to the package installer
            kwargs = dict()
            if install_args:
                kwargs.update(install_args)
            if args:
                spack.cmd.install.update_kwargs_from_args(args, kwargs)

            installs.append((spec.package, kwargs))
            installs.append((spec.package, install_args))

        try:
            builder = PackageInstaller(installs)

@@ -1703,7 +1603,22 @@ def matching_spec(self, spec):
        # Dependency-only specs will have value None
        matches = {}

        if not isinstance(spec, spack.spec.Spec):
            spec = spack.spec.Spec(spec)

        for user_spec, concretized_user_spec in self.concretized_specs():
            # Deal with concrete specs differently
            if spec.concrete:
                # Matching a concrete spec is more restrictive
                # than just matching the dag hash
                is_match = (
                    spec in concretized_user_spec and
                    concretized_user_spec[spec.name].build_hash() == spec.build_hash()
                )
                if is_match:
                    matches[spec] = spec
                continue

            if concretized_user_spec.satisfies(spec):
                matches[concretized_user_spec] = user_spec
            for dep_spec in concretized_user_spec.traverse(root=False):

@@ -1783,7 +1698,8 @@ def _to_lockfile_dict(self):
            dag_hash_all = s.build_hash()
            if dag_hash_all not in concrete_specs:
                spec_dict = s.to_node_dict(hash=ht.build_hash)
                spec_dict[s.name]['hash'] = s.dag_hash()
                # Assumes no legacy formats, since this was just created.
                spec_dict[ht.dag_hash.name] = s.dag_hash()
                concrete_specs[dag_hash_all] = spec_dict

        hash_spec_list = zip(

@@ -1829,7 +1745,7 @@ def _read_lockfile_dict(self, d):
            specs_by_hash[dag_hash] = Spec.from_node_dict(node_dict)

        for dag_hash, node_dict in json_specs_by_hash.items():
            for dep_name, dep_hash, deptypes in (
            for _, dep_hash, deptypes, _ in (
                    Spec.dependencies_from_node_dict(node_dict)):
                specs_by_hash[dag_hash]._add_dependency(
                    specs_by_hash[dep_hash], deptypes)

@@ -2230,6 +2146,17 @@ def is_latest_format(manifest):
    return not changed


@contextlib.contextmanager
def deactivate_environment():
    """Deactivate an active environment for the duration of the context."""
    global _active_environment
    current, _active_environment = _active_environment, None
    try:
        yield
    finally:
        _active_environment = current

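A hypothetical usage sketch for this context manager; the task helper is invented:

# The block below sees no active environment, and the previous one is
# restored afterwards, even if the block raises.
with deactivate_environment():
    run_environment_free_task()   # hypothetical helper
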
class SpackEnvironmentError(spack.error.SpackError):
    """Superclass for all errors to do with Spack environments."""


@@ -550,11 +550,21 @@ def dynamic_graph(spec, deptypes):
    out.write('  style="rounded,filled"')
    out.write(' ]\n')

    # write nodes
    out.write('\n')
    for key, label in nodes:
        out.write('  "%s" [label="%s"]\n' % (key, label))

    # write edges
    out.write('\n')
    for src, dest in edges:
        out.write('  "%s" -> "%s"\n' % (src, dest))

    # ensure that roots are all at the top of the plot
    dests = set([d for _, d in edges])
    roots = ['"%s"' % k for k, _ in nodes if k not in dests]
    out.write('\n')
    out.write('  { rank=min; %s; }' % "; ".join(roots))

    out.write('\n')
    out.write('}\n')

@@ -7,6 +7,8 @@

import spack.dependency as dp

hashes = []


class SpecHashDescriptor(object):
    """This class defines how hashes are generated on Spec objects.
@@ -16,36 +18,38 @@ class SpecHashDescriptor(object):
    include certain dependency types, and it may optionally include a
    canonicalized hash of the package.py for each node in the graph.

    We currently use different hashes for different use cases.
    """
    We currently use different hashes for different use cases."""

    hash_types = ('_dag_hash', '_build_hash', '_full_hash', '_package_hash')

    def __init__(self, deptype=('link', 'run'), package_hash=False, attr=None,
                 override=None):
    def __init__(self, deptype, package_hash, name, override=None):
        self.deptype = dp.canonical_deptype(deptype)
        self.package_hash = package_hash
        self.attr = attr
        self.name = name
        hashes.append(self)
        # Allow spec hashes to have an alternate computation method
        self.override = override

    @property
    def attr(self):
        """Private attribute stored on spec"""
        return '_' + self.name


#: Default Hash descriptor, used by Spec.dag_hash() and stored in the DB.
dag_hash = SpecHashDescriptor(deptype=('link', 'run'), package_hash=False,
                              attr='_hash')
dag_hash = SpecHashDescriptor(
    deptype=('link', 'run'), package_hash=False, name='hash')


#: Hash descriptor that includes build dependencies.
build_hash = SpecHashDescriptor(
    deptype=('build', 'link', 'run'), package_hash=False, attr='_build_hash')
    deptype=('build', 'link', 'run'), package_hash=False, name='build_hash')


#: Full hash used in build pipelines to determine when to rebuild packages.
full_hash = SpecHashDescriptor(
    deptype=('build', 'link', 'run'), package_hash=True, attr='_full_hash')
    deptype=('build', 'link', 'run'), package_hash=True, name='full_hash')


#: Package hash used as part of full hash
package_hash = SpecHashDescriptor(
    deptype=(), package_hash=True, attr='_package_hash',
    deptype=(), package_hash=True, name='package_hash',
    override=lambda s: s.package.content_hash())

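To illustrate the name-based descriptors above, a small sketch; it assumes this module is importable as spack.hash_types, the alias ht used elsewhere in this diff:

import spack.hash_types as ht

print(ht.dag_hash.name)    # 'hash'
print(ht.dag_hash.attr)    # '_hash' -- the private attribute cached on each Spec
print(ht.build_hash.attr)  # '_build_hash'
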
@@ -37,8 +37,8 @@ def _for_each_enabled(spec, method_name):


def post_install(spec):
    import spack.environment  # break import cycle
    if spack.environment.get_env({}, ''):
    import spack.environment as ev  # break import cycle
    if ev.active_environment():
        # If installed through an environment, we skip post_install
        # module generation and generate the modules on env_write so Spack
        # can manage interactions between env views and modules

@@ -12,7 +12,6 @@
import llnl.util.filesystem as fs
import llnl.util.tty as tty

import spack.modules
import spack.paths
import spack.store


@@ -8,9 +8,9 @@
import re
import shutil
import sys
import tty

import llnl.util.filesystem as fs
import llnl.util.tty as tty

import spack.error
import spack.util.prefix

@@ -367,8 +367,13 @@ def _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned,

    pkg_id = package_id(pkg)
    tty.msg('Extracting {0} from binary cache'.format(pkg_id))
    binary_distribution.extract_tarball(binary_spec, tarball, allow_root=False,
                                        unsigned=unsigned, force=False)

    # don't print long padded paths while extracting/relocating binaries
    with spack.util.path.filter_padding():
        binary_distribution.extract_tarball(
            binary_spec, tarball, allow_root=False, unsigned=unsigned, force=False
        )

    pkg.installed_from_binary_cache = True
    spack.store.db.add(pkg.spec, spack.store.layout, explicit=explicit)
    return True

@@ -1163,7 +1168,7 @@ def _install_task(self, task):
        except spack.build_environment.StopPhase as e:
            # A StopPhase exception means that do_install was asked to
            # stop early from clients, and is not an error at this point
            pid = '{0}: '.format(pkg.pid) if tty.show_pid() else ''
            pid = '{0}: '.format(self.pid) if tty.show_pid() else ''
            tty.debug('{0}{1}'.format(pid, str(e)))
            tty.debug('Package stage directory: {0}'.format(pkg.stage.source_path))

@@ -1564,6 +1569,9 @@ def install(self):
                if os.path.exists(rec.path):
                    with fs.replace_directory_transaction(rec.path):
                        # fs transaction will put the old prefix
                        # back on failure, so make sure to keep it.
                        keep_prefix = True
                        self._install_task(task)
                else:
                    tty.debug("Missing installation to overwrite")

@@ -1582,21 +1590,6 @@ def install(self):
                keep_prefix = keep_prefix or \
                    (stop_before_phase is None and last_phase is None)

            except spack.directory_layout.InstallDirectoryAlreadyExistsError \
                    as exc:
                tty.debug('Install prefix for {0} exists, keeping {1} in '
                          'place.'.format(pkg.name, pkg.prefix))
                self._update_installed(task)

                # Only terminate at this point if a single build request was
                # made.
                if task.explicit and single_explicit_spec:
                    spack.hooks.on_install_failure(task.request.pkg.spec)
                    raise

                if task.explicit:
                    exists_errors.append((pkg_id, str(exc)))

            except KeyboardInterrupt as exc:
                # The build has been terminated with a Ctrl-C so terminate
                # regardless of the number of remaining specs.

|
||||
self.filter_fn = spack.util.path.padding_filter if filter_padding else None
|
||||
|
||||
# info/debug information
|
||||
pid = '{0}: '.format(pkg.pid) if tty.show_pid() else ''
|
||||
pid = '{0}: '.format(os.getpid()) if tty.show_pid() else ''
|
||||
self.pre = '{0}{1}:'.format(pid, pkg.name)
|
||||
self.pkg_id = package_id(pkg)
|
||||
|
||||
|
@@ -27,6 +27,7 @@

import llnl.util.filesystem as fs
import llnl.util.tty as tty
import llnl.util.tty.colify
import llnl.util.tty.color as color
from llnl.util.tty.log import log_output

@@ -173,14 +174,16 @@ def _format_actions_usage(self, actions, groups):
        usage = super(
            SpackHelpFormatter, self)._format_actions_usage(actions, groups)

        # Eliminate any occurrence of two or more consecutive spaces
        usage = re.sub(r'[ ]{2,}', ' ', usage)

        # compress single-character flags that are not mutually exclusive
        # at the beginning of the usage string
        chars = ''.join(re.findall(r'\[-(.)\]', usage))
        usage = re.sub(r'\[-.\] ?', '', usage)
        if chars:
            return '[-%s] %s' % (chars, usage)
        else:
            return usage
        usage = '[-%s] %s' % (chars, usage)
        return usage.strip()

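The single-character flag compression above is easiest to see on a concrete string; a standalone sketch with a made-up usage line:

import re

usage = '[-h] [-k] [-v] install [SPEC ...]'        # hypothetical usage string
chars = ''.join(re.findall(r'\[-(.)\]', usage))    # 'hkv'
usage = re.sub(r'\[-.\] ?', '', usage)             # 'install [SPEC ...]'
print(('[-%s] %s' % (chars, usage)).strip())       # '[-hkv] install [SPEC ...]'
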
class SpackArgumentParser(argparse.ArgumentParser):
@@ -293,7 +296,18 @@ def add_subcommand_group(title, commands):
    def add_subparsers(self, **kwargs):
        """Ensure that sensible defaults are propagated to subparsers"""
        kwargs.setdefault('metavar', 'SUBCOMMAND')

        # From Python 3.7 we can require a subparser; earlier versions
        # of argparse error because required=True is unknown
        if sys.version_info[:2] > (3, 6):
            kwargs.setdefault('required', True)

        sp = super(SpackArgumentParser, self).add_subparsers(**kwargs)
        # This monkey patching is needed for Python 3.5 and 3.6, which support
        # having a required subparser but don't expose the API used above
        if sys.version_info[:2] == (3, 5) or sys.version_info[:2] == (3, 6):
            sp.required = True

        old_add_parser = sp.add_parser

        def add_parser(name, **kwargs):

@@ -336,6 +350,15 @@ def format_help(self, level='short'):
        # in subparsers, self.prog is, e.g., 'spack install'
        return super(SpackArgumentParser, self).format_help()

    def _check_value(self, action, value):
        # converted value must be one of the choices (if specified)
        if action.choices is not None and value not in action.choices:
            cols = llnl.util.tty.colify.colified(
                sorted(action.choices), indent=4, tty=True
            )
            msg = 'invalid choice: %r choose from:\n%s' % (value, cols)
            raise argparse.ArgumentError(action, msg)


def make_argument_parser(**kwargs):
    """Create a basic argument parser without any subcommands added."""
@@ -643,7 +666,7 @@ def shell_set(var, value):
        tty.die('shell must be sh or csh')

    # print sys type
    shell_set('_sp_sys_type', spack.architecture.sys_type())
    shell_set('_sp_sys_type', str(spack.architecture.default_arch()))
    shell_set('_sp_compatible_sys_types',
              ':'.join(spack.architecture.compatible_sys_types()))
    # print roots for all module systems
@@ -720,7 +743,7 @@ def main(argv=None):

    # activate an environment if one was specified on the command line
    if not args.no_env:
        env = ev.find_environment(args)
        env = spack.cmd.find_environment(args)
        if env:
            ev.activate(env, args.use_env_repo, add_view=False)


@@ -9,12 +9,14 @@

from __future__ import absolute_import

from .common import disable_modules
from .lmod import LmodModulefileWriter
from .tcl import TclModulefileWriter

__all__ = [
    'TclModulefileWriter',
    'LmodModulefileWriter'
    'LmodModulefileWriter',
    'disable_modules'
]

module_types = {

@@ -29,6 +29,7 @@
module type.
"""
import collections
import contextlib
import copy
import datetime
import inspect
@@ -41,6 +42,7 @@
from llnl.util.lang import dedupe

import spack.build_environment as build_environment
import spack.config
import spack.environment as ev
import spack.error
import spack.paths

@@ -698,7 +700,11 @@ def environment_modifications(self):
        if use_view is True:
            use_view = ev.default_view_name

        env = ev.get_env({}, 'post_env_write_hook', required=True)
        env = ev.active_environment()
        if not env:
            raise ev.SpackEnvironmentViewError("Module generation with views "
                                               "requires active environment")

        view = env.views[use_view]

        spec.prefix = view.get_projection_for_spec(spec)

@@ -901,6 +907,19 @@ def remove(self):
        pass


@contextlib.contextmanager
def disable_modules():
    """Disable the generation of modulefiles within the context manager."""
    data = {
        'modules:': {
            'enable': []
        }
    }
    disable_scope = spack.config.InternalConfigScope('disable_modules', data=data)
    with spack.config.override(disable_scope):
        yield

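A hypothetical usage sketch; the install call is invented, and the context manager itself only supplies the temporary config override:

# Blank the 'enable' list for the duration of the block, so no
# modulefile writers run for installs performed inside it.
with disable_modules():
    install_package('zlib')   # hypothetical helper
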
class ModulesError(spack.error.SpackError):
    """Base error for modules."""


@@ -87,7 +87,7 @@ def get_monitor_group(subparser):
    monitor_group = subparser.add_argument_group()
    monitor_group.add_argument(
        '--monitor', action='store_true', dest='use_monitor', default=False,
        help="interact with a montor server during builds.")
        help="interact with a monitor server during builds.")
    monitor_group.add_argument(
        '--monitor-save-local', action='store_true', dest='monitor_save_local',
        default=False, help="save monitor results to .spack instead of server.")

@@ -425,11 +425,15 @@ def get_build_id(self, spec, return_response=False, spec_exists=True):
        data['tags'] = self.tags

        # If we allow the spec to not exist (meaning we create it) we need to
        # include the full spec.yaml here
        # include the full specfile here
        if not spec_exists:
            meta_dir = os.path.dirname(spec.package.install_log_path)
            spec_file = os.path.join(meta_dir, "spec.yaml")
            data['spec'] = syaml.load(read_file(spec_file))
            spec_file = os.path.join(meta_dir, "spec.json")
            if os.path.exists(spec_file):
                data['spec'] = sjson.load(read_file(spec_file))
            else:
                spec_file = os.path.join(meta_dir, "spec.yaml")
                data['spec'] = syaml.load(read_file(spec_file))

        if self.save_local:
            return self.get_local_build_id(data, full_hash, return_response)

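Condensed, the json-first/yaml-fallback selection above amounts to this standalone sketch (the metadata directory is hypothetical):

import os

meta_dir = '/opt/spack/linux-x86_64/gcc-9.3.0/zlib-1.2.11-abc1234/.spack'  # hypothetical
spec_file = os.path.join(meta_dir, 'spec.json')
if not os.path.exists(spec_file):
    # older installs only carry the legacy yaml specfile
    spec_file = os.path.join(meta_dir, 'spec.yaml')
print(spec_file)
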
@@ -2,3 +2,19 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from ._operating_system import OperatingSystem
from .cray_backend import CrayBackend
from .cray_frontend import CrayFrontend
from .linux_distro import LinuxDistro
from .mac_os import MacOs

__all__ = [
    'OperatingSystem',
    'LinuxDistro',
    'MacOs',
    'CrayFrontend',
    'CrayBackend'
]

#: List of all the Operating Systems known to Spack
operating_systems = [LinuxDistro, MacOs, CrayFrontend, CrayBackend]

lib/spack/spack/operating_systems/_operating_system.py (new file, 36 lines)
@@ -0,0 +1,36 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.lang

import spack.util.spack_yaml as syaml


@llnl.util.lang.lazy_lexicographic_ordering
class OperatingSystem(object):
    """Base class for all the Operating Systems.

    Each operating system contains its own compiler-finding logic, which
    is used to detect compilers.
    """

    def __init__(self, name, version):
        self.name = name.replace('-', '_')
        self.version = str(version).replace('-', '_')

    def __str__(self):
        return "%s%s" % (self.name, self.version)

    def __repr__(self):
        return self.__str__()

    def _cmp_iter(self):
        yield self.name
        yield self.version

    def to_dict(self):
        return syaml.syaml_dict([
            ('name', self.name),
            ('version', self.version)
        ])
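A quick illustration of the class above, with invented values; name and version are normalized, and the string form concatenates them:

distro = OperatingSystem('ubuntu', '20.04')
print(distro)                  # -> ubuntu20.04
print(dict(distro.to_dict()))  # -> {'name': 'ubuntu', 'version': '20.04'}
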
@@ -10,9 +10,10 @@

import spack.error
import spack.version
from spack.operating_systems.linux_distro import LinuxDistro
from spack.util.module_cmd import module

from .linux_distro import LinuxDistro

#: Possible locations of the Cray CLE release file,
#: which we look at to get the CNL OS version.
_cle_release_file = '/etc/opt/cray/release/cle-release'

@@ -11,10 +11,11 @@
import llnl.util.lang
import llnl.util.tty as tty

from spack.operating_systems.linux_distro import LinuxDistro
from spack.util.environment import get_path
from spack.util.module_cmd import module

from .linux_distro import LinuxDistro


@contextlib.contextmanager
def unload_programming_environment():

@@ -2,10 +2,9 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import re

from spack.architecture import OperatingSystem
from ._operating_system import OperatingSystem


class LinuxDistro(OperatingSystem):

@@ -5,10 +5,11 @@

import platform as py_platform

from spack.architecture import OperatingSystem
from spack.util.executable import Executable
from spack.version import Version

from ._operating_system import OperatingSystem


# FIXME: store versions inside OperatingSystem as a Version instead of string
def macos_version():

@@ -41,6 +41,7 @@
import spack.dependency
import spack.directives
import spack.directory_layout
import spack.environment
import spack.error
import spack.fetch_strategy as fs
import spack.hooks

@@ -1251,18 +1252,14 @@ def installed(self):
        Returns:
            True if the package has been installed, False otherwise.
        """
        has_prefix = os.path.isdir(self.prefix)
        try:
            # If the spec is in the DB, check the installed
            # attribute of the record
            rec = spack.store.db.get_record(self.spec)
            db_says_installed = rec.installed
            return spack.store.db.get_record(self.spec).installed
        except KeyError:
            # If the spec is not in the DB, the method
            # above raises a KeyError
            db_says_installed = False

        return has_prefix and db_says_installed
            return False

    @property
    def prefix(self):

@@ -1537,7 +1534,7 @@ def content_hash(self, content=None):
        # should this attempt to download the source and set one? This
        # probably only happens for source repositories which are
        # referenced by branch name rather than tag or commit ID.
        env = spack.environment.get_env(None, None)
        env = spack.environment.active_environment()
        from_local_sources = env and env.is_develop(self.spec)
        if not self.spec.external and not from_local_sources:
            message = 'Missing a source id for {s.name}@{s.version}'

@@ -2,3 +2,54 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.lang

from ._platform import Platform
from .cray import Cray
from .darwin import Darwin
from .linux import Linux
from .test import Test

__all__ = [
    'Platform',
    'Cray',
    'Darwin',
    'Linux',
    'Test'
]

#: List of all the platform classes known to Spack
platforms = [Cray, Darwin, Linux, Test]


def host():
    """Detect and return the platform for this machine or None if detection fails."""
    for platform_cls in sorted(platforms, key=lambda plt: plt.priority):
        if platform_cls.detect():
            return platform_cls()
    return None


@llnl.util.lang.memoized
def cls_by_name(name):
    """Return a platform class that corresponds to the given name or None
    if there is no match.

    Args:
        name (str): name of the platform
    """
    for platform_cls in sorted(platforms, key=lambda plt: plt.priority):
        if name.replace("_", "").lower() == platform_cls.__name__.lower():
            return platform_cls
    return None


def by_name(name):
    """Return a platform object that corresponds to the given name or None
    if there is no match.

    Args:
        name (str): name of the platform
    """
    platform_cls = cls_by_name(name)
    return platform_cls() if platform_cls else None
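A brief, hypothetical usage sketch of these helpers (what each call returns depends on the machine running it):

import spack.platforms

plat = spack.platforms.host()                     # e.g. Linux() on a Linux box, or None
linux_cls = spack.platforms.cls_by_name('linux')  # the Linux class, or None
linux = spack.platforms.by_name('linux')          # a Linux() instance, or None
print(plat, linux_cls, linux)
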
lib/spack/spack/platforms/_platform.py (new file, 126 lines)
@@ -0,0 +1,126 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.lang

import spack.error


class NoPlatformError(spack.error.SpackError):
    def __init__(self):
        msg = "Could not determine a platform for this machine"
        super(NoPlatformError, self).__init__(msg)


@llnl.util.lang.lazy_lexicographic_ordering
class Platform(object):
    """Base class for each type of Platform"""

    # Subclass sets number. Controls detection order
    priority = None  # type: int

    #: binary formats used on this platform; used by relocation logic
    binary_formats = ['elf']

    front_end = None  # type: str
    back_end = None  # type: str
    default = None  # type: str  # The default back end target.

    front_os = None  # type: str
    back_os = None  # type: str
    default_os = None  # type: str

    reserved_targets = ['default_target', 'frontend', 'fe', 'backend', 'be']
    reserved_oss = ['default_os', 'frontend', 'fe', 'backend', 'be']

    def __init__(self, name):
        self.targets = {}
        self.operating_sys = {}
        self.name = name

    def add_target(self, name, target):
        """Used by the platform specific subclass to list available targets.

        Raises an error if the platform specifies a name
        that is reserved by spack as an alias.
        """
        if name in Platform.reserved_targets:
            msg = "{0} is a spack reserved alias and cannot be the name of a target"
            raise ValueError(msg.format(name))
        self.targets[name] = target

    def target(self, name):
        """This is a getter method for the target dictionary
        that handles defaulting based on the values provided by default,
        front-end, and back-end. This can be overwritten
        by a subclass for which we want to provide further aliasing options.
        """
        # TODO: Check if we can avoid using strings here
        name = str(name)
        if name == 'default_target':
            name = self.default
        elif name == 'frontend' or name == 'fe':
            name = self.front_end
        elif name == 'backend' or name == 'be':
            name = self.back_end

        return self.targets.get(name, None)

    def add_operating_system(self, name, os_class):
        """Add the operating_system class object into the
        platform.operating_sys dictionary.
        """
        if name in Platform.reserved_oss:
            msg = "{0} is a spack reserved alias and cannot be the name of an OS"
            raise ValueError(msg.format(name))
        self.operating_sys[name] = os_class

    def operating_system(self, name):
        if name == 'default_os':
            name = self.default_os
        if name == 'frontend' or name == "fe":
            name = self.front_os
        if name == 'backend' or name == 'be':
            name = self.back_os

        return self.operating_sys.get(name, None)

    @classmethod
    def setup_platform_environment(cls, pkg, env):
        """Subclass can override this method if it requires any
        platform-specific build environment modifications.
        """

    @classmethod
    def detect(cls):
        """Return True if the host platform is detected to be the current
        Platform class, False otherwise.

        Derived classes are responsible for implementing this method.
        """
        raise NotImplementedError()

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return self.name

    def _cmp_iter(self):
        yield self.name
        yield self.default
        yield self.front_end
        yield self.back_end
        yield self.default_os
        yield self.front_os
        yield self.back_os

        def targets():
            for t in sorted(self.targets.values()):
                yield t._cmp_iter
        yield targets

        def oses():
            for o in sorted(self.operating_sys.values()):
                yield o._cmp_iter
        yield oses
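To show how the hooks above fit together, a minimal hypothetical subclass; the names and priority are invented, and real platforms also register targets and operating systems:

class Toy(Platform):
    priority = 1000000   # sorted last, so real platforms win detection

    def __init__(self):
        super(Toy, self).__init__('toy')
        self.default = 'generic'
        self.front_end = self.back_end = 'generic'
        self.default_os = self.front_os = self.back_os = 'toyos'

    @classmethod
    def detect(cls):
        return False   # never auto-detected; only reachable via by_name()
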
@@ -11,13 +11,15 @@

import llnl.util.tty as tty

from spack.architecture import NoPlatformError, Platform, Target
import spack.target
from spack.operating_systems.cray_backend import CrayBackend
from spack.operating_systems.cray_frontend import CrayFrontend
from spack.paths import build_env_path
from spack.util.executable import Executable
from spack.util.module_cmd import module

from ._platform import NoPlatformError, Platform

_craype_name_to_target_name = {
    'x86-cascadelake': 'cascadelake',
    'x86-naples': 'zen',
@@ -51,7 +53,7 @@ def __init__(self):
        # Make all craype targets available.
        for target in self._avail_targets():
            name = _target_name_from_craype_target_name(target)
            self.add_target(name, Target(name, 'craype-%s' % target))
            self.add_target(name, spack.target.Target(name, 'craype-%s' % target))

        self.back_end = os.environ.get('SPACK_BACK_END',
                                       self._default_target_from_env())
@@ -63,12 +65,12 @@ def __init__(self):
        # Setup frontend targets
        for name in archspec.cpu.TARGETS:
            if name not in self.targets:
                self.add_target(name, Target(name))
                self.add_target(name, spack.target.Target(name))
        self.front_end = os.environ.get(
            'SPACK_FRONT_END', archspec.cpu.host().name
        )
        if self.front_end not in self.targets:
            self.add_target(self.front_end, Target(self.front_end))
            self.add_target(self.front_end, spack.target.Target(self.front_end))

        front_distro = CrayFrontend()
        back_distro = CrayBackend()

@@ -7,9 +7,11 @@

import archspec.cpu

from spack.architecture import Platform, Target
import spack.target
from spack.operating_systems.mac_os import MacOs

from ._platform import Platform


class Darwin(Platform):
    priority = 89
@@ -20,7 +22,7 @@ def __init__(self):
        super(Darwin, self).__init__('darwin')

        for name in archspec.cpu.TARGETS:
            self.add_target(name, Target(name))
            self.add_target(name, spack.target.Target(name))

        self.default = archspec.cpu.host().name
        self.front_end = self.default

@@ -6,9 +6,11 @@

import archspec.cpu

from spack.architecture import Platform, Target
import spack.target
from spack.operating_systems.linux_distro import LinuxDistro

from ._platform import Platform


class Linux(Platform):
    priority = 90
@@ -17,7 +19,7 @@ def __init__(self):
        super(Linux, self).__init__('linux')

        for name in archspec.cpu.TARGETS:
            self.add_target(name, Target(name))
            self.add_target(name, spack.target.Target(name))

        # Get specific default
        self.default = archspec.cpu.host().name

@@ -2,10 +2,12 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import platform

from spack.architecture import OperatingSystem, Platform, Target
import spack.operating_systems
import spack.target

from ._platform import Platform


class Test(Platform):
@@ -24,13 +26,13 @@ class Test(Platform):

    def __init__(self):
        super(Test, self).__init__('test')
        self.add_target(self.default, Target(self.default))
        self.add_target(self.front_end, Target(self.front_end))
        self.add_target(self.default, spack.target.Target(self.default))
        self.add_target(self.front_end, spack.target.Target(self.front_end))

        self.add_operating_system(
            self.default_os, OperatingSystem('debian', 6))
            self.default_os, spack.operating_systems.OperatingSystem('debian', 6))
        self.add_operating_system(
            self.front_os, OperatingSystem('redhat', 6))
            self.front_os, spack.operating_systems.OperatingSystem('redhat', 6))

    @classmethod
    def detect(cls):

@@ -16,7 +16,6 @@
import llnl.util.tty as tty

import spack.architecture
import spack.cmd
import spack.repo
import spack.spec
import spack.util.executable as executable
@@ -88,7 +87,8 @@ def _patchelf():
        return patchelf.path

    # Check if patchelf spec is installed
    spec = spack.spec.Spec('patchelf').concretized()
    spec = spack.spec.Spec('patchelf')
    spec._old_concretize()
    exe_path = os.path.join(spec.prefix.bin, "patchelf")
    if spec.package.installed and os.path.exists(exe_path):
        return exe_path

Some files were not shown because too many files have changed in this diff.