Compare commits
494 commits: develop-20 ... hs/fix/che
Commit list: 494 commits between develop-20 and hs/fix/che (abbreviated SHAs 122a53d322 through 3bf1a03760); the author, date, and message columns of the compare table are available on the compare page itself.
.github/dependabot.yml (vendored, 10 changes)

@@ -5,14 +5,10 @@ updates:
     directory: "/"
     schedule:
       interval: "daily"
-  # Requirements to build documentation
+  # Requirements to run style checks and build documentation
-  - package-ecosystem: "pip"
-    directory: "/lib/spack/docs"
-    schedule:
-      interval: "daily"
-  # Requirements to run style checks
   - package-ecosystem: "pip"
     directories:
-    - "/.github/workflows/requirements/*"
+    - "/.github/workflows/requirements/style/*"
+    - "/lib/spack/docs"
     schedule:
       interval: "daily"
.github/workflows/audit.yaml (vendored, 4 changes)

@@ -44,6 +44,7 @@ jobs:
       run: |
         . share/spack/setup-env.sh
         coverage run $(which spack) audit packages
+        coverage run $(which spack) audit configs
         coverage run $(which spack) -d audit externals
         coverage combine
         coverage xml
@@ -52,6 +53,7 @@ jobs:
       run: |
         . share/spack/setup-env.sh
         spack -d audit packages
+        spack -d audit configs
         spack -d audit externals
     - name: Package audits (without coverage)
       if: ${{ runner.os == 'Windows' }}
@@ -59,6 +61,8 @@ jobs:
         . share/spack/setup-env.sh
         spack -d audit packages
         ./share/spack/qa/validate_last_exit.ps1
+        spack -d audit configs
+        ./share/spack/qa/validate_last_exit.ps1
         spack -d audit externals
         ./share/spack/qa/validate_last_exit.ps1
     - uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673
.github/workflows/build-containers.yml (vendored, 14 changes)

@@ -87,19 +87,19 @@ jobs:
           fi

       - name: Upload Dockerfile
-        uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b
+        uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a
         with:
           name: dockerfiles_${{ matrix.dockerfile[0] }}
           path: dockerfiles

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@5927c834f5b4fdf503fca6f4c7eccda82949e1ee
+        uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@4fd812986e6c8c2a69e18311145f9371337f27d4
+        uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db

       - name: Log in to GitHub Container Registry
-        uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
@@ -107,13 +107,13 @@ jobs:

       - name: Log in to DockerHub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Build & Deploy ${{ matrix.dockerfile[0] }}
-        uses: docker/build-push-action@1a162644f9a7e87d8f4b053101d1d9a712edc18c
+        uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85
         with:
           context: dockerfiles/${{ matrix.dockerfile[0] }}
           platforms: ${{ matrix.dockerfile[1] }}
@@ -126,7 +126,7 @@ jobs:
     needs: deploy-images
     steps:
       - name: Merge Artifacts
-        uses: actions/upload-artifact/merge@0b2256b8c012f0828dc542b3febcab082c67f72b
+        uses: actions/upload-artifact/merge@834a144ee995460fba8ed112a2fc961b36a5ec5a
         with:
           name: dockerfiles
           pattern: dockerfiles_*
@@ -1,6 +1,6 @@
-black==24.4.2
+black==24.8.0
 clingo==5.7.1
-flake8==7.1.0
+flake8==7.1.1
 isort==5.13.2
 mypy==1.8.0
 types-six==1.16.21.20240513
.github/workflows/unit_tests.yaml (vendored, 14 changes)

@@ -16,38 +16,27 @@ jobs:
       matrix:
         os: [ubuntu-latest]
         python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12']
-        concretizer: ['clingo']
         on_develop:
         - ${{ github.ref == 'refs/heads/develop' }}
         include:
-        - python-version: '3.11'
-          os: ubuntu-latest
-          concretizer: original
-          on_develop: ${{ github.ref == 'refs/heads/develop' }}
         - python-version: '3.6'
           os: ubuntu-20.04
-          concretizer: clingo
           on_develop: ${{ github.ref == 'refs/heads/develop' }}
         exclude:
         - python-version: '3.7'
           os: ubuntu-latest
-          concretizer: 'clingo'
           on_develop: false
         - python-version: '3.8'
           os: ubuntu-latest
-          concretizer: 'clingo'
           on_develop: false
         - python-version: '3.9'
           os: ubuntu-latest
-          concretizer: 'clingo'
           on_develop: false
         - python-version: '3.10'
           os: ubuntu-latest
-          concretizer: 'clingo'
           on_develop: false
         - python-version: '3.11'
           os: ubuntu-latest
-          concretizer: 'clingo'
           on_develop: false

     steps:
@@ -85,7 +74,6 @@ jobs:
     - name: Run unit tests
       env:
         SPACK_PYTHON: python
-        SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
         SPACK_TEST_PARALLEL: 2
         COVERAGE: true
         UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
@@ -182,7 +170,6 @@ jobs:
     - name: Run unit tests (full suite with coverage)
       env:
         COVERAGE: true
-        SPACK_TEST_SOLVER: clingo
       run: |
         share/spack/qa/run-unit-tests
     - uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673
@@ -213,7 +200,6 @@ jobs:
         brew install dash fish gcc gnupg2 kcov
     - name: Run unit tests
       env:
-        SPACK_TEST_SOLVER: clingo
         SPACK_TEST_PARALLEL: 4
       run: |
         git --version
.gitignore (vendored, 1 change)

@@ -7,6 +7,7 @@
 /var/spack/environments
 /var/spack/repos/*/index.yaml
 /var/spack/repos/*/lock
+/var/spack/repos/*/packages.zip
 /opt
 # Ignore everything in /etc/spack except /etc/spack/defaults
 /etc/spack/*
@@ -170,23 +170,6 @@ config:
   # If set to true, Spack will use ccache to cache C compiles.
   ccache: false


-  # The concretization algorithm to use in Spack. Options are:
-  #
-  # 'clingo': Uses a logic solver under the hood to solve DAGs with full
-  #           backtracking and optimization for user preferences. Spack will
-  #           try to bootstrap the logic solver, if not already available.
-  #
-  # 'original': Spack's original greedy, fixed-point concretizer. This
-  #             algorithm can make decisions too early and will not backtrack
-  #             sufficiently for many specs. This will soon be deprecated in
-  #             favor of clingo.
-  #
-  # See `concretizer.yaml` for more settings you can fine-tune when
-  # using clingo.
-  concretizer: clingo
-
-
   # How long to wait to lock the Spack installation database. This lock is used
   # when Spack needs to manage its own package metadata and all operations are
   # expected to complete within the default time limit. The timeout should
@@ -1,3 +0,0 @@
-packages:
-  iconv:
-    require: [libiconv]
@@ -20,11 +20,14 @@ packages:
     awk: [gawk]
     armci: [armcimpi]
     blas: [openblas, amdblis]
+    c: [gcc]
+    cxx: [gcc]
     D: [ldc]
     daal: [intel-oneapi-daal]
     elf: [elfutils]
     fftw-api: [fftw, amdfftw]
     flame: [libflame, amdlibflame]
+    fortran: [gcc]
     fortran-rt: [gcc-runtime, intel-oneapi-runtime]
     fuse: [libfuse]
     gl: [glx, osmesa]
@@ -61,6 +64,7 @@ packages:
     tbb: [intel-tbb]
     unwind: [libunwind]
     uuid: [util-linux-uuid, libuuid]
+    wasi-sdk: [wasi-sdk-prebuilt]
     xxd: [xxd-standalone, vim]
     yacc: [bison, byacc]
     ziglang: [zig]
@@ -1,6 +1,5 @@
 config:
   locks: false
-  concretizer: clingo
   build_stage::
     - '$spack/.staging'
   stage_name: '{name}-{version}-{hash:7}'
@@ -206,6 +206,7 @@ def setup(sphinx):
     ("py:class", "six.moves.urllib.parse.ParseResult"),
     ("py:class", "TextIO"),
     ("py:class", "hashlib._Hash"),
+    ("py:class", "concurrent.futures._base.Executor"),
     # Spack classes that are private and we don't want to expose
     ("py:class", "spack.provider_index._IndexBase"),
     ("py:class", "spack.repo._PrependFileLoader"),
@@ -1263,6 +1263,11 @@ Git fetching supports the following parameters to ``version``:
   option ``--depth 1`` will be used if the version of git and the specified
   transport protocol support it, and ``--single-branch`` will be used if the
   version of git supports it.
+* ``git_sparse_paths``: Use ``sparse-checkout`` to only clone these relative paths.
+  This feature requires ``git`` to be version ``2.25.0`` or later but is useful for
+  large repositories that have separate portions that can be built independently.
+  If paths provided are directories then all the subdirectories and associated files
+  will also be cloned.

 Only one of ``tag``, ``branch``, or ``commit`` can be used at a time.
@@ -1361,6 +1366,41 @@ Submodules
 For more information about git submodules see the manpage of git: ``man
 git-submodule``.

+Sparse-Checkout
+
+You can supply ``git_sparse_paths`` at the package or version level to utilize git's
+sparse-checkout feature. This will only clone the paths that are specified in the
+``git_sparse_paths`` attribute for the package, along with the files in the top level directory.
+This feature allows you to only clone what you need from a large repository.
+Note that this is a newer feature in git and requires git ``2.25.0`` or greater.
+If ``git_sparse_paths`` is supplied and the git version is too old,
+then a warning will be issued and that package will use the standard cloning operations instead.
+``git_sparse_paths`` should be supplied as a list of paths, a callable function for versions,
+or a more complex package attribute using the ``@property`` decorator. The return value should be
+a list for a callable implementation of ``git_sparse_paths``.
+
+.. code-block:: python
+
+   def sparse_path_function(package):
+       """A callable function that can be used inside a version."""
+       # paths can be directories or files; all subdirectories and files are included
+       paths = ["doe", "rae", "me/file.cpp"]
+       if package.spec.version > Version("1.2.0"):
+           paths.extend(["fae"])
+       return paths
+
+   class MyPackage(Package):
+       # can also be a package attribute that will be used if not specified in versions
+       git_sparse_paths = ["doe", "rae"]
+
+       # use the package attribute
+       version("1.0.0")
+       version("1.1.0")
+       # use the function
+       version("1.1.5", git_sparse_paths=sparse_path_function)
+       version("1.2.0", git_sparse_paths=sparse_path_function)
+       version("1.2.5", git_sparse_paths=sparse_path_function)
+
 .. _github-fetch:

 ^^^^^^
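For readers unfamiliar with git's sparse-checkout mode, the following standalone sketch shows the kind of command sequence the feature boils down to. It is illustrative only (the repository URL and paths are made up), it is not the code Spack itself runs, and it assumes a git newer than 2.25.0 on PATH.

import subprocess

def sparse_clone(url: str, dest: str, paths: list) -> None:
    """Clone only the given relative paths (plus top-level files) from a repository."""
    # Clone without checking out files, so nothing outside the sparse set is materialized.
    subprocess.run(["git", "clone", "--no-checkout", url, dest], check=True)
    # Restrict the sparse-checkout set to the requested directories (git >= 2.25.0).
    subprocess.run(["git", "-C", dest, "sparse-checkout", "set"] + list(paths), check=True)
    # Populate the working tree with just the sparse paths and top-level files.
    subprocess.run(["git", "-C", dest, "checkout"], check=True)

# Hypothetical usage:
# sparse_clone("https://example.com/big-repo.git", "big-repo", ["doe", "rae"])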
@@ -1,13 +1,13 @@
-sphinx==7.2.6
+sphinx==7.4.7
 sphinxcontrib-programoutput==0.17
-sphinx_design==0.6.0
+sphinx_design==0.6.1
 sphinx-rtd-theme==2.0.0
 python-levenshtein==0.25.1
 docutils==0.20.1
 pygments==2.18.0
 urllib3==2.2.2
-pytest==8.2.2
+pytest==8.3.2
 isort==5.13.2
-black==24.4.2
+black==24.8.0
-flake8==7.1.0
+flake8==7.1.1
-mypy==1.10.1
+mypy==1.11.1
lib/spack/env/cc (vendored, 96 changes)

@@ -174,6 +174,46 @@ preextend() {
     unset IFS
 }

+execute() {
+    # dump the full command if the caller supplies SPACK_TEST_COMMAND=dump-args
+    if [ -n "${SPACK_TEST_COMMAND=}" ]; then
+        case "$SPACK_TEST_COMMAND" in
+            dump-args)
+                IFS="$lsep"
+                for arg in $full_command_list; do
+                    echo "$arg"
+                done
+                unset IFS
+                exit
+                ;;
+            dump-env-*)
+                var=${SPACK_TEST_COMMAND#dump-env-}
+                eval "printf '%s\n' \"\$0: \$var: \$$var\""
+                ;;
+            *)
+                die "Unknown test command: '$SPACK_TEST_COMMAND'"
+                ;;
+        esac
+    fi
+
+    #
+    # Write the input and output commands to debug logs if it's asked for.
+    #
+    if [ "$SPACK_DEBUG" = TRUE ]; then
+        input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.in.log"
+        output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.out.log"
+        echo "[$mode] $command $input_command" >> "$input_log"
+        IFS="$lsep"
+        echo "[$mode] "$full_command_list >> "$output_log"
+        unset IFS
+    fi
+
+    # Execute the full command, preserving spaces with IFS set
+    # to the alarm bell separator.
+    IFS="$lsep"; exec $full_command_list
+    exit
+}
+
 # Fail with a clear message if the input contains any bell characters.
 if eval "[ \"\${*#*${lsep}}\" != \"\$*\" ]"; then
     die "Compiler command line contains our separator ('${lsep}'). Cannot parse."
@@ -231,12 +271,17 @@ fi
 #   ld     link
 #   ccld   compile & link

+# Note. SPACK_ALWAYS_XFLAGS are applied for all compiler invocations,
+# including version checks (SPACK_XFLAGS variants are not applied
+# for version checks).
 command="${0##*/}"
 comp="CC"
+vcheck_flags=""
 case "$command" in
     cpp)
         mode=cpp
         debug_flags="-g"
+        vcheck_flags="${SPACK_ALWAYS_CPPFLAGS}"
         ;;
     cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc|amdclang|cl.exe|craycc)
         command="$SPACK_CC"
@@ -244,6 +289,7 @@ case "$command" in
         comp="CC"
         lang_flags=C
         debug_flags="-g"
+        vcheck_flags="${SPACK_ALWAYS_CFLAGS}"
         ;;
     c++|CC|g++|clang++|armclang++|icpc|icpx|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC)
         command="$SPACK_CXX"
@@ -251,6 +297,7 @@ case "$command" in
         comp="CXX"
         lang_flags=CXX
         debug_flags="-g"
+        vcheck_flags="${SPACK_ALWAYS_CXXFLAGS}"
         ;;
     ftn|f90|fc|f95|gfortran|flang|armflang|ifort|ifx|pgfortran|nvfortran|xlf90|xlf90_r|nagfor|frt|amdflang|crayftn)
         command="$SPACK_FC"
@@ -258,6 +305,7 @@ case "$command" in
         comp="FC"
         lang_flags=F
         debug_flags="-g"
+        vcheck_flags="${SPACK_ALWAYS_FFLAGS}"
         ;;
     f77|xlf|xlf_r|pgf77)
         command="$SPACK_F77"
@@ -265,6 +313,7 @@ case "$command" in
         comp="F77"
         lang_flags=F
         debug_flags="-g"
+        vcheck_flags="${SPACK_ALWAYS_FFLAGS}"
         ;;
     ld|ld.gold|ld.lld)
         mode=ld
@@ -365,7 +414,11 @@ unset IFS
 export PATH="$new_dirs"

 if [ "$mode" = vcheck ]; then
-    exec "${command}" "$@"
+    full_command_list="$command"
+    args="$@"
+    extend full_command_list vcheck_flags
+    extend full_command_list args
+    execute
 fi

 # Darwin's linker has a -r argument that merges object files together.
@@ -722,6 +775,7 @@ case "$mode" in
     cc|ccld)
         case $lang_flags in
             F)
+                extend spack_flags_list SPACK_ALWAYS_FFLAGS
                 extend spack_flags_list SPACK_FFLAGS
                 ;;
         esac
@@ -731,6 +785,7 @@ esac
 # C preprocessor flags come before any C/CXX flags
 case "$mode" in
     cpp|as|cc|ccld)
+        extend spack_flags_list SPACK_ALWAYS_CPPFLAGS
         extend spack_flags_list SPACK_CPPFLAGS
         ;;
 esac
@@ -741,9 +796,11 @@ case "$mode" in
     cc|ccld)
         case $lang_flags in
             C)
+                extend spack_flags_list SPACK_ALWAYS_CFLAGS
                 extend spack_flags_list SPACK_CFLAGS
                 ;;
             CXX)
+                extend spack_flags_list SPACK_ALWAYS_CXXFLAGS
                 extend spack_flags_list SPACK_CXXFLAGS
                 ;;
         esac
@@ -933,39 +990,4 @@ if [ -n "$SPACK_CCACHE_BINARY" ]; then
     esac
 fi

-# dump the full command if the caller supplies SPACK_TEST_COMMAND=dump-args
+execute
-if [ -n "${SPACK_TEST_COMMAND=}" ]; then
-    case "$SPACK_TEST_COMMAND" in
-        dump-args)
-            IFS="$lsep"
-            for arg in $full_command_list; do
-                echo "$arg"
-            done
-            unset IFS
-            exit
-            ;;
-        dump-env-*)
-            var=${SPACK_TEST_COMMAND#dump-env-}
-            eval "printf '%s\n' \"\$0: \$var: \$$var\""
-            ;;
-        *)
-            die "Unknown test command: '$SPACK_TEST_COMMAND'"
-            ;;
-    esac
-fi
-
-#
-# Write the input and output commands to debug logs if it's asked for.
-#
-if [ "$SPACK_DEBUG" = TRUE ]; then
-    input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.in.log"
-    output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.out.log"
-    echo "[$mode] $command $input_command" >> "$input_log"
-    IFS="$lsep"
-    echo "[$mode] "$full_command_list >> "$output_log"
-    unset IFS
-fi
-
-# Execute the full command, preserving spaces with IFS set
-# to the alarm bell separator.
-IFS="$lsep"; exec $full_command_list
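The refactor above moves the dump-args/dump-env debugging hooks and the final exec into a single execute() function, so version-check (vcheck) invocations now go through the same path and can pick up the new SPACK_ALWAYS_* flags. A quick way to inspect what the wrapper would run is the SPACK_TEST_COMMAND=dump-args hook that the script already supports; the sketch below drives it from Python, with a placeholder wrapper path and source file (inside a real Spack build the SPACK_* environment is set up by Spack itself).

import os
import subprocess

# Ask the compiler wrapper to print, one argument per line, the exact command it
# would exec instead of running the real compiler. Paths here are hypothetical.
env = dict(os.environ, SPACK_TEST_COMMAND="dump-args")
result = subprocess.run(
    ["./lib/spack/env/cc", "-c", "foo.c", "-o", "foo.o"],
    env=env, capture_output=True, text=True, check=False,
)
print(result.stdout)  # the final argv, one entry per line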
lib/spack/external/__init__.py (vendored, 2 changes)

@@ -18,7 +18,7 @@

 * Homepage: https://pypi.python.org/pypi/archspec
 * Usage: Labeling, comparison and detection of microarchitectures
-* Version: 0.2.4 (commit 48b92512b9ce203ded0ebd1ac41b42593e931f7c)
+* Version: 0.2.5-dev (commit 7e6740012b897ae4a950f0bba7e9726b767e921f)

 astunparse
 ----------------
lib/spack/external/archspec/cpu/detect.py (vendored, 12 changes)

@@ -47,7 +47,11 @@ def decorator(factory):


 def partial_uarch(
-    name: str = "", vendor: str = "", features: Optional[Set[str]] = None, generation: int = 0
+    name: str = "",
+    vendor: str = "",
+    features: Optional[Set[str]] = None,
+    generation: int = 0,
+    cpu_part: str = "",
 ) -> Microarchitecture:
     """Construct a partial microarchitecture, from information gathered during system scan."""
     return Microarchitecture(
@@ -57,6 +61,7 @@ def partial_uarch(
         features=features or set(),
         compilers={},
         generation=generation,
+        cpu_part=cpu_part,
     )


@@ -90,6 +95,7 @@ def proc_cpuinfo() -> Microarchitecture:
         return partial_uarch(
             vendor=_canonicalize_aarch64_vendor(data),
             features=_feature_set(data, key="Features"),
+            cpu_part=data.get("CPU part", ""),
         )

     if architecture in (PPC64LE, PPC64):
@@ -345,6 +351,10 @@ def sorting_fn(item):
     generic_candidates = [c for c in candidates if c.vendor == "generic"]
     best_generic = max(generic_candidates, key=sorting_fn)

+    # Relevant for AArch64. Filter on "cpu_part" if we have any match
+    if info.cpu_part != "" and any(c for c in candidates if info.cpu_part == c.cpu_part):
+        candidates = [c for c in candidates if info.cpu_part == c.cpu_part]
+
     # Filter the candidates to be descendant of the best generic candidate.
     # This is to avoid that the lack of a niche feature that can be disabled
     # from e.g. BIOS prevents detection of a reasonably performant architecture
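The new cpu_part filter narrows host detection on AArch64, where /proc/cpuinfo exposes a "CPU part" identifier (for example 0xd40 for Neoverse V1 in the targets file below). A self-contained sketch of the same idea, using made-up candidate objects rather than archspec's real Microarchitecture instances:

from dataclasses import dataclass

@dataclass
class Candidate:
    name: str
    cpu_part: str  # value of "CPU part" from /proc/cpuinfo, "" when unknown

def filter_by_cpu_part(detected_part: str, candidates: list) -> list:
    """Keep only candidates whose cpu_part matches the detected one, if any match."""
    if detected_part and any(c.cpu_part == detected_part for c in candidates):
        return [c for c in candidates if c.cpu_part == detected_part]
    # No match (or nothing detected): fall back to the unfiltered list,
    # so feature-based selection can still pick a reasonable target.
    return candidates

# Toy data mirroring the cpupart values added to the JSON targets file:
candidates = [Candidate("neoverse_n1", "0xd0c"), Candidate("neoverse_v1", "0xd40")]
print(filter_by_cpu_part("0xd40", candidates))  # only neoverse_v1
print(filter_by_cpu_part("0xabc", candidates))  # both candidates (no match, no filtering)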
@@ -2,9 +2,7 @@
 # Archspec Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
-"""Types and functions to manage information
-on CPU microarchitectures.
-"""
+"""Types and functions to manage information on CPU microarchitectures."""
 import functools
 import platform
 import re
@@ -65,21 +63,24 @@ class Microarchitecture:
           passed in as argument above.
         * versions: versions that support this micro-architecture.

-        generation (int): generation of the micro-architecture, if
-            relevant.
+        generation (int): generation of the micro-architecture, if relevant.
+        cpu_part (str): cpu part of the architecture, if relevant.
     """

-    # pylint: disable=too-many-arguments
+    # pylint: disable=too-many-arguments,too-many-instance-attributes
     #: Aliases for micro-architecture's features
     feature_aliases = FEATURE_ALIASES

-    def __init__(self, name, parents, vendor, features, compilers, generation=0):
+    def __init__(self, name, parents, vendor, features, compilers, generation=0, cpu_part=""):
         self.name = name
         self.parents = parents
         self.vendor = vendor
         self.features = features
         self.compilers = compilers
+        # Only relevant for PowerPC
         self.generation = generation
+        # Only relevant for AArch64
+        self.cpu_part = cpu_part
         # Cache the ancestor computation
         self._ancestors = None

@@ -111,6 +112,7 @@ def __eq__(self, other):
             and self.parents == other.parents  # avoid ancestors here
             and self.compilers == other.compilers
             and self.generation == other.generation
+            and self.cpu_part == other.cpu_part
         )

     @coerce_target_names
@@ -143,7 +145,8 @@ def __repr__(self):
         cls_name = self.__class__.__name__
         fmt = (
             cls_name + "({0.name!r}, {0.parents!r}, {0.vendor!r}, "
-            "{0.features!r}, {0.compilers!r}, {0.generation!r})"
+            "{0.features!r}, {0.compilers!r}, generation={0.generation!r}, "
+            "cpu_part={0.cpu_part!r})"
         )
         return fmt.format(self)

@@ -190,6 +193,7 @@ def to_dict(self):
             "generation": self.generation,
             "parents": [str(x) for x in self.parents],
             "compilers": self.compilers,
+            "cpupart": self.cpu_part,
         }

     @staticmethod
@@ -202,6 +206,7 @@ def from_dict(data) -> "Microarchitecture":
             features=set(data["features"]),
             compilers=data.get("compilers", {}),
             generation=data.get("generation", 0),
+            cpu_part=data.get("cpupart", ""),
         )

     def optimization_flags(self, compiler, version):
@@ -360,8 +365,11 @@ def fill_target_from_dict(name, data, targets):
     features = set(values["features"])
     compilers = values.get("compilers", {})
     generation = values.get("generation", 0)
+    cpu_part = values.get("cpupart", "")

-    targets[name] = Microarchitecture(name, parents, vendor, features, compilers, generation)
+    targets[name] = Microarchitecture(
+        name, parents, vendor, features, compilers, generation=generation, cpu_part=cpu_part
+    )

     known_targets = {}
     data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"]
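Since cpu_part now participates in equality and (de)serialization, a round-trip through to_dict/from_dict preserves it, with "" as the backward-compatible default. A minimal sketch of that contract, using a stand-in class rather than the real Microarchitecture (which also carries parents, features and compiler entries):

from dataclasses import dataclass

@dataclass(eq=True)
class Uarch:
    name: str
    generation: int = 0
    cpu_part: str = ""  # new field, "" when not applicable (non-AArch64)

    def to_dict(self) -> dict:
        # The serialized key is spelled "cpupart", mirroring the JSON targets file.
        return {"name": self.name, "generation": self.generation, "cpupart": self.cpu_part}

    @staticmethod
    def from_dict(data: dict) -> "Uarch":
        # .get() keeps older dictionaries without "cpupart" loadable.
        return Uarch(data["name"], data.get("generation", 0), data.get("cpupart", ""))

original = Uarch("neoverse_v2", cpu_part="0xd4f")
assert Uarch.from_dict(original.to_dict()) == original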
@@ -2225,10 +2225,14 @@
       ],
       "nvhpc": [
         {
-          "versions": "21.11:",
+          "versions": "21.11:23.8",
           "name": "zen3",
           "flags": "-tp {name}",
-          "warnings": "zen4 is not fully supported by nvhpc yet, falling back to zen3"
+          "warnings": "zen4 is not fully supported by nvhpc versions < 23.9, falling back to zen3"
+        },
+        {
+          "versions": "23.9:",
+          "flags": "-tp {name}"
         }
       ]
     }
@@ -2711,7 +2715,8 @@
           "flags": "-mcpu=thunderx2t99"
         }
       ]
-    }
+    },
+    "cpupart": "0x0af"
   },
   "a64fx": {
     "from": ["armv8.2a"],
@@ -2779,7 +2784,8 @@
           "flags": "-march=armv8.2-a+crc+crypto+fp16+sve"
         }
       ]
-    }
+    },
+    "cpupart": "0x001"
   },
   "cortex_a72": {
     "from": ["aarch64"],
@@ -2816,7 +2822,8 @@
           "flags" : "-mcpu=cortex-a72"
         }
       ]
-    }
+    },
+    "cpupart": "0xd08"
   },
   "neoverse_n1": {
     "from": ["cortex_a72", "armv8.2a"],
@@ -2902,7 +2909,8 @@
           "flags": "-tp {name}"
         }
       ]
-    }
+    },
+    "cpupart": "0xd0c"
   },
   "neoverse_v1": {
     "from": ["neoverse_n1", "armv8.4a"],
@@ -2926,8 +2934,6 @@
       "lrcpc",
       "dcpop",
       "sha3",
-      "sm3",
-      "sm4",
       "asimddp",
       "sha512",
       "sve",
@@ -3028,7 +3034,8 @@
           "flags": "-tp {name}"
         }
       ]
-    }
+    },
+    "cpupart": "0xd40"
   },
   "neoverse_v2": {
     "from": ["neoverse_n1", "armv9.0a"],
@@ -3052,13 +3059,10 @@
       "lrcpc",
       "dcpop",
       "sha3",
-      "sm3",
-      "sm4",
       "asimddp",
       "sha512",
       "sve",
       "asimdfhm",
-      "dit",
       "uscat",
       "ilrcpc",
       "flagm",
@@ -3066,18 +3070,12 @@
       "sb",
       "dcpodp",
       "sve2",
-      "sveaes",
-      "svepmull",
-      "svebitperm",
-      "svesha3",
-      "svesm4",
       "flagm2",
       "frint",
       "svei8mm",
       "svebf16",
       "i8mm",
-      "bf16",
-      "dgh"
+      "bf16"
     ],
     "compilers" : {
       "gcc": [
@@ -3102,15 +3100,19 @@
           "flags" : "-march=armv8.5-a+sve -mtune=cortex-a76"
         },
         {
-          "versions": "10.0:11.99",
+          "versions": "10.0:11.3.99",
           "flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16 -mtune=cortex-a77"
         },
+        {
+          "versions": "11.4:11.99",
+          "flags" : "-mcpu=neoverse-v2"
+        },
         {
-          "versions": "12.0:12.99",
+          "versions": "12.0:12.2.99",
           "flags" : "-march=armv9-a+i8mm+bf16 -mtune=cortex-a710"
         },
         {
-          "versions": "13.0:",
+          "versions": "12.3:",
           "flags" : "-mcpu=neoverse-v2"
         }
       ],
@@ -3145,7 +3147,113 @@
           "flags": "-tp {name}"
         }
       ]
-    }
+    },
+    "cpupart": "0xd4f"
+  },
+  "neoverse_n2": {
+    "from": ["neoverse_n1", "armv9.0a"],
+    "vendor": "ARM",
+    "features": [
+      "fp",
+      "asimd",
+      "evtstrm",
+      "aes",
+      "pmull",
+      "sha1",
+      "sha2",
+      "crc32",
+      "atomics",
+      "fphp",
+      "asimdhp",
+      "cpuid",
+      "asimdrdm",
+      "jscvt",
+      "fcma",
+      "lrcpc",
+      "dcpop",
+      "sha3",
+      "asimddp",
+      "sha512",
+      "sve",
+      "asimdfhm",
+      "uscat",
+      "ilrcpc",
+      "flagm",
+      "ssbs",
+      "sb",
+      "dcpodp",
+      "sve2",
+      "flagm2",
+      "frint",
+      "svei8mm",
+      "svebf16",
+      "i8mm",
+      "bf16"
+    ],
+    "compilers" : {
+      "gcc": [
+        {
+          "versions": "4.8:5.99",
+          "flags": "-march=armv8-a"
+        },
+        {
+          "versions": "6:6.99",
+          "flags" : "-march=armv8.1-a"
+        },
+        {
+          "versions": "7.0:7.99",
+          "flags" : "-march=armv8.2-a -mtune=cortex-a72"
+        },
+        {
+          "versions": "8.0:8.99",
+          "flags" : "-march=armv8.4-a+sve -mtune=cortex-a72"
+        },
+        {
+          "versions": "9.0:9.99",
+          "flags" : "-march=armv8.5-a+sve -mtune=cortex-a76"
+        },
+        {
+          "versions": "10.0:10.99",
+          "flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16 -mtune=cortex-a77"
+        },
+        {
+          "versions": "11.0:",
+          "flags" : "-mcpu=neoverse-n2"
+        }
+      ],
+      "clang" : [
+        {
+          "versions": "9.0:10.99",
+          "flags" : "-march=armv8.5-a+sve"
+        },
+        {
+          "versions": "11.0:13.99",
+          "flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16"
+        },
+        {
+          "versions": "14.0:15.99",
+          "flags" : "-march=armv9-a+i8mm+bf16"
+        },
+        {
+          "versions": "16.0:",
+          "flags" : "-mcpu=neoverse-n2"
+        }
+      ],
+      "arm" : [
+        {
+          "versions": "23.04.0:",
+          "flags" : "-mcpu=neoverse-n2"
+        }
+      ],
+      "nvhpc" : [
+        {
+          "versions": "23.3:",
+          "name": "neoverse-n1",
+          "flags": "-tp {name}"
+        }
+      ]
+    },
+    "cpupart": "0xd49"
   },
   "m1": {
     "from": ["armv8.4a"],
@@ -3211,7 +3319,8 @@
           "flags" : "-mcpu=apple-m1"
         }
       ]
-    }
+    },
+    "cpupart": "0x022"
   },
   "m2": {
     "from": ["m1", "armv8.5a"],
@@ -3289,7 +3398,8 @@
           "flags" : "-mcpu=apple-m2"
         }
       ]
-    }
+    },
+    "cpupart": "0x032"
   },
   "arm": {
     "from": [],
@@ -52,6 +52,9 @@
         }
       }
     }
+    },
+    "cpupart": {
+      "type": "string"
     }
   },
   "required": [
@@ -107,4 +110,4 @@
   "additionalProperties": false
 }
 }
 }
@@ -1624,6 +1624,12 @@ def remove_linked_tree(path):
         shutil.rmtree(os.path.realpath(path), **kwargs)
         os.unlink(path)
     else:
+        if sys.platform == "win32":
+            # Adding this prefix allows shutil to remove long paths on windows
+            # https://learn.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=registry
+            long_path_pfx = "\\\\?\\"
+            if not path.startswith(long_path_pfx):
+                path = long_path_pfx + path
         shutil.rmtree(path, **kwargs)

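The \\?\ prefix opts Windows path parsing out of the legacy 260-character MAX_PATH limit, which is why prefixing before shutil.rmtree lets very deep build trees be deleted. A standalone sketch of the same helper, independent of Spack's filesystem utilities:

import shutil
import sys

def rmtree_long_paths(path: str) -> None:
    """Remove a directory tree, tolerating very long paths on Windows."""
    if sys.platform == "win32":
        # The extended-length prefix bypasses the 260-character MAX_PATH limit.
        long_path_pfx = "\\\\?\\"
        if not path.startswith(long_path_pfx):
            path = long_path_pfx + path
    shutil.rmtree(path)

# rmtree_long_paths(r"C:\spack-stage\very\deeply\nested\build\tree")  # hypothetical path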
@@ -351,6 +351,22 @@ def _wrongly_named_spec(error_cls):
     return errors


+@config_packages
+def _ensure_all_virtual_packages_have_default_providers(error_cls):
+    """All virtual packages must have a default provider explicitly set."""
+    configuration = spack.config.create()
+    defaults = configuration.get("packages", scope="defaults")
+    default_providers = defaults["all"]["providers"]
+    virtuals = spack.repo.PATH.provider_index.providers
+    default_providers_filename = configuration.scopes["defaults"].get_section_filename("packages")
+
+    return [
+        error_cls(f"'{virtual}' must have a default provider in {default_providers_filename}", [])
+        for virtual in virtuals
+        if virtual not in default_providers
+    ]
+
+
 def _make_config_error(config_data, summary, error_cls):
     s = io.StringIO()
     s.write("Occurring in the following file:\n")
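The core of this audit is a set difference between the virtuals known to the package repository and the providers declared under packages:all:providers in the defaults scope. A minimal, self-contained sketch of that logic with toy data (not Spack's real configuration or repository objects):

def missing_default_providers(virtuals, default_providers):
    """Return the virtual packages that have no default provider configured."""
    return sorted(v for v in virtuals if v not in default_providers)

# Toy data: 'c', 'cxx' and 'fortran' defaults were added in packages.yaml above,
# so only the unconfigured virtual is reported.
virtuals = {"mpi", "blas", "c", "cxx", "fortran", "some-new-virtual"}
default_providers = {"mpi": ["openmpi", "mpich"], "blas": ["openblas", "amdblis"],
                     "c": ["gcc"], "cxx": ["gcc"], "fortran": ["gcc"]}
print(missing_default_providers(virtuals, default_providers))  # ['some-new-virtual']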
(File diff suppressed because it is too large.)

lib/spack/spack/bootstrap/clingo.py (new file, 154 lines)

@@ -0,0 +1,154 @@
+# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+"""Bootstrap concrete specs for clingo
+
+Spack uses clingo to concretize specs. When clingo itself needs to be bootstrapped from sources,
+we need to rely on another mechanism to get a concrete spec that fits the current host.
+
+This module contains the logic to get a concrete spec for clingo, starting from a prototype
+JSON file for a similar platform.
+"""
+import pathlib
+import sys
+from typing import Dict, Optional, Tuple
+
+import archspec.cpu
+
+import spack.compiler
+import spack.compilers
+import spack.platforms
+import spack.spec
+import spack.traverse
+
+from .config import spec_for_current_python
+
+
+class ClingoBootstrapConcretizer:
+    def __init__(self, configuration):
+        self.host_platform = spack.platforms.host()
+        self.host_os = self.host_platform.operating_system("frontend")
+        self.host_target = archspec.cpu.host().family
+        self.host_architecture = spack.spec.ArchSpec.frontend_arch()
+        self.host_architecture.target = str(self.host_target)
+        self.host_compiler = self._valid_compiler_or_raise()
+        self.host_python = self.python_external_spec()
+        if str(self.host_platform) == "linux":
+            self.host_libc = self.libc_external_spec()
+
+        self.external_cmake, self.external_bison = self._externals_from_yaml(configuration)
+
+    def _valid_compiler_or_raise(self) -> "spack.compiler.Compiler":
+        if str(self.host_platform) == "linux":
+            compiler_name = "gcc"
+        elif str(self.host_platform) == "darwin":
+            compiler_name = "apple-clang"
+        elif str(self.host_platform) == "windows":
+            compiler_name = "msvc"
+        elif str(self.host_platform) == "freebsd":
+            compiler_name = "clang"
+        else:
+            raise RuntimeError(f"Cannot bootstrap clingo from sources on {self.host_platform}")
+        candidates = spack.compilers.compilers_for_spec(
+            compiler_name, arch_spec=self.host_architecture
+        )
+        if not candidates:
+            raise RuntimeError(
+                f"Cannot find any version of {compiler_name} to bootstrap clingo from sources"
+            )
+        candidates.sort(key=lambda x: x.spec.version, reverse=True)
+        return candidates[0]
+
+    def _externals_from_yaml(
+        self, configuration: "spack.config.Configuration"
+    ) -> Tuple[Optional["spack.spec.Spec"], Optional["spack.spec.Spec"]]:
+        packages_yaml = configuration.get("packages")
+        requirements = {"cmake": "@3.20:", "bison": "@2.5:"}
+        selected: Dict[str, Optional["spack.spec.Spec"]] = {"cmake": None, "bison": None}
+        for pkg_name in ["cmake", "bison"]:
+            if pkg_name not in packages_yaml:
+                continue
+
+            candidates = packages_yaml[pkg_name].get("externals", [])
+            for candidate in candidates:
+                s = spack.spec.Spec(candidate["spec"], external_path=candidate["prefix"])
+                if not s.satisfies(requirements[pkg_name]):
+                    continue
+
+                if not s.intersects(f"%{self.host_compiler.spec}"):
+                    continue
+
+                if not s.intersects(f"arch={self.host_architecture}"):
+                    continue
+
+                selected[pkg_name] = self._external_spec(s)
+                break
+        return selected["cmake"], selected["bison"]
+
+    def prototype_path(self) -> pathlib.Path:
+        """Path to a prototype concrete specfile for clingo"""
+        parent_dir = pathlib.Path(__file__).parent
+        result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-{self.host_target}.json"
+        if str(self.host_platform) == "linux":
+            # Using aarch64 as a fallback, since it has gnuconfig (x86_64 doesn't have it)
+            if not result.exists():
+                result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-aarch64.json"
+
+        elif str(self.host_platform) == "freebsd":
+            result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-amd64.json"
+
+        elif not result.exists():
+            raise RuntimeError(f"Cannot bootstrap clingo from sources on {self.host_platform}")
+
+        return result
+
+    def concretize(self) -> "spack.spec.Spec":
+        # Read the prototype and mark it NOT concrete
+        s = spack.spec.Spec.from_specfile(str(self.prototype_path()))
+        s._mark_concrete(False)
+
+        # Tweak it to conform to the host architecture
+        for node in s.traverse():
+            node.architecture.os = str(self.host_os)
+            node.compiler = self.host_compiler.spec
+            node.architecture = self.host_architecture
+
+            if node.name == "gcc-runtime":
+                node.versions = self.host_compiler.spec.versions
+
+        for edge in spack.traverse.traverse_edges([s], cover="edges"):
+            if edge.spec.name == "python":
+                edge.spec = self.host_python
+
+            if edge.spec.name == "bison" and self.external_bison:
+                edge.spec = self.external_bison
+
+            if edge.spec.name == "cmake" and self.external_cmake:
+                edge.spec = self.external_cmake
+
+            if "libc" in edge.virtuals:
+                edge.spec = self.host_libc
+
+        s._finalize_concretization()
+
+        # Work around the fact that the installer calls Spec.dependents() and
+        # we modified edges inconsistently
+        return s.copy()
+
+    def python_external_spec(self) -> "spack.spec.Spec":
+        """Python external spec corresponding to the current running interpreter"""
+        result = spack.spec.Spec(spec_for_current_python(), external_path=sys.exec_prefix)
+        return self._external_spec(result)
+
+    def libc_external_spec(self) -> "spack.spec.Spec":
+        result = self.host_compiler.default_libc
+        return self._external_spec(result)
+
+    def _external_spec(self, initial_spec) -> "spack.spec.Spec":
+        initial_spec.namespace = "builtin"
+        initial_spec.compiler = self.host_compiler.spec
+        initial_spec.architecture = self.host_architecture
+        for flag_type in spack.spec.FlagMap.valid_compiler_flags():
+            initial_spec.compiler_flags[flag_type] = []
+        return spack.spec.parse_with_version_concrete(initial_spec)
@@ -54,6 +54,7 @@
|
|||||||
import spack.version
|
import spack.version
|
||||||
|
|
||||||
from ._common import _executables_in_store, _python_import, _root_spec, _try_import_from_store
|
from ._common import _executables_in_store, _python_import, _root_spec, _try_import_from_store
|
||||||
|
from .clingo import ClingoBootstrapConcretizer
|
||||||
from .config import spack_python_interpreter, spec_for_current_python
|
from .config import spack_python_interpreter, spec_for_current_python
|
||||||
|
|
||||||
#: Name of the file containing metadata about the bootstrapping source
|
#: Name of the file containing metadata about the bootstrapping source
|
||||||
@@ -268,15 +269,13 @@ def try_import(self, module: str, abstract_spec_str: str) -> bool:
|
|||||||
|
|
||||||
# Try to build and install from sources
|
# Try to build and install from sources
|
||||||
with spack_python_interpreter():
|
with spack_python_interpreter():
|
||||||
# Add hint to use frontend operating system on Cray
|
|
||||||
concrete_spec = spack.spec.Spec(abstract_spec_str + " ^" + spec_for_current_python())
|
|
||||||
|
|
||||||
if module == "clingo":
|
if module == "clingo":
|
||||||
# TODO: remove when the old concretizer is deprecated # pylint: disable=fixme
|
bootstrapper = ClingoBootstrapConcretizer(configuration=spack.config.CONFIG)
|
||||||
concrete_spec._old_concretize( # pylint: disable=protected-access
|
concrete_spec = bootstrapper.concretize()
|
||||||
deprecation_warning=False
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
|
concrete_spec = spack.spec.Spec(
|
||||||
|
abstract_spec_str + " ^" + spec_for_current_python()
|
||||||
|
)
|
||||||
concrete_spec.concretize()
|
concrete_spec.concretize()
|
||||||
|
|
||||||
msg = "[BOOTSTRAP MODULE {0}] Try installing '{1}' from sources"
|
msg = "[BOOTSTRAP MODULE {0}] Try installing '{1}' from sources"
|
||||||
@@ -303,14 +302,7 @@ def try_search_path(self, executables: Tuple[str], abstract_spec_str: str) -> bo
|
|||||||
# might reduce compilation time by a fair amount
|
# might reduce compilation time by a fair amount
|
||||||
_add_externals_if_missing()
|
_add_externals_if_missing()
|
||||||
|
|
||||||
concrete_spec = spack.spec.Spec(abstract_spec_str)
|
concrete_spec = spack.spec.Spec(abstract_spec_str).concretized()
|
||||||
if concrete_spec.name == "patchelf":
|
|
||||||
concrete_spec._old_concretize( # pylint: disable=protected-access
|
|
||||||
deprecation_warning=False
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
concrete_spec.concretize()
|
|
||||||
|
|
||||||
msg = "[BOOTSTRAP] Try installing '{0}' from sources"
|
msg = "[BOOTSTRAP] Try installing '{0}' from sources"
|
||||||
tty.debug(msg.format(abstract_spec_str))
|
tty.debug(msg.format(abstract_spec_str))
|
||||||
with spack.config.override(self.mirror_scope):
|
with spack.config.override(self.mirror_scope):
|
||||||
|
|||||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1473,7 +1473,7 @@ def long_message(self):
|
|||||||
out.write(" {0}\n".format(self.log_name))
|
out.write(" {0}\n".format(self.log_name))
|
||||||
|
|
||||||
# Also output the test log path IF it exists
|
# Also output the test log path IF it exists
|
||||||
if self.context != "test":
|
if self.context != "test" and have_log:
|
||||||
test_log = join_path(os.path.dirname(self.log_name), spack_install_test_log)
|
test_log = join_path(os.path.dirname(self.log_name), spack_install_test_log)
|
||||||
if os.path.isfile(test_log):
|
if os.path.isfile(test_log):
|
||||||
out.write("\nSee test log for details:\n")
|
out.write("\nSee test log for details:\n")
|
||||||
|
|||||||
@@ -124,6 +124,8 @@ def cuda_flags(arch_list):
|
|||||||
# minimum supported versions
|
# minimum supported versions
|
||||||
conflicts("%gcc@:4", when="+cuda ^cuda@11.0:")
|
conflicts("%gcc@:4", when="+cuda ^cuda@11.0:")
|
||||||
conflicts("%gcc@:5", when="+cuda ^cuda@11.4:")
|
conflicts("%gcc@:5", when="+cuda ^cuda@11.4:")
|
||||||
|
conflicts("%gcc@:7.2", when="+cuda ^cuda@12.4:")
|
||||||
|
conflicts("%clang@:6", when="+cuda ^cuda@12.2:")
|
||||||
|
|
||||||
# maximum supported version
|
# maximum supported version
|
||||||
# NOTE:
|
# NOTE:
|
||||||
@@ -211,12 +213,16 @@ def cuda_flags(arch_list):
|
|||||||
conflicts("%intel@19.0:", when="+cuda ^cuda@:10.0")
|
conflicts("%intel@19.0:", when="+cuda ^cuda@:10.0")
|
||||||
conflicts("%intel@19.1:", when="+cuda ^cuda@:10.1")
|
conflicts("%intel@19.1:", when="+cuda ^cuda@:10.1")
|
||||||
conflicts("%intel@19.2:", when="+cuda ^cuda@:11.1.0")
|
conflicts("%intel@19.2:", when="+cuda ^cuda@:11.1.0")
|
||||||
|
conflicts("%intel@2021:", when="+cuda ^cuda@:11.4.0")
|
||||||
|
|
||||||
# XL is mostly relevant for ppc64le Linux
|
# XL is mostly relevant for ppc64le Linux
|
||||||
conflicts("%xl@:12,14:", when="+cuda ^cuda@:9.1")
|
conflicts("%xl@:12,14:", when="+cuda ^cuda@:9.1")
|
||||||
conflicts("%xl@:12,14:15,17:", when="+cuda ^cuda@9.2")
|
conflicts("%xl@:12,14:15,17:", when="+cuda ^cuda@9.2")
|
||||||
conflicts("%xl@:12,17:", when="+cuda ^cuda@:11.1.0")
|
conflicts("%xl@:12,17:", when="+cuda ^cuda@:11.1.0")
|
||||||
|
|
||||||
|
# PowerPC.
|
||||||
|
conflicts("target=ppc64le", when="+cuda ^cuda@12.5:")
|
||||||
|
|
||||||
# Darwin.
|
# Darwin.
|
||||||
# TODO: add missing conflicts for %apple-clang cuda@:10
|
# TODO: add missing conflicts for %apple-clang cuda@:10
|
||||||
conflicts("platform=darwin", when="+cuda ^cuda@11.0.2: ")
|
conflicts("platform=darwin", when="+cuda ^cuda@11.0.2:")
|
||||||
|
|||||||
@@ -72,7 +72,7 @@ def build_directory(self):
|
|||||||
def build_args(self):
|
def build_args(self):
|
||||||
"""Arguments for ``go build``."""
|
"""Arguments for ``go build``."""
|
||||||
# Pass ldflags -s = --strip-all and -w = --no-warnings by default
|
# Pass ldflags -s = --strip-all and -w = --no-warnings by default
|
||||||
return ["-ldflags", "-s -w", "-o", f"{self.pkg.name}"]
|
return ["-modcacherw", "-ldflags", "-s -w", "-o", f"{self.pkg.name}"]
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def check_args(self):
|
def check_args(self):
|
||||||
|
|||||||
@@ -3,7 +3,6 @@
|
|||||||
#
|
#
|
||||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||||
"""Common utilities for managing intel oneapi packages."""
|
"""Common utilities for managing intel oneapi packages."""
|
||||||
import getpass
|
|
||||||
import os
|
import os
|
||||||
import platform
|
import platform
|
||||||
import shutil
|
import shutil
|
||||||
@@ -13,6 +12,7 @@
|
|||||||
from llnl.util.filesystem import HeaderList, LibraryList, find_libraries, join_path, mkdirp
|
from llnl.util.filesystem import HeaderList, LibraryList, find_libraries, join_path, mkdirp
|
||||||
from llnl.util.link_tree import LinkTree
|
from llnl.util.link_tree import LinkTree
|
||||||
|
|
||||||
|
import spack.util.path
|
||||||
from spack.build_environment import dso_suffix
|
from spack.build_environment import dso_suffix
|
||||||
from spack.directives import conflicts, license, redistribute, variant
|
from spack.directives import conflicts, license, redistribute, variant
|
||||||
from spack.package_base import InstallError
|
from spack.package_base import InstallError
|
||||||
@@ -99,7 +99,7 @@ def install_component(self, installer_path):
|
|||||||
# with other install depends on the userid. For root, we
|
# with other install depends on the userid. For root, we
|
||||||
# delete the installercache before and after install. For
|
# delete the installercache before and after install. For
|
||||||
# non root we redefine the HOME environment variable.
|
# non root we redefine the HOME environment variable.
|
||||||
if getpass.getuser() == "root":
|
if spack.util.path.get_user() == "root":
|
||||||
shutil.rmtree("/var/intel/installercache", ignore_errors=True)
|
shutil.rmtree("/var/intel/installercache", ignore_errors=True)
|
||||||
|
|
||||||
bash = Executable("bash")
|
bash = Executable("bash")
|
||||||
@@ -122,7 +122,7 @@ def install_component(self, installer_path):
|
|||||||
self.prefix,
|
self.prefix,
|
||||||
)
|
)
|
||||||
|
|
||||||
if getpass.getuser() == "root":
|
if spack.util.path.get_user() == "root":
|
||||||
shutil.rmtree("/var/intel/installercache", ignore_errors=True)
|
shutil.rmtree("/var/intel/installercache", ignore_errors=True)
|
||||||
|
|
||||||
# Some installers have a bug and do not return an error code when failing
|
# Some installers have a bug and do not return an error code when failing
|
||||||
|
|||||||
@@ -139,6 +139,10 @@ def configure(self, pkg, spec, prefix):
|
|||||||
args = ["--verbose", "--target-dir", inspect.getmodule(self.pkg).python_platlib]
|
args = ["--verbose", "--target-dir", inspect.getmodule(self.pkg).python_platlib]
|
||||||
args.extend(self.configure_args())
|
args.extend(self.configure_args())
|
||||||
|
|
||||||
|
# https://github.com/Python-SIP/sip/commit/cb0be6cb6e9b756b8b0db3136efb014f6fb9b766
|
||||||
|
if spec["py-sip"].satisfies("@6.1.0:"):
|
||||||
|
args.extend(["--scripts-dir", pkg.prefix.bin])
|
||||||
|
|
||||||
sip_build = Executable(spec["py-sip"].prefix.bin.join("sip-build"))
|
sip_build = Executable(spec["py-sip"].prefix.bin.join("sip-build"))
|
||||||
sip_build(*args)
|
sip_build(*args)
|
||||||
|
|
||||||
|
|||||||
@@ -38,6 +38,7 @@
|
|||||||
import spack.paths
|
import spack.paths
|
||||||
import spack.repo
|
import spack.repo
|
||||||
import spack.spec
|
import spack.spec
|
||||||
|
import spack.stage
|
||||||
import spack.util.git
|
import spack.util.git
|
||||||
import spack.util.gpg as gpg_util
|
import spack.util.gpg as gpg_util
|
||||||
import spack.util.spack_yaml as syaml
|
import spack.util.spack_yaml as syaml
|
||||||
@@ -71,7 +72,7 @@
|
|||||||
# TODO: Remove this in Spack 0.23
|
# TODO: Remove this in Spack 0.23
|
||||||
SHARED_PR_MIRROR_URL = "s3://spack-binaries-prs/shared_pr_mirror"
|
SHARED_PR_MIRROR_URL = "s3://spack-binaries-prs/shared_pr_mirror"
|
||||||
JOB_NAME_FORMAT = (
|
JOB_NAME_FORMAT = (
|
||||||
"{name}{@version} {/hash:7} {%compiler.name}{@compiler.version}{arch=architecture}"
|
"{name}{@version} {/hash:7} {%compiler.name}{@compiler.version}{ arch=architecture}"
|
||||||
)
|
)
|
||||||
IS_WINDOWS = sys.platform == "win32"
|
IS_WINDOWS = sys.platform == "win32"
|
||||||
spack_gpg = spack.main.SpackCommand("gpg")
|
spack_gpg = spack.main.SpackCommand("gpg")
|
||||||
@@ -1107,7 +1108,7 @@ def main_script_replacements(cmd):
|
|||||||
if cdash_handler and cdash_handler.auth_token:
|
if cdash_handler and cdash_handler.auth_token:
|
||||||
try:
|
try:
|
||||||
cdash_handler.populate_buildgroup(all_job_names)
|
cdash_handler.populate_buildgroup(all_job_names)
|
||||||
except (SpackError, HTTPError, URLError) as err:
|
except (SpackError, HTTPError, URLError, TimeoutError) as err:
|
||||||
tty.warn(f"Problem populating buildgroup: {err}")
|
tty.warn(f"Problem populating buildgroup: {err}")
|
||||||
else:
|
else:
|
||||||
tty.warn("Unable to populate buildgroup without CDash credentials")
|
tty.warn("Unable to populate buildgroup without CDash credentials")
|
||||||
@@ -1370,15 +1371,6 @@ def can_verify_binaries():
|
|||||||
return len(gpg_util.public_keys()) >= 1
|
return len(gpg_util.public_keys()) >= 1
|
||||||
|
|
||||||
|
|
||||||
def _push_to_build_cache(spec: spack.spec.Spec, sign_binaries: bool, mirror_url: str) -> None:
|
|
||||||
"""Unchecked version of the public API, for easier mocking"""
|
|
||||||
bindist.push_or_raise(
|
|
||||||
spec,
|
|
||||||
spack.mirror.Mirror.from_url(mirror_url).push_url,
|
|
||||||
bindist.PushOptions(force=True, unsigned=not sign_binaries),
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: bool) -> bool:
|
def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: bool) -> bool:
|
||||||
"""Push one or more binary packages to the mirror.
|
"""Push one or more binary packages to the mirror.
|
||||||
|
|
||||||
@@ -1389,20 +1381,13 @@ def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: b
|
|||||||
sign_binaries: If True, spack will attempt to sign binary package before pushing.
|
sign_binaries: If True, spack will attempt to sign binary package before pushing.
|
||||||
"""
|
"""
|
||||||
tty.debug(f"Pushing to build cache ({'signed' if sign_binaries else 'unsigned'})")
|
tty.debug(f"Pushing to build cache ({'signed' if sign_binaries else 'unsigned'})")
|
||||||
|
signing_key = bindist.select_signing_key() if sign_binaries else None
|
||||||
try:
|
try:
|
||||||
_push_to_build_cache(spec, sign_binaries, mirror_url)
|
bindist.push_or_raise([spec], out_url=mirror_url, signing_key=signing_key)
|
||||||
return True
|
return True
|
||||||
except bindist.PushToBuildCacheError as e:
|
except bindist.PushToBuildCacheError as e:
|
||||||
tty.error(str(e))
|
tty.error(f"Problem writing to {mirror_url}: {e}")
|
||||||
return False
|
return False
|
||||||
except Exception as e:
|
|
||||||
# TODO (zackgalbreath): write an adapter for boto3 exceptions so we can catch a specific
|
|
||||||
# exception instead of parsing str(e)...
|
|
||||||
msg = str(e)
|
|
||||||
if any(x in msg for x in ["Access Denied", "InvalidAccessKeyId"]):
|
|
||||||
tty.error(f"Permission problem writing to {mirror_url}: {msg}")
|
|
||||||
return False
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def remove_other_mirrors(mirrors_to_keep, scope=None):
|
def remove_other_mirrors(mirrors_to_keep, scope=None):
|
||||||
@@ -1448,10 +1433,6 @@ def copy_stage_logs_to_artifacts(job_spec: spack.spec.Spec, job_log_dir: str) ->
|
|||||||
job_log_dir: path into which build log should be copied
|
job_log_dir: path into which build log should be copied
|
||||||
"""
|
"""
|
||||||
tty.debug(f"job spec: {job_spec}")
|
tty.debug(f"job spec: {job_spec}")
|
||||||
if not job_spec:
|
|
||||||
msg = f"Cannot copy stage logs: job spec ({job_spec}) is required"
|
|
||||||
tty.error(msg)
|
|
||||||
return
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
pkg_cls = spack.repo.PATH.get_pkg_class(job_spec.name)
|
pkg_cls = spack.repo.PATH.get_pkg_class(job_spec.name)
|
||||||
@@ -2083,7 +2064,7 @@ def read_broken_spec(broken_spec_url):
|
|||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
_, _, fs = web_util.read_from_url(broken_spec_url)
|
_, _, fs = web_util.read_from_url(broken_spec_url)
|
||||||
except (URLError, web_util.SpackWebError, HTTPError):
|
except web_util.SpackWebError:
|
||||||
tty.warn(f"Unable to read broken spec from {broken_spec_url}")
|
tty.warn(f"Unable to read broken spec from {broken_spec_url}")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|||||||
@@ -237,7 +237,7 @@ def ensure_single_spec_or_die(spec, matching_specs):
|
|||||||
if len(matching_specs) <= 1:
|
if len(matching_specs) <= 1:
|
||||||
return
|
return
|
||||||
|
|
||||||
format_string = "{name}{@version}{%compiler.name}{@compiler.version}{arch=architecture}"
|
format_string = "{name}{@version}{%compiler.name}{@compiler.version}{ arch=architecture}"
|
||||||
args = ["%s matches multiple packages." % spec, "Matching packages:"]
|
args = ["%s matches multiple packages." % spec, "Matching packages:"]
|
||||||
args += [
|
args += [
|
||||||
colorize(" @K{%s} " % s.dag_hash(7)) + s.cformat(format_string) for s in matching_specs
|
colorize(" @K{%s} " % s.dag_hash(7)) + s.cformat(format_string) for s in matching_specs
|
||||||
|
|||||||
@@ -3,28 +3,24 @@
|
|||||||
#
|
#
|
||||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||||
import argparse
|
import argparse
|
||||||
import copy
|
|
||||||
import glob
|
import glob
|
||||||
import hashlib
|
|
||||||
import json
|
import json
|
||||||
import multiprocessing
|
|
||||||
import multiprocessing.pool
|
|
||||||
import os
|
import os
|
||||||
import shutil
|
import shutil
|
||||||
import sys
|
import sys
|
||||||
import tempfile
|
import tempfile
|
||||||
from typing import Dict, List, Optional, Tuple, Union
|
from typing import List, Tuple
|
||||||
|
|
||||||
import llnl.util.tty as tty
|
import llnl.util.tty as tty
|
||||||
from llnl.string import plural
|
from llnl.string import plural
|
||||||
from llnl.util.lang import elide_list
|
from llnl.util.lang import elide_list, stable_partition
|
||||||
|
|
||||||
import spack.binary_distribution as bindist
|
import spack.binary_distribution as bindist
|
||||||
import spack.cmd
|
import spack.cmd
|
||||||
import spack.config
|
import spack.config
|
||||||
|
import spack.deptypes as dt
|
||||||
import spack.environment as ev
|
import spack.environment as ev
|
||||||
import spack.error
|
import spack.error
|
||||||
import spack.hash_types as ht
|
|
||||||
import spack.mirror
|
import spack.mirror
|
||||||
import spack.oci.oci
|
import spack.oci.oci
|
||||||
import spack.oci.opener
|
import spack.oci.opener
|
||||||
@@ -35,28 +31,13 @@
|
|||||||
import spack.store
|
import spack.store
|
||||||
import spack.user_environment
|
import spack.user_environment
|
||||||
import spack.util.crypto
|
import spack.util.crypto
|
||||||
|
import spack.util.parallel
|
||||||
import spack.util.url as url_util
|
import spack.util.url as url_util
|
||||||
import spack.util.web as web_util
|
import spack.util.web as web_util
|
||||||
from spack import traverse
|
from spack import traverse
|
||||||
from spack.build_environment import determine_number_of_jobs
|
|
||||||
from spack.cmd import display_specs
|
from spack.cmd import display_specs
|
||||||
from spack.cmd.common import arguments
|
from spack.cmd.common import arguments
|
||||||
from spack.oci.image import (
|
from spack.oci.image import ImageReference
|
||||||
Digest,
|
|
||||||
ImageReference,
|
|
||||||
default_config,
|
|
||||||
default_index_tag,
|
|
||||||
default_manifest,
|
|
||||||
default_tag,
|
|
||||||
tag_is_spec,
|
|
||||||
)
|
|
||||||
from spack.oci.oci import (
|
|
||||||
copy_missing_layers_with_retry,
|
|
||||||
get_manifest_and_config_with_retry,
|
|
||||||
list_tags,
|
|
||||||
upload_blob_with_retry,
|
|
||||||
upload_manifest_with_retry,
|
|
||||||
)
|
|
||||||
from spack.spec import Spec, save_dependency_specfiles
|
from spack.spec import Spec, save_dependency_specfiles
|
||||||
|
|
||||||
description = "create, download and install binary packages"
|
description = "create, download and install binary packages"
|
||||||
@@ -112,6 +93,17 @@ def setup_parser(subparser: argparse.ArgumentParser):
|
|||||||
"Alternatively, one can decide to build a cache for only the package or only the "
|
"Alternatively, one can decide to build a cache for only the package or only the "
|
||||||
"dependencies",
|
"dependencies",
|
||||||
)
|
)
|
||||||
|
with_or_without_build_deps = push.add_mutually_exclusive_group()
|
||||||
|
with_or_without_build_deps.add_argument(
|
||||||
|
"--with-build-dependencies",
|
||||||
|
action="store_true",
|
||||||
|
help="include build dependencies in the buildcache",
|
||||||
|
)
|
||||||
|
with_or_without_build_deps.add_argument(
|
||||||
|
"--without-build-dependencies",
|
||||||
|
action="store_true",
|
||||||
|
help="exclude build dependencies from the buildcache",
|
||||||
|
)
|
||||||
push.add_argument(
|
push.add_argument(
|
||||||
"--fail-fast",
|
"--fail-fast",
|
||||||
action="store_true",
|
action="store_true",
|
||||||
@@ -329,39 +321,6 @@ def _format_spec(spec: Spec) -> str:
|
|||||||
return spec.cformat("{name}{@version}{/hash:7}")
|
return spec.cformat("{name}{@version}{/hash:7}")
|
||||||
|
|
||||||
|
|
||||||
def _progress(i: int, total: int):
|
|
||||||
if total > 1:
|
|
||||||
digits = len(str(total))
|
|
||||||
return f"[{i+1:{digits}}/{total}] "
|
|
||||||
return ""
|
|
||||||
|
|
||||||
|
|
||||||
class NoPool:
|
|
||||||
def map(self, func, args):
|
|
||||||
return [func(a) for a in args]
|
|
||||||
|
|
||||||
def starmap(self, func, args):
|
|
||||||
return [func(*a) for a in args]
|
|
||||||
|
|
||||||
def __enter__(self):
|
|
||||||
return self
|
|
||||||
|
|
||||||
def __exit__(self, *args):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
MaybePool = Union[multiprocessing.pool.Pool, NoPool]
|
|
||||||
|
|
||||||
|
|
||||||
def _make_pool() -> MaybePool:
|
|
||||||
"""Can't use threading because it's unsafe, and can't use spawned processes because of globals.
|
|
||||||
That leaves only forking"""
|
|
||||||
if multiprocessing.get_start_method() == "fork":
|
|
||||||
return multiprocessing.pool.Pool(determine_number_of_jobs(parallel=True))
|
|
||||||
else:
|
|
||||||
return NoPool()
|
|
||||||
|
|
||||||
|
|
||||||
def _skip_no_redistribute_for_public(specs):
|
def _skip_no_redistribute_for_public(specs):
|
||||||
remaining_specs = list()
|
remaining_specs = list()
|
||||||
removed_specs = list()
|
removed_specs = list()
|
||||||
@@ -381,6 +340,45 @@ def _skip_no_redistribute_for_public(specs):
|
|||||||
return remaining_specs
|
return remaining_specs
|
||||||
|
|
||||||
|
|
||||||
|
class PackagesAreNotInstalledError(spack.error.SpackError):
|
||||||
|
"""Raised when a list of specs is not installed but picked to be packaged."""
|
||||||
|
|
||||||
|
def __init__(self, specs: List[Spec]):
|
||||||
|
super().__init__(
|
||||||
|
"Cannot push non-installed packages",
|
||||||
|
", ".join(elide_list([_format_spec(s) for s in specs], 5)),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class PackageNotInstalledError(spack.error.SpackError):
|
||||||
|
"""Raised when a spec is not installed but picked to be packaged."""
|
||||||
|
|
||||||
|
|
||||||
|
def _specs_to_be_packaged(
|
||||||
|
requested: List[Spec], things_to_install: str, build_deps: bool
|
||||||
|
) -> List[Spec]:
|
||||||
|
"""Collect all non-external with or without roots and dependencies"""
|
||||||
|
if "dependencies" not in things_to_install:
|
||||||
|
deptype = dt.NONE
|
||||||
|
elif build_deps:
|
||||||
|
deptype = dt.ALL
|
||||||
|
else:
|
||||||
|
deptype = dt.RUN | dt.LINK | dt.TEST
|
||||||
|
specs = [
|
||||||
|
s
|
||||||
|
for s in traverse.traverse_nodes(
|
||||||
|
requested,
|
||||||
|
root="package" in things_to_install,
|
||||||
|
deptype=deptype,
|
||||||
|
order="breadth",
|
||||||
|
key=traverse.by_dag_hash,
|
||||||
|
)
|
||||||
|
if not s.external
|
||||||
|
]
|
||||||
|
specs.reverse()
|
||||||
|
return specs
|
||||||
|
|
||||||
|
|
||||||
def push_fn(args):
|
def push_fn(args):
|
||||||
"""create a binary package and push it to a mirror"""
|
"""create a binary package and push it to a mirror"""
|
||||||
if args.spec_file:
|
if args.spec_file:
|
||||||
@@ -412,91 +410,84 @@ def push_fn(args):
|
|||||||
|
|
||||||
# For OCI images, we require dependencies to be pushed for now.
|
# For OCI images, we require dependencies to be pushed for now.
|
||||||
if target_image:
|
if target_image:
|
||||||
if "dependencies" not in args.things_to_install:
|
|
||||||
tty.die("Dependencies must be pushed for OCI images.")
|
|
||||||
if not unsigned:
|
if not unsigned:
|
||||||
tty.warn(
|
tty.warn(
|
||||||
"Code signing is currently not supported for OCI images. "
|
"Code signing is currently not supported for OCI images. "
|
||||||
"Use --unsigned to silence this warning."
|
"Use --unsigned to silence this warning."
|
||||||
)
|
)
|
||||||
|
unsigned = True
|
||||||
|
|
||||||
# This is a list of installed, non-external specs.
|
# Select a signing key, or None if unsigned.
|
||||||
specs = bindist.specs_to_be_packaged(
|
signing_key = None if unsigned else (args.key or bindist.select_signing_key())
|
||||||
|
|
||||||
|
specs = _specs_to_be_packaged(
|
||||||
roots,
|
roots,
|
||||||
root="package" in args.things_to_install,
|
things_to_install=args.things_to_install,
|
||||||
dependencies="dependencies" in args.things_to_install,
|
build_deps=args.with_build_dependencies or not args.without_build_dependencies,
|
||||||
)
|
)
|
||||||
|
|
||||||
if not args.private:
|
if not args.private:
|
||||||
specs = _skip_no_redistribute_for_public(specs)
|
specs = _skip_no_redistribute_for_public(specs)
|
||||||
|
|
||||||
# When pushing multiple specs, print the url once ahead of time, as well as how
|
|
||||||
# many specs are being pushed.
|
|
||||||
if len(specs) > 1:
|
if len(specs) > 1:
|
||||||
tty.info(f"Selected {len(specs)} specs to push to {push_url}")
|
tty.info(f"Selected {len(specs)} specs to push to {push_url}")
|
||||||
|
|
||||||
failed = []
|
# Pushing not installed specs is an error. Either fail fast or populate the error list and
|
||||||
|
# push installed package in best effort mode.
|
||||||
|
failed: List[Tuple[Spec, BaseException]] = []
|
||||||
|
with spack.store.STORE.db.read_transaction():
|
||||||
|
if any(not s.installed for s in specs):
|
||||||
|
specs, not_installed = stable_partition(specs, lambda s: s.installed)
|
||||||
|
if args.fail_fast:
|
||||||
|
raise PackagesAreNotInstalledError(not_installed)
|
||||||
|
else:
|
||||||
|
failed.extend(
|
||||||
|
(s, PackageNotInstalledError("package not installed")) for s in not_installed
|
||||||
|
)
|
||||||
|
|
||||||
# TODO: unify this logic in the future.
|
with bindist.default_push_context() as (tmpdir, executor):
|
||||||
if target_image:
|
if target_image:
|
||||||
base_image = ImageReference.from_string(args.base_image) if args.base_image else None
|
base_image = ImageReference.from_string(args.base_image) if args.base_image else None
|
||||||
with tempfile.TemporaryDirectory(
|
skipped, base_images, checksums, upload_errors = bindist._push_oci(
|
||||||
dir=spack.stage.get_stage_root()
|
|
||||||
) as tmpdir, _make_pool() as pool:
|
|
||||||
skipped, base_images, checksums = _push_oci(
|
|
||||||
target_image=target_image,
|
target_image=target_image,
|
||||||
base_image=base_image,
|
base_image=base_image,
|
||||||
installed_specs_with_deps=specs,
|
installed_specs_with_deps=specs,
|
||||||
force=args.force,
|
force=args.force,
|
||||||
tmpdir=tmpdir,
|
tmpdir=tmpdir,
|
||||||
pool=pool,
|
executor=executor,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if upload_errors:
|
||||||
|
failed.extend(upload_errors)
|
||||||
|
|
||||||
# Apart from creating manifests for each individual spec, we allow users to create a
|
# Apart from creating manifests for each individual spec, we allow users to create a
|
||||||
# separate image tag for all root specs and their runtime dependencies.
|
# separate image tag for all root specs and their runtime dependencies.
|
||||||
if args.tag:
|
elif args.tag:
|
||||||
tagged_image = target_image.with_tag(args.tag)
|
tagged_image = target_image.with_tag(args.tag)
|
||||||
# _push_oci may not populate base_images if binaries were already in the registry
|
# _push_oci may not populate base_images if binaries were already in the registry
|
||||||
for spec in roots:
|
for spec in roots:
|
||||||
_update_base_images(
|
bindist._oci_update_base_images(
|
||||||
base_image=base_image,
|
base_image=base_image,
|
||||||
target_image=target_image,
|
target_image=target_image,
|
||||||
spec=spec,
|
spec=spec,
|
||||||
base_image_cache=base_images,
|
base_image_cache=base_images,
|
||||||
)
|
)
|
||||||
_put_manifest(base_images, checksums, tagged_image, tmpdir, None, None, *roots)
|
bindist._oci_put_manifest(
|
||||||
|
base_images, checksums, tagged_image, tmpdir, None, None, *roots
|
||||||
|
)
|
||||||
tty.info(f"Tagged {tagged_image}")
|
tty.info(f"Tagged {tagged_image}")
|
||||||
|
|
||||||
else:
|
else:
|
||||||
skipped = []
|
skipped, upload_errors = bindist._push(
|
||||||
|
specs,
|
||||||
for i, spec in enumerate(specs):
|
out_url=push_url,
|
||||||
try:
|
force=args.force,
|
||||||
bindist.push_or_raise(
|
update_index=args.update_index,
|
||||||
spec,
|
signing_key=signing_key,
|
||||||
push_url,
|
tmpdir=tmpdir,
|
||||||
bindist.PushOptions(
|
executor=executor,
|
||||||
force=args.force,
|
)
|
||||||
unsigned=unsigned,
|
failed.extend(upload_errors)
|
||||||
key=args.key,
|
|
||||||
regenerate_index=args.update_index,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
msg = f"{_progress(i, len(specs))}Pushed {_format_spec(spec)}"
|
|
||||||
if len(specs) == 1:
|
|
||||||
msg += f" to {push_url}"
|
|
||||||
tty.info(msg)
|
|
||||||
|
|
||||||
except bindist.NoOverwriteException:
|
|
||||||
skipped.append(_format_spec(spec))
|
|
||||||
|
|
||||||
# Catch any other exception unless the fail fast option is set
|
|
||||||
except Exception as e:
|
|
||||||
if args.fail_fast or isinstance(
|
|
||||||
e, (bindist.PickKeyException, bindist.NoKeyException)
|
|
||||||
):
|
|
||||||
raise
|
|
||||||
failed.append((_format_spec(spec), e))
|
|
||||||
|
|
||||||
if skipped:
|
if skipped:
|
||||||
if len(specs) == 1:
|
if len(specs) == 1:
|
||||||
@@ -519,389 +510,22 @@ def push_fn(args):
|
|||||||
raise spack.error.SpackError(
|
raise spack.error.SpackError(
|
||||||
f"The following {len(failed)} errors occurred while pushing specs to the buildcache",
|
f"The following {len(failed)} errors occurred while pushing specs to the buildcache",
|
||||||
"\n".join(
|
"\n".join(
|
||||||
elide_list([f" {spec}: {e.__class__.__name__}: {e}" for spec, e in failed], 5)
|
elide_list(
|
||||||
|
[
|
||||||
|
f" {_format_spec(spec)}: {e.__class__.__name__}: {e}"
|
||||||
|
for spec, e in failed
|
||||||
|
],
|
||||||
|
5,
|
||||||
|
)
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
# Update the index if requested
|
# Update the OCI index if requested
|
||||||
# TODO: remove update index logic out of bindist; should be once after all specs are pushed
|
|
||||||
# not once per spec.
|
|
||||||
if target_image and len(skipped) < len(specs) and args.update_index:
|
if target_image and len(skipped) < len(specs) and args.update_index:
|
||||||
with tempfile.TemporaryDirectory(
|
with tempfile.TemporaryDirectory(
|
||||||
dir=spack.stage.get_stage_root()
|
dir=spack.stage.get_stage_root()
|
||||||
) as tmpdir, _make_pool() as pool:
|
) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor:
|
||||||
_update_index_oci(target_image, tmpdir, pool)
|
bindist._oci_update_index(target_image, tmpdir, executor)
|
||||||
|
|
||||||
|
|
||||||
def _get_spack_binary_blob(image_ref: ImageReference) -> Optional[spack.oci.oci.Blob]:
|
|
||||||
"""Get the spack tarball layer digests and size if it exists"""
|
|
||||||
try:
|
|
||||||
manifest, config = get_manifest_and_config_with_retry(image_ref)
|
|
||||||
|
|
||||||
return spack.oci.oci.Blob(
|
|
||||||
compressed_digest=Digest.from_string(manifest["layers"][-1]["digest"]),
|
|
||||||
uncompressed_digest=Digest.from_string(config["rootfs"]["diff_ids"][-1]),
|
|
||||||
size=manifest["layers"][-1]["size"],
|
|
||||||
)
|
|
||||||
except Exception:
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def _push_single_spack_binary_blob(image_ref: ImageReference, spec: spack.spec.Spec, tmpdir: str):
|
|
||||||
filename = os.path.join(tmpdir, f"{spec.dag_hash()}.tar.gz")
|
|
||||||
|
|
||||||
# Create an oci.image.layer aka tarball of the package
|
|
||||||
compressed_tarfile_checksum, tarfile_checksum = spack.oci.oci.create_tarball(spec, filename)
|
|
||||||
|
|
||||||
blob = spack.oci.oci.Blob(
|
|
||||||
Digest.from_sha256(compressed_tarfile_checksum),
|
|
||||||
Digest.from_sha256(tarfile_checksum),
|
|
||||||
os.path.getsize(filename),
|
|
||||||
)
|
|
||||||
|
|
||||||
# Upload the blob
|
|
||||||
upload_blob_with_retry(image_ref, file=filename, digest=blob.compressed_digest)
|
|
||||||
|
|
||||||
# delete the file
|
|
||||||
os.unlink(filename)
|
|
||||||
|
|
||||||
return blob
|
|
||||||
|
|
||||||
|
|
||||||
def _retrieve_env_dict_from_config(config: dict) -> dict:
|
|
||||||
"""Retrieve the environment variables from the image config file.
|
|
||||||
Sets a default value for PATH if it is not present.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
config (dict): The image config file.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
dict: The environment variables.
|
|
||||||
"""
|
|
||||||
env = {"PATH": "/bin:/usr/bin"}
|
|
||||||
|
|
||||||
if "Env" in config.get("config", {}):
|
|
||||||
for entry in config["config"]["Env"]:
|
|
||||||
key, value = entry.split("=", 1)
|
|
||||||
env[key] = value
|
|
||||||
return env
|
|
||||||
|
|
||||||
|
|
||||||
def _archspec_to_gooarch(spec: spack.spec.Spec) -> str:
|
|
||||||
name = spec.target.family.name
|
|
||||||
name_map = {"aarch64": "arm64", "x86_64": "amd64"}
|
|
||||||
return name_map.get(name, name)
|
|
||||||
|
|
||||||
|
|
||||||
def _put_manifest(
|
|
||||||
base_images: Dict[str, Tuple[dict, dict]],
|
|
||||||
checksums: Dict[str, spack.oci.oci.Blob],
|
|
||||||
image_ref: ImageReference,
|
|
||||||
tmpdir: str,
|
|
||||||
extra_config: Optional[dict],
|
|
||||||
annotations: Optional[dict],
|
|
||||||
*specs: spack.spec.Spec,
|
|
||||||
):
|
|
||||||
architecture = _archspec_to_gooarch(specs[0])
|
|
||||||
|
|
||||||
dependencies = list(
|
|
||||||
reversed(
|
|
||||||
list(
|
|
||||||
s
|
|
||||||
for s in traverse.traverse_nodes(
|
|
||||||
specs, order="topo", deptype=("link", "run"), root=True
|
|
||||||
)
|
|
||||||
if not s.external
|
|
||||||
)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
base_manifest, base_config = base_images[architecture]
|
|
||||||
env = _retrieve_env_dict_from_config(base_config)
|
|
||||||
|
|
||||||
# If the base image uses `vnd.docker.distribution.manifest.v2+json`, then we use that too.
|
|
||||||
# This is because Singularity / Apptainer is very strict about not mixing them.
|
|
||||||
base_manifest_mediaType = base_manifest.get(
|
|
||||||
"mediaType", "application/vnd.oci.image.manifest.v1+json"
|
|
||||||
)
|
|
||||||
use_docker_format = (
|
|
||||||
base_manifest_mediaType == "application/vnd.docker.distribution.manifest.v2+json"
|
|
||||||
)
|
|
||||||
|
|
||||||
spack.user_environment.environment_modifications_for_specs(*specs).apply_modifications(env)
|
|
||||||
|
|
||||||
# Create an oci.image.config file
|
|
||||||
config = copy.deepcopy(base_config)
|
|
||||||
|
|
||||||
# Add the diff ids of the dependencies
|
|
||||||
for s in dependencies:
|
|
||||||
config["rootfs"]["diff_ids"].append(str(checksums[s.dag_hash()].uncompressed_digest))
|
|
||||||
|
|
||||||
# Set the environment variables
|
|
||||||
config["config"]["Env"] = [f"{k}={v}" for k, v in env.items()]
|
|
||||||
|
|
||||||
if extra_config:
|
|
||||||
# From the OCI v1.0 spec:
|
|
||||||
# > Any extra fields in the Image JSON struct are considered implementation
|
|
||||||
# > specific and MUST be ignored by any implementations which are unable to
|
|
||||||
# > interpret them.
|
|
||||||
config.update(extra_config)
|
|
||||||
|
|
||||||
config_file = os.path.join(tmpdir, f"{specs[0].dag_hash()}.config.json")
|
|
||||||
|
|
||||||
with open(config_file, "w") as f:
|
|
||||||
json.dump(config, f, separators=(",", ":"))
|
|
||||||
|
|
||||||
config_file_checksum = Digest.from_sha256(
|
|
||||||
spack.util.crypto.checksum(hashlib.sha256, config_file)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Upload the config file
|
|
||||||
upload_blob_with_retry(image_ref, file=config_file, digest=config_file_checksum)
|
|
||||||
|
|
||||||
manifest = {
|
|
||||||
"mediaType": base_manifest_mediaType,
|
|
||||||
"schemaVersion": 2,
|
|
||||||
"config": {
|
|
||||||
"mediaType": base_manifest["config"]["mediaType"],
|
|
||||||
"digest": str(config_file_checksum),
|
|
||||||
"size": os.path.getsize(config_file),
|
|
||||||
},
|
|
||||||
"layers": [
|
|
||||||
*(layer for layer in base_manifest["layers"]),
|
|
||||||
*(
|
|
||||||
{
|
|
||||||
"mediaType": (
|
|
||||||
"application/vnd.docker.image.rootfs.diff.tar.gzip"
|
|
||||||
if use_docker_format
|
|
||||||
else "application/vnd.oci.image.layer.v1.tar+gzip"
|
|
||||||
),
|
|
||||||
"digest": str(checksums[s.dag_hash()].compressed_digest),
|
|
||||||
"size": checksums[s.dag_hash()].size,
|
|
||||||
}
|
|
||||||
for s in dependencies
|
|
||||||
),
|
|
||||||
],
|
|
||||||
}
|
|
||||||
|
|
||||||
if not use_docker_format and annotations:
|
|
||||||
manifest["annotations"] = annotations
|
|
||||||
|
|
||||||
# Finally upload the manifest
|
|
||||||
upload_manifest_with_retry(image_ref, manifest=manifest)
|
|
||||||
|
|
||||||
# delete the config file
|
|
||||||
os.unlink(config_file)
|
|
||||||
|
|
||||||
|
|
||||||
def _update_base_images(
|
|
||||||
*,
|
|
||||||
base_image: Optional[ImageReference],
|
|
||||||
target_image: ImageReference,
|
|
||||||
spec: spack.spec.Spec,
|
|
||||||
base_image_cache: Dict[str, Tuple[dict, dict]],
|
|
||||||
):
|
|
||||||
"""For a given spec and base image, copy the missing layers of the base image with matching
|
|
||||||
arch to the registry of the target image. If no base image is specified, create a dummy
|
|
||||||
manifest and config file."""
|
|
||||||
architecture = _archspec_to_gooarch(spec)
|
|
||||||
if architecture in base_image_cache:
|
|
||||||
return
|
|
||||||
if base_image is None:
|
|
||||||
base_image_cache[architecture] = (
|
|
||||||
default_manifest(),
|
|
||||||
default_config(architecture, "linux"),
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
base_image_cache[architecture] = copy_missing_layers_with_retry(
|
|
||||||
base_image, target_image, architecture
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def _push_oci(
|
|
||||||
*,
|
|
||||||
target_image: ImageReference,
|
|
||||||
base_image: Optional[ImageReference],
|
|
||||||
installed_specs_with_deps: List[Spec],
|
|
||||||
tmpdir: str,
|
|
||||||
pool: MaybePool,
|
|
||||||
force: bool = False,
|
|
||||||
) -> Tuple[List[str], Dict[str, Tuple[dict, dict]], Dict[str, spack.oci.oci.Blob]]:
|
|
||||||
"""Push specs to an OCI registry
|
|
||||||
|
|
||||||
Args:
|
|
||||||
image_ref: The target OCI image
|
|
||||||
base_image: Optional base image, which will be copied to the target registry.
|
|
||||||
installed_specs_with_deps: The installed specs to push, excluding externals,
|
|
||||||
including deps, ordered from roots to leaves.
|
|
||||||
force: Whether to overwrite existing layers and manifests in the buildcache.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
A tuple consisting of the list of skipped specs already in the build cache,
|
|
||||||
a dictionary mapping architectures to base image manifests and configs,
|
|
||||||
and a dictionary mapping each spec's dag hash to a blob.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Reverse the order
|
|
||||||
installed_specs_with_deps = list(reversed(installed_specs_with_deps))
|
|
||||||
|
|
||||||
# Spec dag hash -> blob
|
|
||||||
checksums: Dict[str, spack.oci.oci.Blob] = {}
|
|
||||||
|
|
||||||
# arch -> (manifest, config)
|
|
||||||
base_images: Dict[str, Tuple[dict, dict]] = {}
|
|
||||||
|
|
||||||
# Specs not uploaded because they already exist
|
|
||||||
skipped = []
|
|
||||||
|
|
||||||
if not force:
|
|
||||||
tty.info("Checking for existing specs in the buildcache")
|
|
||||||
to_be_uploaded = []
|
|
||||||
|
|
||||||
tags_to_check = (target_image.with_tag(default_tag(s)) for s in installed_specs_with_deps)
|
|
||||||
available_blobs = pool.map(_get_spack_binary_blob, tags_to_check)
|
|
||||||
|
|
||||||
for spec, maybe_blob in zip(installed_specs_with_deps, available_blobs):
|
|
||||||
if maybe_blob is not None:
|
|
||||||
checksums[spec.dag_hash()] = maybe_blob
|
|
||||||
skipped.append(_format_spec(spec))
|
|
||||||
else:
|
|
||||||
to_be_uploaded.append(spec)
|
|
||||||
else:
|
|
||||||
to_be_uploaded = installed_specs_with_deps
|
|
||||||
|
|
||||||
if not to_be_uploaded:
|
|
||||||
return skipped, base_images, checksums
|
|
||||||
|
|
||||||
tty.info(
|
|
||||||
f"{len(to_be_uploaded)} specs need to be pushed to "
|
|
||||||
f"{target_image.domain}/{target_image.name}"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Upload blobs
|
|
||||||
new_blobs = pool.starmap(
|
|
||||||
_push_single_spack_binary_blob, ((target_image, spec, tmpdir) for spec in to_be_uploaded)
|
|
||||||
)
|
|
||||||
|
|
||||||
# And update the spec to blob mapping
|
|
||||||
for spec, blob in zip(to_be_uploaded, new_blobs):
|
|
||||||
checksums[spec.dag_hash()] = blob
|
|
||||||
|
|
||||||
# Copy base images if necessary
|
|
||||||
for spec in to_be_uploaded:
|
|
||||||
_update_base_images(
|
|
||||||
base_image=base_image,
|
|
||||||
target_image=target_image,
|
|
||||||
spec=spec,
|
|
||||||
base_image_cache=base_images,
|
|
||||||
)
|
|
||||||
|
|
||||||
def extra_config(spec: Spec):
|
|
||||||
spec_dict = spec.to_dict(hash=ht.dag_hash)
|
|
||||||
spec_dict["buildcache_layout_version"] = 1
|
|
||||||
spec_dict["binary_cache_checksum"] = {
|
|
||||||
"hash_algorithm": "sha256",
|
|
||||||
"hash": checksums[spec.dag_hash()].compressed_digest.digest,
|
|
||||||
}
|
|
||||||
return spec_dict
|
|
||||||
|
|
||||||
# Upload manifests
|
|
||||||
tty.info("Uploading manifests")
|
|
||||||
pool.starmap(
|
|
||||||
_put_manifest,
|
|
||||||
(
|
|
||||||
(
|
|
||||||
base_images,
|
|
||||||
checksums,
|
|
||||||
target_image.with_tag(default_tag(spec)),
|
|
||||||
tmpdir,
|
|
||||||
extra_config(spec),
|
|
||||||
{"org.opencontainers.image.description": spec.format()},
|
|
||||||
spec,
|
|
||||||
)
|
|
||||||
for spec in to_be_uploaded
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
# Print the image names of the top-level specs
|
|
||||||
for spec in to_be_uploaded:
|
|
||||||
tty.info(f"Pushed {_format_spec(spec)} to {target_image.with_tag(default_tag(spec))}")
|
|
||||||
|
|
||||||
return skipped, base_images, checksums
|
|
||||||
|
|
||||||
|
|
||||||
def _config_from_tag(image_ref: ImageReference, tag: str) -> Optional[dict]:
|
|
||||||
# Don't allow recursion here, since Spack itself always uploads
|
|
||||||
# vnd.oci.image.manifest.v1+json, not vnd.oci.image.index.v1+json
|
|
||||||
_, config = get_manifest_and_config_with_retry(image_ref.with_tag(tag), tag, recurse=0)
|
|
||||||
|
|
||||||
# Do very basic validation: if "spec" is a key in the config, it
|
|
||||||
# must be a Spec object too.
|
|
||||||
return config if "spec" in config else None
|
|
||||||
|
|
||||||
|
|
||||||
def _update_index_oci(image_ref: ImageReference, tmpdir: str, pool: MaybePool) -> None:
|
|
||||||
tags = list_tags(image_ref)
|
|
||||||
|
|
||||||
# Fetch all image config files in parallel
|
|
||||||
spec_dicts = pool.starmap(
|
|
||||||
_config_from_tag, ((image_ref, tag) for tag in tags if tag_is_spec(tag))
|
|
||||||
)
|
|
||||||
|
|
||||||
# Populate the database
|
|
||||||
db_root_dir = os.path.join(tmpdir, "db_root")
|
|
||||||
db = bindist.BuildCacheDatabase(db_root_dir)
|
|
||||||
|
|
||||||
for spec_dict in spec_dicts:
|
|
||||||
spec = Spec.from_dict(spec_dict)
|
|
||||||
db.add(spec, directory_layout=None)
|
|
||||||
db.mark(spec, "in_buildcache", True)
|
|
||||||
|
|
||||||
# Create the index.json file
|
|
||||||
index_json_path = os.path.join(tmpdir, "index.json")
|
|
||||||
with open(index_json_path, "w") as f:
|
|
||||||
db._write_to_file(f)
|
|
||||||
|
|
||||||
# Create an empty config.json file
|
|
||||||
empty_config_json_path = os.path.join(tmpdir, "config.json")
|
|
||||||
with open(empty_config_json_path, "wb") as f:
|
|
||||||
f.write(b"{}")
|
|
||||||
|
|
||||||
# Upload the index.json file
|
|
||||||
index_shasum = Digest.from_sha256(spack.util.crypto.checksum(hashlib.sha256, index_json_path))
|
|
||||||
upload_blob_with_retry(image_ref, file=index_json_path, digest=index_shasum)
|
|
||||||
|
|
||||||
# Upload the config.json file
|
|
||||||
empty_config_digest = Digest.from_sha256(
|
|
||||||
spack.util.crypto.checksum(hashlib.sha256, empty_config_json_path)
|
|
||||||
)
|
|
||||||
upload_blob_with_retry(image_ref, file=empty_config_json_path, digest=empty_config_digest)
|
|
||||||
|
|
||||||
# Push a manifest file that references the index.json file as a layer
|
|
||||||
# Notice that we push this as if it is an image, which it of course is not.
|
|
||||||
# When the ORAS spec becomes official, we can use that instead of a fake image.
|
|
||||||
# For now we just use the OCI image spec, so that we don't run into issues with
|
|
||||||
# automatic garbage collection of blobs that are not referenced by any image manifest.
|
|
||||||
oci_manifest = {
|
|
||||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
|
||||||
"schemaVersion": 2,
|
|
||||||
# Config is just an empty {} file for now, and irrelevant
|
|
||||||
"config": {
|
|
||||||
"mediaType": "application/vnd.oci.image.config.v1+json",
|
|
||||||
"digest": str(empty_config_digest),
|
|
||||||
"size": os.path.getsize(empty_config_json_path),
|
|
||||||
},
|
|
||||||
# The buildcache index is the only layer, and is not a tarball, we lie here.
|
|
||||||
"layers": [
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
|
|
||||||
"digest": str(index_shasum),
|
|
||||||
"size": os.path.getsize(index_json_path),
|
|
||||||
}
|
|
||||||
],
|
|
||||||
}
|
|
||||||
|
|
||||||
upload_manifest_with_retry(image_ref.with_tag(default_index_tag), oci_manifest)
|
|
||||||
|
|
||||||
|
|
||||||
def install_fn(args):
|
def install_fn(args):
|
||||||
@@ -1182,14 +806,15 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
|
|||||||
if image_ref:
|
if image_ref:
|
||||||
with tempfile.TemporaryDirectory(
|
with tempfile.TemporaryDirectory(
|
||||||
dir=spack.stage.get_stage_root()
|
dir=spack.stage.get_stage_root()
|
||||||
) as tmpdir, _make_pool() as pool:
|
) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor:
|
||||||
_update_index_oci(image_ref, tmpdir, pool)
|
bindist._oci_update_index(image_ref, tmpdir, executor)
|
||||||
return
|
return
|
||||||
|
|
||||||
# Otherwise, assume a normal mirror.
|
# Otherwise, assume a normal mirror.
|
||||||
url = mirror.push_url
|
url = mirror.push_url
|
||||||
|
|
||||||
bindist.generate_package_index(url_util.join(url, bindist.build_cache_relative_path()))
|
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
|
||||||
|
bindist.generate_package_index(url, tmpdir)
|
||||||
|
|
||||||
if update_keys:
|
if update_keys:
|
||||||
keys_url = url_util.join(
|
keys_url = url_util.join(
|
||||||
@@ -1197,7 +822,8 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
|
|||||||
)
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
bindist.generate_key_index(keys_url)
|
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
|
||||||
|
bindist.generate_key_index(keys_url, tmpdir)
|
||||||
except bindist.CannotListKeys as e:
|
except bindist.CannotListKeys as e:
|
||||||
# Do not error out if listing keys went wrong. This usually means that the _gpg path
|
# Do not error out if listing keys went wrong. This usually means that the _gpg path
|
||||||
# does not exist. TODO: distinguish between this and other errors.
|
# does not exist. TODO: distinguish between this and other errors.
|
||||||
|
|||||||
@@ -11,7 +11,6 @@
|
|||||||
from argparse import ArgumentParser, Namespace
|
from argparse import ArgumentParser, Namespace
|
||||||
from typing import IO, Any, Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union
|
from typing import IO, Any, Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union
|
||||||
|
|
||||||
import llnl.util.filesystem as fs
|
|
||||||
import llnl.util.tty as tty
|
import llnl.util.tty as tty
|
||||||
from llnl.util.argparsewriter import ArgparseRstWriter, ArgparseWriter, Command
|
from llnl.util.argparsewriter import ArgparseRstWriter, ArgparseWriter, Command
|
||||||
from llnl.util.tty.colify import colify
|
from llnl.util.tty.colify import colify
|
||||||
@@ -867,9 +866,6 @@ def _commands(parser: ArgumentParser, args: Namespace) -> None:
|
|||||||
prepend_header(args, f)
|
prepend_header(args, f)
|
||||||
formatter(args, f)
|
formatter(args, f)
|
||||||
|
|
||||||
if args.update_completion:
|
|
||||||
fs.set_executable(args.update)
|
|
||||||
|
|
||||||
else:
|
else:
|
||||||
prepend_header(args, sys.stdout)
|
prepend_header(args, sys.stdout)
|
||||||
formatter(args, sys.stdout)
|
formatter(args, sys.stdout)
|
||||||
|
|||||||
@@ -6,6 +6,7 @@
|
|||||||
import os
|
import os
|
||||||
import platform
|
import platform
|
||||||
import re
|
import re
|
||||||
|
import sys
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from glob import glob
|
from glob import glob
|
||||||
|
|
||||||
@@ -62,9 +63,10 @@ def create_db_tarball(args):
|
|||||||
|
|
||||||
base = os.path.basename(str(spack.store.STORE.root))
|
base = os.path.basename(str(spack.store.STORE.root))
|
||||||
transform_args = []
|
transform_args = []
|
||||||
|
# Currently --transform and -s are not supported by Windows native tar
|
||||||
if "GNU" in tar("--version", output=str):
|
if "GNU" in tar("--version", output=str):
|
||||||
transform_args = ["--transform", "s/^%s/%s/" % (base, tarball_name)]
|
transform_args = ["--transform", "s/^%s/%s/" % (base, tarball_name)]
|
||||||
else:
|
elif sys.platform != "win32":
|
||||||
transform_args = ["-s", "/^%s/%s/" % (base, tarball_name)]
|
transform_args = ["-s", "/^%s/%s/" % (base, tarball_name)]
|
||||||
|
|
||||||
wd = os.path.dirname(str(spack.store.STORE.root))
|
wd = os.path.dirname(str(spack.store.STORE.root))
|
||||||
@@ -90,7 +92,6 @@ def report(args):
|
|||||||
print("* **Spack:**", get_version())
|
print("* **Spack:**", get_version())
|
||||||
print("* **Python:**", platform.python_version())
|
print("* **Python:**", platform.python_version())
|
||||||
print("* **Platform:**", architecture)
|
print("* **Platform:**", architecture)
|
||||||
print("* **Concretizer:**", spack.config.get("config:concretizer"))
|
|
||||||
|
|
||||||
|
|
||||||
def debug(parser, args):
|
def debug(parser, args):
|
||||||
|
|||||||
@@ -47,16 +47,6 @@ def inverted_dependencies():
|
|||||||
dependents of, e.g., `mpi`, but virtuals are not included as
|
dependents of, e.g., `mpi`, but virtuals are not included as
|
||||||
actual dependents.
|
actual dependents.
|
||||||
"""
|
"""
|
||||||
dag = {}
|
|
||||||
for pkg_cls in spack.repo.PATH.all_package_classes():
|
|
||||||
dag.setdefault(pkg_cls.name, set())
|
|
||||||
for dep in pkg_cls.dependencies_by_name():
|
|
||||||
deps = [dep]
|
|
||||||
|
|
||||||
# expand virtuals if necessary
|
|
||||||
if spack.repo.PATH.is_virtual(dep):
|
|
||||||
deps += [s.name for s in spack.repo.PATH.providers_for(dep)]
|
|
||||||
|
|
||||||
dag = collections.defaultdict(set)
|
dag = collections.defaultdict(set)
|
||||||
for pkg_cls in spack.repo.PATH.all_package_classes():
|
for pkg_cls in spack.repo.PATH.all_package_classes():
|
||||||
for _, deps_by_name in pkg_cls.dependencies.items():
|
for _, deps_by_name in pkg_cls.dependencies.items():
|
||||||
|
|||||||
@@ -468,32 +468,30 @@ def env_remove(args):
|
|||||||
This removes an environment managed by Spack. Directory environments
|
This removes an environment managed by Spack. Directory environments
|
||||||
and manifests embedded in repositories should be removed manually.
|
and manifests embedded in repositories should be removed manually.
|
||||||
"""
|
"""
|
||||||
read_envs = []
|
remove_envs = []
|
||||||
valid_envs = []
|
valid_envs = []
|
||||||
bad_envs = []
|
bad_envs = []
|
||||||
invalid_envs = []
|
|
||||||
|
|
||||||
for env_name in ev.all_environment_names():
|
for env_name in ev.all_environment_names():
|
||||||
try:
|
try:
|
||||||
env = ev.read(env_name)
|
env = ev.read(env_name)
|
||||||
valid_envs.append(env_name)
|
valid_envs.append(env)
|
||||||
|
|
||||||
if env_name in args.rm_env:
|
if env_name in args.rm_env:
|
||||||
read_envs.append(env)
|
remove_envs.append(env)
|
||||||
except (spack.config.ConfigFormatError, ev.SpackEnvironmentConfigError):
|
except (spack.config.ConfigFormatError, ev.SpackEnvironmentConfigError):
|
||||||
invalid_envs.append(env_name)
|
|
||||||
|
|
||||||
if env_name in args.rm_env:
|
if env_name in args.rm_env:
|
||||||
bad_envs.append(env_name)
|
bad_envs.append(env_name)
|
||||||
|
|
||||||
# Check if env is linked to another before trying to remove
|
# Check if remove_env is included from another env before trying to remove
|
||||||
for name in valid_envs:
|
for env in valid_envs:
|
||||||
|
for remove_env in remove_envs:
|
||||||
# don't check if environment is included to itself
|
# don't check if environment is included to itself
|
||||||
if name == env_name:
|
if env.name == remove_env.name:
|
||||||
continue
|
continue
|
||||||
environ = ev.Environment(ev.root(name))
|
|
||||||
if ev.root(env_name) in environ.included_concrete_envs:
|
if remove_env.path in env.included_concrete_envs:
|
||||||
msg = f'Environment "{env_name}" is being used by environment "{name}"'
|
msg = f'Environment "{remove_env.name}" is being used by environment "{env.name}"'
|
||||||
if args.force:
|
if args.force:
|
||||||
tty.warn(msg)
|
tty.warn(msg)
|
||||||
else:
|
else:
|
||||||
@@ -506,7 +504,7 @@ def env_remove(args):
|
|||||||
if not answer:
|
if not answer:
|
||||||
tty.die("Will not remove any environments")
|
tty.die("Will not remove any environments")
|
||||||
|
|
||||||
for env in read_envs:
|
for env in remove_envs:
|
||||||
name = env.name
|
name = env.name
|
||||||
if env.active:
|
if env.active:
|
||||||
tty.die(f"Environment {name} can't be removed while activated.")
|
tty.die(f"Environment {name} can't be removed while activated.")
|
||||||
|
|||||||
@@ -135,9 +135,7 @@ def external_find(args):
|
|||||||
candidate_packages = packages_to_search_for(
|
candidate_packages = packages_to_search_for(
|
||||||
names=args.packages, tags=args.tags, exclude=args.exclude
|
names=args.packages, tags=args.tags, exclude=args.exclude
|
||||||
)
|
)
|
||||||
detected_packages = spack.detection.by_path(
|
detected_packages = spack.detection.by_path(candidate_packages, path_hints=args.path)
|
||||||
candidate_packages, path_hints=args.path, max_workers=args.jobs
|
|
||||||
)
|
|
||||||
|
|
||||||
new_specs = spack.detection.update_configuration(
|
new_specs = spack.detection.update_configuration(
|
||||||
detected_packages, scope=args.scope, buildable=not args.not_buildable
|
detected_packages, scope=args.scope, buildable=not args.not_buildable
|
||||||
|
|||||||
@@ -5,10 +5,12 @@
|
|||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
import os
|
import os
|
||||||
|
import tempfile
|
||||||
|
|
||||||
import spack.binary_distribution
|
import spack.binary_distribution
|
||||||
import spack.mirror
|
import spack.mirror
|
||||||
import spack.paths
|
import spack.paths
|
||||||
|
import spack.stage
|
||||||
import spack.util.gpg
|
import spack.util.gpg
|
||||||
import spack.util.url
|
import spack.util.url
|
||||||
from spack.cmd.common import arguments
|
from spack.cmd.common import arguments
|
||||||
@@ -115,6 +117,7 @@ def setup_parser(subparser):
|
|||||||
help="URL of the mirror where keys will be published",
|
help="URL of the mirror where keys will be published",
|
||||||
)
|
)
|
||||||
publish.add_argument(
|
publish.add_argument(
|
||||||
|
"--update-index",
|
||||||
"--rebuild-index",
|
"--rebuild-index",
|
||||||
action="store_true",
|
action="store_true",
|
||||||
default=False,
|
default=False,
|
||||||
@@ -220,9 +223,10 @@ def gpg_publish(args):
|
|||||||
elif args.mirror_url:
|
elif args.mirror_url:
|
||||||
mirror = spack.mirror.Mirror(args.mirror_url, args.mirror_url)
|
mirror = spack.mirror.Mirror(args.mirror_url, args.mirror_url)
|
||||||
|
|
||||||
spack.binary_distribution.push_keys(
|
with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
|
||||||
mirror, keys=args.keys, regenerate_index=args.rebuild_index
|
spack.binary_distribution.push_keys(
|
||||||
)
|
mirror, keys=args.keys, tmpdir=tmpdir, update_index=args.update_index
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def gpg(parser, args):
|
def gpg(parser, args):
|
||||||
|
|||||||
@@ -502,7 +502,7 @@ def print_licenses(pkg, args):
|
|||||||
|
|
||||||
def info(parser, args):
|
def info(parser, args):
|
||||||
spec = spack.spec.Spec(args.package)
|
spec = spack.spec.Spec(args.package)
|
||||||
pkg_cls = spack.repo.PATH.get_pkg_class(spec.name)
|
pkg_cls = spack.repo.PATH.get_pkg_class(spec.fullname)
|
||||||
pkg = pkg_cls(spec)
|
pkg = pkg_cls(spec)
|
||||||
|
|
||||||
# Output core package information
|
# Output core package information
|
||||||
|
|||||||
@@ -169,7 +169,9 @@ def pkg_hash(args):
|
|||||||
|
|
||||||
def get_grep(required=False):
|
def get_grep(required=False):
|
||||||
"""Get a grep command to use with ``spack pkg grep``."""
|
"""Get a grep command to use with ``spack pkg grep``."""
|
||||||
return exe.which(os.environ.get("SPACK_GREP") or "grep", required=required)
|
grep = exe.which(os.environ.get("SPACK_GREP") or "grep", required=required)
|
||||||
|
grep.ignore_quotes = True # allow `spack pkg grep '"quoted string"'` without warning
|
||||||
|
return grep
|
||||||
|
|
||||||
|
|
||||||
def pkg_grep(args, unknown_args):
|
def pkg_grep(args, unknown_args):
|
||||||
|
|||||||
@@ -3,8 +3,13 @@
|
|||||||
#
|
#
|
||||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||||
|
|
||||||
|
import filecmp
|
||||||
import os
|
import os
|
||||||
|
import pathlib
|
||||||
import sys
|
import sys
|
||||||
|
import tempfile
|
||||||
|
import zipfile
|
||||||
|
from typing import List, Optional, Tuple
|
||||||
|
|
||||||
import llnl.util.tty as tty
|
import llnl.util.tty as tty
|
||||||
|
|
||||||
@@ -12,6 +17,7 @@
|
|||||||
import spack.repo
|
import spack.repo
|
||||||
import spack.util.path
|
import spack.util.path
|
||||||
from spack.cmd.common import arguments
|
from spack.cmd.common import arguments
|
||||||
|
from spack.util.archive import reproducible_zipfile_from_prefix
|
||||||
|
|
||||||
description = "manage package source repositories"
|
description = "manage package source repositories"
|
||||||
section = "config"
|
section = "config"
|
||||||
@@ -67,6 +73,12 @@ def setup_parser(subparser):
|
|||||||
help="configuration scope to modify",
|
help="configuration scope to modify",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Zip
|
||||||
|
zip_parser = sp.add_parser("zip", help=repo_zip.__doc__)
|
||||||
|
zip_parser.add_argument(
|
||||||
|
"namespace_or_path", help="namespace or path of a Spack package repository"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def repo_create(args):
|
def repo_create(args):
|
||||||
"""create a new package repository"""
|
"""create a new package repository"""
|
||||||
@@ -109,31 +121,18 @@ def repo_add(args):
|
|||||||
def repo_remove(args):
|
def repo_remove(args):
|
||||||
"""remove a repository from Spack's configuration"""
|
"""remove a repository from Spack's configuration"""
|
||||||
repos = spack.config.get("repos", scope=args.scope)
|
repos = spack.config.get("repos", scope=args.scope)
|
||||||
namespace_or_path = args.namespace_or_path
|
|
||||||
|
|
||||||
# If the argument is a path, remove that repository from config.
|
key, repo = _get_repo(repos, args.namespace_or_path)
|
||||||
canon_path = spack.util.path.canonicalize_path(namespace_or_path)
|
|
||||||
for repo_path in repos:
|
|
||||||
repo_canon_path = spack.util.path.canonicalize_path(repo_path)
|
|
||||||
if canon_path == repo_canon_path:
|
|
||||||
repos.remove(repo_path)
|
|
||||||
spack.config.set("repos", repos, args.scope)
|
|
||||||
tty.msg("Removed repository %s" % repo_path)
|
|
||||||
return
|
|
||||||
|
|
||||||
# If it is a namespace, remove corresponding repo
|
if not key:
|
||||||
for path in repos:
|
tty.die(f"No repository with path or namespace: {args.namespace_or_path}")
|
||||||
try:
|
|
||||||
repo = spack.repo.from_path(path)
|
|
||||||
if repo.namespace == namespace_or_path:
|
|
||||||
repos.remove(path)
|
|
||||||
spack.config.set("repos", repos, args.scope)
|
|
||||||
tty.msg("Removed repository %s with namespace '%s'." % (repo.root, repo.namespace))
|
|
||||||
return
|
|
||||||
except spack.repo.RepoError:
|
|
||||||
continue
|
|
||||||
|
|
||||||
tty.die("No repository with path or namespace: %s" % namespace_or_path)
|
repos.remove(key)
|
||||||
|
spack.config.set("repos", repos, args.scope)
|
||||||
|
if repo:
|
||||||
|
tty.msg(f"Removed repository {repo.root} with namespace '{repo.namespace}'")
|
||||||
|
else:
|
||||||
|
tty.msg(f"Removed repository {key}")
|
||||||
|
|
||||||
|
|
||||||
def repo_list(args):
|
def repo_list(args):
|
||||||
@@ -147,17 +146,77 @@ def repo_list(args):
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
if sys.stdout.isatty():
|
if sys.stdout.isatty():
|
||||||
msg = "%d package repositor" % len(repos)
|
tty.msg(f"{len(repos)} package repositor{'y.' if len(repos) == 1 else 'ies.'}")
|
||||||
msg += "y." if len(repos) == 1 else "ies."
|
|
||||||
tty.msg(msg)
|
|
||||||
|
|
||||||
if not repos:
|
if not repos:
|
||||||
return
|
return
|
||||||
|
|
||||||
max_ns_len = max(len(r.namespace) for r in repos)
|
max_ns_len = max(len(r.namespace) for r in repos)
|
||||||
for repo in repos:
|
for repo in repos:
|
||||||
fmt = "%%-%ds%%s" % (max_ns_len + 4)
|
print(f"{repo.namespace:<{max_ns_len}} {repo.root}")
|
||||||
print(fmt % (repo.namespace, repo.root))
|
|
||||||
|
|
||||||
|
def repo_zip(args):
|
||||||
|
"""zip a package repository to make it immutable and faster to load"""
|
||||||
|
key, _ = _get_repo(spack.config.get("repos"), args.namespace_or_path)
|
||||||
|
|
||||||
|
if not key:
|
||||||
|
tty.die(f"No repository with path or namespace: {args.namespace_or_path}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
repo = spack.repo.from_path(key)
|
||||||
|
except spack.repo.RepoError:
|
||||||
|
tty.die(f"No repository at path: {key}")
|
||||||
|
|
||||||
|
def _zip_repo_skip(entry: os.DirEntry):
|
||||||
|
return entry.name == "__pycache__"
|
||||||
|
|
||||||
|
def _zip_repo_path_to_name(path: str) -> str:
|
||||||
|
# use spack/pkg/<repo>/* prefix and rename `package.py` as `__init__.py`
|
||||||
|
rel_path = pathlib.PurePath(path).relative_to(repo.packages_path)
|
||||||
|
if rel_path.name == "package.py":
|
||||||
|
rel_path = rel_path.with_name("__init__.py")
|
||||||
|
return str(rel_path)
|
||||||
|
|
||||||
|
# Create a zipfile in a temporary file
|
||||||
|
with tempfile.NamedTemporaryFile(delete=False, mode="wb", dir=repo.root) as f, zipfile.ZipFile(
|
||||||
|
f, "w", compression=zipfile.ZIP_DEFLATED
|
||||||
|
) as zip:
|
||||||
|
reproducible_zipfile_from_prefix(
|
||||||
|
zip, repo.packages_path, skip=_zip_repo_skip, path_to_name=_zip_repo_path_to_name
|
||||||
|
)
|
||||||
|
|
||||||
|
packages_zip = os.path.join(repo.root, "packages.zip")
|
||||||
|
try:
|
||||||
|
# Inform the user whether or not the repo was modified since it was last zipped
|
||||||
|
if os.path.exists(packages_zip) and filecmp.cmp(f.name, packages_zip):
|
||||||
|
tty.msg(f"{repo.namespace}: {packages_zip} is up to date")
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
os.rename(f.name, packages_zip)
|
||||||
|
tty.msg(f"{repo.namespace} was zipped: {packages_zip}")
|
||||||
|
finally:
|
||||||
|
try:
|
||||||
|
os.unlink(f.name)
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def _get_repo(repos: List[str], path_or_name) -> Tuple[Optional[str], Optional[spack.repo.Repo]]:
|
||||||
|
"""Find repo by path or namespace"""
|
||||||
|
canon_path = spack.util.path.canonicalize_path(path_or_name)
|
||||||
|
for path in repos:
|
||||||
|
if canon_path == spack.util.path.canonicalize_path(path):
|
||||||
|
return path, None
|
||||||
|
|
||||||
|
for path in repos:
|
||||||
|
try:
|
||||||
|
repo = spack.repo.from_path(path)
|
||||||
|
except spack.repo.RepoError:
|
||||||
|
continue
|
||||||
|
if repo.namespace == path_or_name:
|
||||||
|
return path, repo
|
||||||
|
return None, None
|
||||||
|
|
||||||
|
|
||||||
def repo(parser, args):
|
def repo(parser, args):
|
||||||
@@ -167,5 +226,6 @@ def repo(parser, args):
|
|||||||
"add": repo_add,
|
"add": repo_add,
|
||||||
"remove": repo_remove,
|
"remove": repo_remove,
|
||||||
"rm": repo_remove,
|
"rm": repo_remove,
|
||||||
|
"zip": repo_zip,
|
||||||
}
|
}
|
||||||
action[args.repo_command](args)
|
action[args.repo_command](args)
|
||||||
|
|||||||
@@ -339,7 +339,7 @@ def add(self, pkg_name, fetcher):
|
|||||||
for pkg_cls in spack.repo.PATH.all_package_classes():
|
for pkg_cls in spack.repo.PATH.all_package_classes():
|
||||||
npkgs += 1
|
npkgs += 1
|
||||||
|
|
||||||
for v in pkg_cls.versions:
|
for v in list(pkg_cls.versions):
|
||||||
try:
|
try:
|
||||||
pkg = pkg_cls(spack.spec.Spec(pkg_cls.name))
|
pkg = pkg_cls(spack.spec.Spec(pkg_cls.name))
|
||||||
fetcher = fs.for_package_version(pkg, v)
|
fetcher = fs.for_package_version(pkg, v)
|
||||||
|
|||||||
@@ -23,11 +23,6 @@ def setup_parser(subparser):
|
|||||||
output.add_argument(
|
output.add_argument(
|
||||||
"-s", "--safe", action="store_true", help="only list safe versions of the package"
|
"-s", "--safe", action="store_true", help="only list safe versions of the package"
|
||||||
)
|
)
|
||||||
output.add_argument(
|
|
||||||
"--safe-only",
|
|
||||||
action="store_true",
|
|
||||||
help="[deprecated] only list safe versions of the package",
|
|
||||||
)
|
|
||||||
output.add_argument(
|
output.add_argument(
|
||||||
"-r", "--remote", action="store_true", help="only list remote versions of the package"
|
"-r", "--remote", action="store_true", help="only list remote versions of the package"
|
||||||
)
|
)
|
||||||
@@ -47,17 +42,13 @@ def versions(parser, args):
|
|||||||
|
|
||||||
safe_versions = pkg.versions
|
safe_versions = pkg.versions
|
||||||
|
|
||||||
if args.safe_only:
|
|
||||||
tty.warn('"--safe-only" is deprecated. Use "--safe" instead.')
|
|
||||||
args.safe = args.safe_only
|
|
||||||
|
|
||||||
if not (args.remote or args.new):
|
if not (args.remote or args.new):
|
||||||
if sys.stdout.isatty():
|
if sys.stdout.isatty():
|
||||||
tty.msg("Safe versions (already checksummed):")
|
tty.msg("Safe versions (already checksummed):")
|
||||||
|
|
||||||
if not safe_versions:
|
if not safe_versions:
|
||||||
if sys.stdout.isatty():
|
if sys.stdout.isatty():
|
||||||
tty.warn("Found no versions for {0}".format(pkg.name))
|
tty.warn(f"Found no versions for {pkg.name}")
|
||||||
tty.debug("Manually add versions to the package.")
|
tty.debug("Manually add versions to the package.")
|
||||||
else:
|
else:
|
||||||
colify(sorted(safe_versions, reverse=True), indent=2)
|
colify(sorted(safe_versions, reverse=True), indent=2)
|
||||||
@@ -83,12 +74,12 @@ def versions(parser, args):
|
|||||||
if not remote_versions:
|
if not remote_versions:
|
||||||
if sys.stdout.isatty():
|
if sys.stdout.isatty():
|
||||||
if not fetched_versions:
|
if not fetched_versions:
|
||||||
tty.warn("Found no versions for {0}".format(pkg.name))
|
tty.warn(f"Found no versions for {pkg.name}")
|
||||||
tty.debug(
|
tty.debug(
|
||||||
"Check the list_url and list_depth attributes of "
|
"Check the list_url and list_depth attributes of "
|
||||||
"the package to help Spack find versions."
|
"the package to help Spack find versions."
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
tty.warn("Found no unchecksummed versions for {0}".format(pkg.name))
|
tty.warn(f"Found no unchecksummed versions for {pkg.name}")
|
||||||
else:
|
else:
|
||||||
colify(sorted(remote_versions, reverse=True), indent=2)
|
colify(sorted(remote_versions, reverse=True), indent=2)
|
||||||
|
|||||||
@@ -278,11 +278,6 @@ def debug_flags(self):
|
|||||||
def opt_flags(self):
|
def opt_flags(self):
|
||||||
return ["-O", "-O0", "-O1", "-O2", "-O3"]
|
return ["-O", "-O0", "-O1", "-O2", "-O3"]
|
||||||
|
|
||||||
# Cray PrgEnv name that can be used to load this compiler
|
|
||||||
PrgEnv: Optional[str] = None
|
|
||||||
# Name of module used to switch versions of this compiler
|
|
||||||
PrgEnv_compiler: Optional[str] = None
|
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
cspec,
|
cspec,
|
||||||
|
|||||||
@@ -25,9 +25,6 @@ class Aocc(Compiler):
|
|||||||
# Subclasses use possible names of Fortran 90 compiler
|
# Subclasses use possible names of Fortran 90 compiler
|
||||||
fc_names = ["flang"]
|
fc_names = ["flang"]
|
||||||
|
|
||||||
PrgEnv = "PrgEnv-aocc"
|
|
||||||
PrgEnv_compiler = "aocc"
|
|
||||||
|
|
||||||
version_argument = "--version"
|
version_argument = "--version"
|
||||||
|
|
||||||
@property
|
@property
|
||||||
|
|||||||
@@ -34,12 +34,9 @@ def __init__(self, *args, **kwargs):
|
|||||||
# MacPorts builds gcc versions with prefixes and -mp-X.Y suffixes.
|
# MacPorts builds gcc versions with prefixes and -mp-X.Y suffixes.
|
||||||
suffixes = [r"-mp-\d\.\d"]
|
suffixes = [r"-mp-\d\.\d"]
|
||||||
|
|
||||||
PrgEnv = "PrgEnv-cray"
|
|
||||||
PrgEnv_compiler = "cce"
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def link_paths(self):
|
def link_paths(self):
|
||||||
if any(self.PrgEnv in m for m in self.modules):
|
if any("PrgEnv-cray" in m for m in self.modules):
|
||||||
# Old module-based interface to cray compilers
|
# Old module-based interface to cray compilers
|
||||||
return {
|
return {
|
||||||
"cc": os.path.join("cce", "cc"),
|
"cc": os.path.join("cce", "cc"),
|
||||||
|
|||||||
@@ -40,9 +40,6 @@ class Gcc(spack.compiler.Compiler):
|
|||||||
"fc": os.path.join("gcc", "gfortran"),
|
"fc": os.path.join("gcc", "gfortran"),
|
||||||
}
|
}
|
||||||
|
|
||||||
PrgEnv = "PrgEnv-gnu"
|
|
||||||
PrgEnv_compiler = "gcc"
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def verbose_flag(self):
|
def verbose_flag(self):
|
||||||
return "-v"
|
return "-v"
|
||||||
|
|||||||
@@ -31,9 +31,6 @@ class Intel(Compiler):
|
|||||||
"fc": os.path.join("intel", "ifort"),
|
"fc": os.path.join("intel", "ifort"),
|
||||||
}
|
}
|
||||||
|
|
||||||
PrgEnv = "PrgEnv-intel"
|
|
||||||
PrgEnv_compiler = "intel"
|
|
||||||
|
|
||||||
if sys.platform == "win32":
|
if sys.platform == "win32":
|
||||||
version_argument = "/QV"
|
version_argument = "/QV"
|
||||||
else:
|
else:
|
||||||
@@ -126,3 +123,14 @@ def fc_pic_flag(self):
|
|||||||
@property
|
@property
|
||||||
def stdcxx_libs(self):
|
def stdcxx_libs(self):
|
||||||
return ("-cxxlib",)
|
return ("-cxxlib",)
|
||||||
|
|
||||||
|
def setup_custom_environment(self, pkg, env):
|
||||||
|
# Edge cases for Intel's oneAPI compilers when using the legacy classic compilers:
|
||||||
|
# Always pass flags to disable deprecation warnings, since these warnings can
|
||||||
|
# confuse tools that parse the output of compiler commands (e.g. version checks).
|
||||||
|
if self.cc and self.cc.endswith("icc") and self.real_version >= Version("2021"):
|
||||||
|
env.append_flags("SPACK_ALWAYS_CFLAGS", "-diag-disable=10441")
|
||||||
|
if self.cxx and self.cxx.endswith("icpc") and self.real_version >= Version("2021"):
|
||||||
|
env.append_flags("SPACK_ALWAYS_CXXFLAGS", "-diag-disable=10441")
|
||||||
|
if self.fc and self.fc.endswith("ifort") and self.real_version >= Version("2021"):
|
||||||
|
env.append_flags("SPACK_ALWAYS_FFLAGS", "-diag-disable=10448")
|
||||||
|
|||||||
@@ -231,24 +231,55 @@ def msvc_version(self):
|
|||||||
|
|
||||||
@property
|
@property
|
||||||
def short_msvc_version(self):
|
def short_msvc_version(self):
|
||||||
|
"""This is the shorthand VCToolset version of form
|
||||||
|
MSVC<short-ver>
|
||||||
"""
|
"""
|
||||||
This is the shorthand VCToolset version of form
|
return "MSVC" + self.vc_toolset_ver
|
||||||
MSVC<short-ver> *NOT* the full version, for that see
|
|
||||||
|
@property
|
||||||
|
def vc_toolset_ver(self):
|
||||||
|
"""
|
||||||
|
The toolset version is the version of the combined set of cl and link
|
||||||
|
This typically relates directly to VS version i.e. VS 2022 is v143
|
||||||
|
VS 19 is v142, etc.
|
||||||
|
This value is defined by the first three digits of the major + minor
|
||||||
|
version of the VS toolset (143 for 14.3x.bbbbb). Traditionally the
|
||||||
|
minor version has remained a static two digit number for a VS release
|
||||||
|
series, however, as of VS22, this is no longer true, both
|
||||||
|
14.4x.bbbbb and 14.3x.bbbbb are considered valid VS22 VC toolset
|
||||||
|
versions due to a change in toolset minor version sentiment.
|
||||||
|
|
||||||
|
This is *NOT* the full version, for that see
|
||||||
Msvc.msvc_version or MSVC.platform_toolset_ver for the
|
Msvc.msvc_version or MSVC.platform_toolset_ver for the
|
||||||
raw platform toolset version
|
raw platform toolset version
|
||||||
|
|
||||||
"""
|
"""
|
||||||
ver = self.platform_toolset_ver
|
ver = self.msvc_version[:2].joined.string[:3]
|
||||||
return "MSVC" + ver
|
return ver
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def platform_toolset_ver(self):
|
def platform_toolset_ver(self):
|
||||||
"""
|
"""
|
||||||
This is the platform toolset version of current MSVC compiler
|
This is the platform toolset version of current MSVC compiler
|
||||||
i.e. 142.
|
i.e. 142. The platform toolset is the targeted MSVC library/compiler
|
||||||
|
versions by compilation (this is different from the VC Toolset)
|
||||||
|
|
||||||
|
|
||||||
This is different from the VC toolset version as established
|
This is different from the VC toolset version as established
|
||||||
by `short_msvc_version`
|
by `short_msvc_version`, but typically are represented by the same
|
||||||
|
three digit value
|
||||||
"""
|
"""
|
||||||
return self.msvc_version[:2].joined.string[:3]
|
# Typically VS toolset version and platform toolset versions match
|
||||||
|
# VS22 introduces the first divergence of VS toolset version
|
||||||
|
# (144 for "recent" releases) and platform toolset version (143)
|
||||||
|
# so it needs additional handling until MS releases v144
|
||||||
|
# (assuming v144 is also for VS22)
|
||||||
|
# or adds better support for detection
|
||||||
|
# TODO: (johnwparent) Update this logic for the next platform toolset
|
||||||
|
# or VC toolset version update
|
||||||
|
toolset_ver = self.vc_toolset_ver
|
||||||
|
vs22_toolset = Version(toolset_ver) > Version("142")
|
||||||
|
return toolset_ver if not vs22_toolset else "143"
|
||||||
|
|
||||||
def _compiler_version(self, compiler):
|
def _compiler_version(self, compiler):
|
||||||
"""Returns version object for given compiler"""
|
"""Returns version object for given compiler"""
|
||||||
|
|||||||
@@ -29,9 +29,6 @@ class Nvhpc(Compiler):
|
|||||||
"fc": os.path.join("nvhpc", "nvfortran"),
|
"fc": os.path.join("nvhpc", "nvfortran"),
|
||||||
}
|
}
|
||||||
|
|
||||||
PrgEnv = "PrgEnv-nvhpc"
|
|
||||||
PrgEnv_compiler = "nvhpc"
|
|
||||||
|
|
||||||
version_argument = "--version"
|
version_argument = "--version"
|
||||||
version_regex = r"nv[^ ]* (?:[^ ]+ Dev-r)?([0-9.]+)(?:-[0-9]+)?"
|
version_regex = r"nv[^ ]* (?:[^ ]+ Dev-r)?([0-9.]+)(?:-[0-9]+)?"
|
||||||
|
|
||||||
|
|||||||
@@ -9,6 +9,7 @@
|
|||||||
from llnl.util import tty
|
from llnl.util import tty
|
||||||
|
|
||||||
from spack.compiler import Compiler
|
from spack.compiler import Compiler
|
||||||
|
from spack.version import Version
|
||||||
|
|
||||||
|
|
||||||
class Oneapi(Compiler):
|
class Oneapi(Compiler):
|
||||||
@@ -32,9 +33,6 @@ class Oneapi(Compiler):
|
|||||||
"fc": os.path.join("oneapi", "ifx"),
|
"fc": os.path.join("oneapi", "ifx"),
|
||||||
}
|
}
|
||||||
|
|
||||||
PrgEnv = "PrgEnv-oneapi"
|
|
||||||
PrgEnv_compiler = "oneapi"
|
|
||||||
|
|
||||||
version_argument = "--version"
|
version_argument = "--version"
|
||||||
version_regex = r"(?:(?:oneAPI DPC\+\+(?:\/C\+\+)? Compiler)|(?:\(IFORT\))|(?:\(IFX\))) (\S+)"
|
version_regex = r"(?:(?:oneAPI DPC\+\+(?:\/C\+\+)? Compiler)|(?:\(IFORT\))|(?:\(IFX\))) (\S+)"
|
||||||
|
|
||||||
@@ -142,6 +140,16 @@ def setup_custom_environment(self, pkg, env):
|
|||||||
env.prepend_path("PATH", dirname(self.cxx))
|
env.prepend_path("PATH", dirname(self.cxx))
|
||||||
env.prepend_path("LD_LIBRARY_PATH", join(dirname(dirname(self.cxx)), "lib"))
|
env.prepend_path("LD_LIBRARY_PATH", join(dirname(dirname(self.cxx)), "lib"))
|
||||||
|
|
||||||
|
# Edge cases for Intel's oneAPI compilers when using the legacy classic compilers:
|
||||||
|
# Always pass flags to disable deprecation warnings, since these warnings can
|
||||||
|
# confuse tools that parse the output of compiler commands (e.g. version checks).
|
||||||
|
if self.cc and self.cc.endswith("icc") and self.real_version >= Version("2021"):
|
||||||
|
env.append_flags("SPACK_ALWAYS_CFLAGS", "-diag-disable=10441")
|
||||||
|
if self.cxx and self.cxx.endswith("icpc") and self.real_version >= Version("2021"):
|
||||||
|
env.append_flags("SPACK_ALWAYS_CXXFLAGS", "-diag-disable=10441")
|
||||||
|
if self.fc and self.fc.endswith("ifort") and self.real_version >= Version("2021"):
|
||||||
|
env.append_flags("SPACK_ALWAYS_FFLAGS", "-diag-disable=10448")
|
||||||
|
|
||||||
# 2024 release bumped the libsycl version because of an ABI
|
# 2024 release bumped the libsycl version because of an ABI
|
||||||
# change, 2024 compilers are required. You will see this
|
# change, 2024 compilers are required. You will see this
|
||||||
# error:
|
# error:
|
||||||
|
|||||||
@@ -30,9 +30,6 @@ class Pgi(Compiler):
|
|||||||
"fc": os.path.join("pgi", "pgfortran"),
|
"fc": os.path.join("pgi", "pgfortran"),
|
||||||
}
|
}
|
||||||
|
|
||||||
PrgEnv = "PrgEnv-pgi"
|
|
||||||
PrgEnv_compiler = "pgi"
|
|
||||||
|
|
||||||
version_argument = "-V"
|
version_argument = "-V"
|
||||||
ignore_version_errors = [2] # `pgcc -V` on PowerPC annoyingly returns 2
|
ignore_version_errors = [2] # `pgcc -V` on PowerPC annoyingly returns 2
|
||||||
version_regex = r"pg[^ ]* ([0-9.]+)-[0-9]+ (LLVM )?[^ ]+ target on "
|
version_regex = r"pg[^ ]* ([0-9.]+)-[0-9]+ (LLVM )?[^ ]+ target on "
|
||||||
|
|||||||
@@ -23,9 +23,6 @@ class Rocmcc(spack.compilers.clang.Clang):
|
|||||||
# Subclasses use possible names of Fortran 90 compiler
|
# Subclasses use possible names of Fortran 90 compiler
|
||||||
fc_names = ["amdflang"]
|
fc_names = ["amdflang"]
|
||||||
|
|
||||||
PrgEnv = "PrgEnv-amd"
|
|
||||||
PrgEnv_compiler = "amd"
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def link_paths(self):
|
def link_paths(self):
|
||||||
link_paths = {
|
link_paths = {
|
||||||
|
|||||||
@@ -2,29 +2,11 @@
|
|||||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Functions here are used to take abstract specs and make them concrete.
|
(DEPRECATED) Used to contain the code for the original concretizer
|
||||||
For example, if a spec asks for a version between 1.8 and 1.9, these
|
|
||||||
functions might take will take the most recent 1.9 version of the
|
|
||||||
package available. Or, if the user didn't specify a compiler for a
|
|
||||||
spec, then this will assign a compiler to the spec based on defaults
|
|
||||||
or user preferences.
|
|
||||||
|
|
||||||
TODO: make this customizable and allow users to configure
|
|
||||||
concretization policies.
|
|
||||||
"""
|
"""
|
||||||
import functools
|
|
||||||
import platform
|
|
||||||
import tempfile
|
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
from itertools import chain
|
from itertools import chain
|
||||||
from typing import Union
|
|
||||||
|
|
||||||
import archspec.cpu
|
|
||||||
|
|
||||||
import llnl.util.lang
|
|
||||||
import llnl.util.tty as tty
|
|
||||||
|
|
||||||
import spack.abi
|
import spack.abi
|
||||||
import spack.compilers
|
import spack.compilers
|
||||||
@@ -37,639 +19,20 @@
|
|||||||
import spack.target
|
import spack.target
|
||||||
import spack.tengine
|
import spack.tengine
|
||||||
import spack.util.path
|
import spack.util.path
|
||||||
import spack.variant as vt
|
|
||||||
from spack.package_prefs import PackagePrefs, is_spec_buildable, spec_externals
|
|
||||||
from spack.version import ClosedOpenRange, VersionList, ver
|
|
||||||
|
|
||||||
#: impements rudimentary logic for ABI compatibility
|
|
||||||
_abi: Union[spack.abi.ABI, llnl.util.lang.Singleton] = llnl.util.lang.Singleton(
|
|
||||||
lambda: spack.abi.ABI()
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@functools.total_ordering
|
|
||||||
class reverse_order:
|
|
||||||
"""Helper for creating key functions.
|
|
||||||
|
|
||||||
This is a wrapper that inverts the sense of the natural
|
|
||||||
comparisons on the object.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, value):
|
|
||||||
self.value = value
|
|
||||||
|
|
||||||
def __eq__(self, other):
|
|
||||||
return other.value == self.value
|
|
||||||
|
|
||||||
def __lt__(self, other):
|
|
||||||
return other.value < self.value
|
|
||||||
|
|
||||||
|
|
||||||
class Concretizer:
|
class Concretizer:
|
||||||
"""You can subclass this class to override some of the default
|
"""(DEPRECATED) Only contains logic to enable/disable compiler existence checks."""
|
||||||
concretization strategies, or you can override all of them.
|
|
||||||
"""
|
|
||||||
|
|
||||||
#: Controls whether we check that compiler versions actually exist
|
#: Controls whether we check that compiler versions actually exist
|
||||||
#: during concretization. Used for testing and for mirror creation
|
#: during concretization. Used for testing and for mirror creation
|
||||||
check_for_compiler_existence = None
|
check_for_compiler_existence = None
|
||||||
|
|
||||||
#: Packages that the old concretizer cannot deal with correctly, and cannot build anyway.
|
def __init__(self):
|
||||||
#: Those will not be considered as providers for virtuals.
|
|
||||||
non_buildable_packages = {"glibc", "musl"}
|
|
||||||
|
|
||||||
def __init__(self, abstract_spec=None):
|
|
||||||
if Concretizer.check_for_compiler_existence is None:
|
if Concretizer.check_for_compiler_existence is None:
|
||||||
Concretizer.check_for_compiler_existence = not spack.config.get(
|
Concretizer.check_for_compiler_existence = not spack.config.get(
|
||||||
"config:install_missing_compilers", False
|
"config:install_missing_compilers", False
|
||||||
)
|
)
|
||||||
self.abstract_spec = abstract_spec
|
|
||||||
self._adjust_target_answer_generator = None
|
|
||||||
|
|
||||||
def concretize_develop(self, spec):
|
|
||||||
"""
|
|
||||||
Add ``dev_path=*`` variant to packages built from local source.
|
|
||||||
"""
|
|
||||||
env = spack.environment.active_environment()
|
|
||||||
dev_info = env.dev_specs.get(spec.name, {}) if env else {}
|
|
||||||
if not dev_info:
|
|
||||||
return False
|
|
||||||
|
|
||||||
path = spack.util.path.canonicalize_path(dev_info["path"], default_wd=env.path)
|
|
||||||
|
|
||||||
if "dev_path" in spec.variants:
|
|
||||||
assert spec.variants["dev_path"].value == path
|
|
||||||
changed = False
|
|
||||||
else:
|
|
||||||
spec.variants.setdefault("dev_path", vt.SingleValuedVariant("dev_path", path))
|
|
||||||
changed = True
|
|
||||||
changed |= spec.constrain(dev_info["spec"])
|
|
||||||
return changed
|
|
||||||
|
|
||||||
def _valid_virtuals_and_externals(self, spec):
|
|
||||||
"""Returns a list of candidate virtual dep providers and external
|
|
||||||
packages that coiuld be used to concretize a spec.
|
|
||||||
|
|
||||||
Preferred specs come first in the list.
|
|
||||||
"""
|
|
||||||
# First construct a list of concrete candidates to replace spec with.
|
|
||||||
candidates = [spec]
|
|
||||||
pref_key = lambda spec: 0 # no-op pref key
|
|
||||||
|
|
||||||
if spec.virtual:
|
|
||||||
candidates = [
|
|
||||||
s
|
|
||||||
for s in spack.repo.PATH.providers_for(spec)
|
|
||||||
if s.name not in self.non_buildable_packages
|
|
||||||
]
|
|
||||||
if not candidates:
|
|
||||||
raise spack.error.UnsatisfiableProviderSpecError(candidates[0], spec)
|
|
||||||
|
|
||||||
# Find nearest spec in the DAG (up then down) that has prefs.
|
|
||||||
spec_w_prefs = find_spec(
|
|
||||||
spec, lambda p: PackagePrefs.has_preferred_providers(p.name, spec.name), spec
|
|
||||||
) # default to spec itself.
|
|
||||||
|
|
||||||
# Create a key to sort candidates by the prefs we found
|
|
||||||
pref_key = PackagePrefs(spec_w_prefs.name, "providers", spec.name)
|
|
||||||
|
|
||||||
# For each candidate package, if it has externals, add those
|
|
||||||
# to the usable list. if it's not buildable, then *only* add
|
|
||||||
# the externals.
|
|
||||||
usable = []
|
|
||||||
for cspec in candidates:
|
|
||||||
if is_spec_buildable(cspec):
|
|
||||||
usable.append(cspec)
|
|
||||||
|
|
||||||
externals = spec_externals(cspec)
|
|
||||||
for ext in externals:
|
|
||||||
if ext.intersects(spec):
|
|
||||||
usable.append(ext)
|
|
||||||
|
|
||||||
# If nothing is in the usable list now, it's because we aren't
|
|
||||||
# allowed to build anything.
|
|
||||||
if not usable:
|
|
||||||
raise NoBuildError(spec)
|
|
||||||
|
|
||||||
# Use a sort key to order the results
|
|
||||||
return sorted(
|
|
||||||
usable,
|
|
||||||
key=lambda spec: (
|
|
||||||
not spec.external, # prefer externals
|
|
||||||
pref_key(spec), # respect prefs
|
|
||||||
spec.name, # group by name
|
|
||||||
reverse_order(spec.versions), # latest version
|
|
||||||
spec, # natural order
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
def choose_virtual_or_external(self, spec: spack.spec.Spec):
|
|
||||||
"""Given a list of candidate virtual and external packages, try to
|
|
||||||
find one that is most ABI compatible.
|
|
||||||
"""
|
|
||||||
candidates = self._valid_virtuals_and_externals(spec)
|
|
||||||
if not candidates:
|
|
||||||
return candidates
|
|
||||||
|
|
||||||
# Find the nearest spec in the dag that has a compiler. We'll
|
|
||||||
# use that spec to calibrate compiler compatibility.
|
|
||||||
abi_exemplar = find_spec(spec, lambda x: x.compiler)
|
|
||||||
if abi_exemplar is None:
|
|
||||||
abi_exemplar = spec.root
|
|
||||||
|
|
||||||
# Sort candidates from most to least compatibility.
|
|
||||||
# We reverse because True > False.
|
|
||||||
# Sort is stable, so candidates keep their order.
|
|
||||||
return sorted(
|
|
||||||
candidates,
|
|
||||||
reverse=True,
|
|
||||||
key=lambda spec: (
|
|
||||||
_abi.compatible(spec, abi_exemplar, loose=True),
|
|
||||||
_abi.compatible(spec, abi_exemplar),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
def concretize_version(self, spec):
|
|
||||||
"""If the spec is already concrete, return. Otherwise take
|
|
||||||
the preferred version from spackconfig, and default to the package's
|
|
||||||
version if there are no available versions.
|
|
||||||
|
|
||||||
TODO: In many cases we probably want to look for installed
|
|
||||||
versions of each package and use an installed version
|
|
||||||
if we can link to it. The policy implemented here will
|
|
||||||
tend to rebuild a lot of stuff becasue it will prefer
|
|
||||||
a compiler in the spec to any compiler already-
|
|
||||||
installed things were built with. There is likely
|
|
||||||
some better policy that finds some middle ground
|
|
||||||
between these two extremes.
|
|
||||||
"""
|
|
||||||
# return if already concrete.
|
|
||||||
if spec.versions.concrete:
|
|
||||||
return False
|
|
||||||
|
|
||||||
# List of versions we could consider, in sorted order
|
|
||||||
pkg_versions = spec.package_class.versions
|
|
||||||
usable = [v for v in pkg_versions if any(v.intersects(sv) for sv in spec.versions)]
|
|
||||||
|
|
||||||
yaml_prefs = PackagePrefs(spec.name, "version")
|
|
||||||
|
|
||||||
# The keys below show the order of precedence of factors used
|
|
||||||
# to select a version when concretizing. The item with
|
|
||||||
# the "largest" key will be selected.
|
|
||||||
#
|
|
||||||
# NOTE: When COMPARING VERSIONS, the '@develop' version is always
|
|
||||||
# larger than other versions. BUT when CONCRETIZING,
|
|
||||||
# the largest NON-develop version is selected by default.
|
|
||||||
keyfn = lambda v: (
|
|
||||||
# ------- Special direction from the user
|
|
||||||
# Respect order listed in packages.yaml
|
|
||||||
-yaml_prefs(v),
|
|
||||||
# The preferred=True flag (packages or packages.yaml or both?)
|
|
||||||
pkg_versions.get(v).get("preferred", False),
|
|
||||||
# ------- Regular case: use latest non-develop version by default.
|
|
||||||
# Avoid @develop version, which would otherwise be the "largest"
|
|
||||||
# in straight version comparisons
|
|
||||||
not v.isdevelop(),
|
|
||||||
# Compare the version itself
|
|
||||||
# This includes the logic:
|
|
||||||
# a) develop > everything (disabled by "not v.isdevelop() above)
|
|
||||||
# b) numeric > non-numeric
|
|
||||||
# c) Numeric or string comparison
|
|
||||||
v,
|
|
||||||
)
|
|
||||||
usable.sort(key=keyfn, reverse=True)
|
|
||||||
|
|
||||||
if usable:
|
|
||||||
spec.versions = ver([usable[0]])
|
|
||||||
else:
|
|
||||||
# We don't know of any SAFE versions that match the given
|
|
||||||
# spec. Grab the spec's versions and grab the highest
|
|
||||||
# *non-open* part of the range of versions it specifies.
|
|
||||||
# Someone else can raise an error if this happens,
|
|
||||||
# e.g. when we go to fetch it and don't know how. But it
|
|
||||||
# *might* work.
|
|
||||||
if not spec.versions or spec.versions == VersionList([":"]):
|
|
||||||
raise NoValidVersionError(spec)
|
|
||||||
else:
|
|
||||||
last = spec.versions[-1]
|
|
||||||
if isinstance(last, ClosedOpenRange):
|
|
||||||
range_as_version = VersionList([last]).concrete_range_as_version
|
|
||||||
if range_as_version:
|
|
||||||
spec.versions = ver([range_as_version])
|
|
||||||
else:
|
|
||||||
raise NoValidVersionError(spec)
|
|
||||||
else:
|
|
||||||
spec.versions = ver([last])
|
|
||||||
|
|
||||||
return True # Things changed
|
|
||||||
|
|
||||||
def concretize_architecture(self, spec):
|
|
||||||
"""If the spec is empty provide the defaults of the platform. If the
|
|
||||||
architecture is not a string type, then check if either the platform,
|
|
||||||
target or operating system are concretized. If any of the fields are
|
|
||||||
changed then return True. If everything is concretized (i.e the
|
|
||||||
architecture attribute is a namedtuple of classes) then return False.
|
|
||||||
If the target is a string type, then convert the string into a
|
|
||||||
concretized architecture. If it has no architecture and the root of the
|
|
||||||
DAG has an architecture, then use the root otherwise use the defaults
|
|
||||||
on the platform.
|
|
||||||
"""
|
|
||||||
# ensure type safety for the architecture
|
|
||||||
if spec.architecture is None:
|
|
||||||
spec.architecture = spack.spec.ArchSpec()
|
|
||||||
|
|
||||||
if spec.architecture.concrete:
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Get platform of nearest spec with a platform, including spec
|
|
||||||
# If spec has a platform, easy
|
|
||||||
if spec.architecture.platform:
|
|
||||||
new_plat = spack.platforms.by_name(spec.architecture.platform)
|
|
||||||
else:
|
|
||||||
# Else if anyone else has a platform, take the closest one
|
|
||||||
# Search up, then down, along build/link deps first
|
|
||||||
# Then any nearest. Algorithm from compilerspec search
|
|
||||||
platform_spec = find_spec(spec, lambda x: x.architecture and x.architecture.platform)
|
|
||||||
if platform_spec:
|
|
||||||
new_plat = spack.platforms.by_name(platform_spec.architecture.platform)
|
|
||||||
else:
|
|
||||||
# If no platform anywhere in this spec, grab the default
|
|
||||||
new_plat = spack.platforms.host()
|
|
||||||
|
|
||||||
# Get nearest spec with relevant platform and an os
|
|
||||||
# Generally, same algorithm as finding platform, except we only
|
|
||||||
# consider specs that have a platform
|
|
||||||
if spec.architecture.os:
|
|
||||||
new_os = spec.architecture.os
|
|
||||||
else:
|
|
||||||
new_os_spec = find_spec(
|
|
||||||
spec,
|
|
||||||
lambda x: (
|
|
||||||
x.architecture
|
|
||||||
and x.architecture.platform == str(new_plat)
|
|
||||||
and x.architecture.os
|
|
||||||
),
|
|
||||||
)
|
|
||||||
if new_os_spec:
|
|
||||||
new_os = new_os_spec.architecture.os
|
|
||||||
else:
|
|
||||||
new_os = new_plat.operating_system("default_os")
|
|
||||||
|
|
||||||
# Get the nearest spec with relevant platform and a target
|
|
||||||
# Generally, same algorithm as finding os
|
|
||||||
curr_target = None
|
|
||||||
if spec.architecture.target:
|
|
||||||
curr_target = spec.architecture.target
|
|
||||||
if spec.architecture.target and spec.architecture.target_concrete:
|
|
||||||
new_target = spec.architecture.target
|
|
||||||
else:
|
|
||||||
new_target_spec = find_spec(
|
|
||||||
spec,
|
|
||||||
lambda x: (
|
|
||||||
x.architecture
|
|
||||||
and x.architecture.platform == str(new_plat)
|
|
||||||
and x.architecture.target
|
|
||||||
and x.architecture.target != curr_target
|
|
||||||
),
|
|
||||||
)
|
|
||||||
if new_target_spec:
|
|
||||||
if curr_target:
|
|
||||||
# constrain one target by the other
|
|
||||||
new_target_arch = spack.spec.ArchSpec(
|
|
||||||
(None, None, new_target_spec.architecture.target)
|
|
||||||
)
|
|
||||||
curr_target_arch = spack.spec.ArchSpec((None, None, curr_target))
|
|
||||||
curr_target_arch.constrain(new_target_arch)
|
|
||||||
new_target = curr_target_arch.target
|
|
||||||
else:
|
|
||||||
new_target = new_target_spec.architecture.target
|
|
||||||
else:
|
|
||||||
# To get default platform, consider package prefs
|
|
||||||
if PackagePrefs.has_preferred_targets(spec.name):
|
|
||||||
new_target = self.target_from_package_preferences(spec)
|
|
||||||
else:
|
|
||||||
new_target = new_plat.target("default_target")
|
|
||||||
if curr_target:
|
|
||||||
# convert to ArchSpec to compare satisfaction
|
|
||||||
new_target_arch = spack.spec.ArchSpec((None, None, str(new_target)))
|
|
||||||
curr_target_arch = spack.spec.ArchSpec((None, None, str(curr_target)))
|
|
||||||
|
|
||||||
if not new_target_arch.intersects(curr_target_arch):
|
|
||||||
# new_target is an incorrect guess based on preferences
|
|
||||||
# and/or default
|
|
||||||
valid_target_ranges = str(curr_target).split(",")
|
|
||||||
for target_range in valid_target_ranges:
|
|
||||||
t_min, t_sep, t_max = target_range.partition(":")
|
|
||||||
if not t_sep:
|
|
||||||
new_target = t_min
|
|
||||||
break
|
|
||||||
elif t_max:
|
|
||||||
new_target = t_max
|
|
||||||
break
|
|
||||||
elif t_min:
|
|
||||||
# TODO: something better than picking first
|
|
||||||
new_target = t_min
|
|
||||||
break
|
|
||||||
|
|
||||||
# Construct new architecture, compute whether spec changed
|
|
||||||
arch_spec = (str(new_plat), str(new_os), str(new_target))
|
|
||||||
new_arch = spack.spec.ArchSpec(arch_spec)
|
|
||||||
spec_changed = new_arch != spec.architecture
|
|
||||||
spec.architecture = new_arch
|
|
||||||
return spec_changed
|
|
||||||
|
|
||||||
def target_from_package_preferences(self, spec):
|
|
||||||
"""Returns the preferred target from the package preferences if
|
|
||||||
there's any.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
spec: abstract spec to be concretized
|
|
||||||
"""
|
|
||||||
target_prefs = PackagePrefs(spec.name, "target")
|
|
||||||
target_specs = [spack.spec.Spec("target=%s" % tname) for tname in archspec.cpu.TARGETS]
|
|
||||||
|
|
||||||
def tspec_filter(s):
|
|
||||||
# Filter target specs by whether the architecture
|
|
||||||
# family is the current machine type. This ensures
|
|
||||||
# we only consider x86_64 targets when on an
|
|
||||||
# x86_64 machine, etc. This may need to change to
|
|
||||||
# enable setting cross compiling as a default
|
|
||||||
target = archspec.cpu.TARGETS[str(s.architecture.target)]
|
|
||||||
arch_family_name = target.family.name
|
|
||||||
return arch_family_name == platform.machine()
|
|
||||||
|
|
||||||
# Sort filtered targets by package prefs
|
|
||||||
target_specs = list(filter(tspec_filter, target_specs))
|
|
||||||
target_specs.sort(key=target_prefs)
|
|
||||||
new_target = target_specs[0].architecture.target
|
|
||||||
return new_target
|
|
||||||
|
|
||||||
def concretize_variants(self, spec):
|
|
||||||
"""If the spec already has variants filled in, return. Otherwise, add
|
|
||||||
the user preferences from packages.yaml or the default variants from
|
|
||||||
the package specification.
|
|
||||||
"""
|
|
||||||
changed = False
|
|
||||||
preferred_variants = PackagePrefs.preferred_variants(spec.name)
|
|
||||||
pkg_cls = spec.package_class
|
|
||||||
for name, entry in pkg_cls.variants.items():
|
|
||||||
variant, when = entry
|
|
||||||
var = spec.variants.get(name, None)
|
|
||||||
if var and "*" in var:
|
|
||||||
# remove variant wildcard before concretizing
|
|
||||||
# wildcard cannot be combined with other variables in a
|
|
||||||
# multivalue variant, a concrete variant cannot have the value
|
|
||||||
# wildcard, and a wildcard does not constrain a variant
|
|
||||||
spec.variants.pop(name)
|
|
||||||
if name not in spec.variants and any(spec.satisfies(w) for w in when):
|
|
||||||
changed = True
|
|
||||||
if name in preferred_variants:
|
|
||||||
spec.variants[name] = preferred_variants.get(name)
|
|
||||||
else:
|
|
||||||
spec.variants[name] = variant.make_default()
|
|
||||||
if name in spec.variants and not any(spec.satisfies(w) for w in when):
|
|
||||||
raise vt.InvalidVariantForSpecError(name, when, spec)
|
|
||||||
|
|
||||||
return changed
|
|
||||||
|
|
||||||
def concretize_compiler(self, spec):
|
|
||||||
"""If the spec already has a compiler, we're done. If not, then take
|
|
||||||
the compiler used for the nearest ancestor with a compiler
|
|
||||||
spec and use that. If the ancestor's compiler is not
|
|
||||||
concrete, then used the preferred compiler as specified in
|
|
||||||
spackconfig.
|
|
||||||
|
|
||||||
Intuition: Use the spackconfig default if no package that depends on
|
|
||||||
this one has a strict compiler requirement. Otherwise, try to
|
|
||||||
build with the compiler that will be used by libraries that
|
|
||||||
link to this one, to maximize compatibility.
|
|
||||||
"""
|
|
||||||
# Pass on concretizing the compiler if the target or operating system
|
|
||||||
# is not yet determined
|
|
||||||
if not spec.architecture.concrete:
|
|
||||||
# We haven't changed, but other changes need to happen before we
|
|
||||||
# continue. `return True` here to force concretization to keep
|
|
||||||
# running.
|
|
||||||
return True
|
|
||||||
|
|
||||||
# Only use a matching compiler if it is of the proper style
|
|
||||||
# Takes advantage of the proper logic already existing in
|
|
||||||
# compiler_for_spec Should think whether this can be more
|
|
||||||
# efficient
|
|
||||||
def _proper_compiler_style(cspec, aspec):
|
|
||||||
compilers = spack.compilers.compilers_for_spec(cspec, arch_spec=aspec)
|
|
||||||
# If the spec passed as argument is concrete we want to check
|
|
||||||
# the versions match exactly
|
|
||||||
if (
|
|
||||||
cspec.concrete
|
|
||||||
and compilers
|
|
||||||
and cspec.version not in [c.version for c in compilers]
|
|
||||||
):
|
|
||||||
return []
|
|
||||||
|
|
||||||
return compilers
|
|
||||||
|
|
||||||
if spec.compiler and spec.compiler.concrete:
|
|
||||||
if self.check_for_compiler_existence and not _proper_compiler_style(
|
|
||||||
spec.compiler, spec.architecture
|
|
||||||
):
|
|
||||||
_compiler_concretization_failure(spec.compiler, spec.architecture)
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Find another spec that has a compiler, or the root if none do
|
|
||||||
other_spec = spec if spec.compiler else find_spec(spec, lambda x: x.compiler, spec.root)
|
|
||||||
other_compiler = other_spec.compiler
|
|
||||||
assert other_spec
|
|
||||||
|
|
||||||
# Check if the compiler is already fully specified
|
|
||||||
if other_compiler and other_compiler.concrete:
|
|
||||||
if self.check_for_compiler_existence and not _proper_compiler_style(
|
|
||||||
other_compiler, spec.architecture
|
|
||||||
):
|
|
||||||
_compiler_concretization_failure(other_compiler, spec.architecture)
|
|
||||||
spec.compiler = other_compiler
|
|
||||||
return True
|
|
||||||
|
|
||||||
if other_compiler: # Another node has abstract compiler information
|
|
||||||
compiler_list = spack.compilers.find_specs_by_arch(other_compiler, spec.architecture)
|
|
||||||
if not compiler_list:
|
|
||||||
# We don't have a matching compiler installed
|
|
||||||
if not self.check_for_compiler_existence:
|
|
||||||
# Concretize compiler spec versions as a package to build
|
|
||||||
cpkg_spec = spack.compilers.pkg_spec_for_compiler(other_compiler)
|
|
||||||
self.concretize_version(cpkg_spec)
|
|
||||||
spec.compiler = spack.spec.CompilerSpec(
|
|
||||||
other_compiler.name, cpkg_spec.versions
|
|
||||||
)
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
# No compiler with a satisfactory spec was found
|
|
||||||
raise UnavailableCompilerVersionError(other_compiler, spec.architecture)
|
|
||||||
else:
|
|
||||||
# We have no hints to go by, grab any compiler
|
|
||||||
compiler_list = spack.compilers.all_compiler_specs()
|
|
||||||
if not compiler_list:
|
|
||||||
# Spack has no compilers.
|
|
||||||
raise spack.compilers.NoCompilersError()
|
|
||||||
|
|
||||||
# By default, prefer later versions of compilers
|
|
||||||
compiler_list = sorted(compiler_list, key=lambda x: (x.name, x.version), reverse=True)
|
|
||||||
ppk = PackagePrefs(other_spec.name, "compiler")
|
|
||||||
matches = sorted(compiler_list, key=ppk)
|
|
||||||
|
|
||||||
# copy concrete version into other_compiler
|
|
||||||
try:
|
|
||||||
spec.compiler = next(
|
|
||||||
c for c in matches if _proper_compiler_style(c, spec.architecture)
|
|
||||||
).copy()
|
|
||||||
except StopIteration:
|
|
||||||
# No compiler with a satisfactory spec has a suitable arch
|
|
||||||
_compiler_concretization_failure(other_compiler, spec.architecture)
|
|
||||||
|
|
||||||
assert spec.compiler.concrete
|
|
||||||
return True # things changed.
|
|
||||||
|
|
||||||
def concretize_compiler_flags(self, spec):
|
|
||||||
"""
|
|
||||||
The compiler flags are updated to match those of the spec whose
|
|
||||||
compiler is used, defaulting to no compiler flags in the spec.
|
|
||||||
Default specs set at the compiler level will still be added later.
|
|
||||||
"""
|
|
||||||
# Pass on concretizing the compiler flags if the target or operating
|
|
||||||
# system is not set.
|
|
||||||
if not spec.architecture.concrete:
|
|
||||||
# We haven't changed, but other changes need to happen before we
|
|
||||||
# continue. `return True` here to force concretization to keep
|
|
||||||
# running.
|
|
||||||
return True
|
|
||||||
|
|
||||||
compiler_match = lambda other: (
|
|
||||||
spec.compiler == other.compiler and spec.architecture == other.architecture
|
|
||||||
)
|
|
||||||
|
|
||||||
ret = False
|
|
||||||
for flag in spack.spec.FlagMap.valid_compiler_flags():
|
|
||||||
if flag not in spec.compiler_flags:
|
|
||||||
spec.compiler_flags[flag] = list()
|
|
||||||
try:
|
|
||||||
nearest = next(
|
|
||||||
p
|
|
||||||
for p in spec.traverse(direction="parents")
|
|
||||||
if (compiler_match(p) and (p is not spec) and flag in p.compiler_flags)
|
|
||||||
)
|
|
||||||
nearest_flags = nearest.compiler_flags.get(flag, [])
|
|
||||||
flags = spec.compiler_flags.get(flag, [])
|
|
||||||
if set(nearest_flags) - set(flags):
|
|
||||||
spec.compiler_flags[flag] = list(llnl.util.lang.dedupe(nearest_flags + flags))
|
|
||||||
ret = True
|
|
||||||
except StopIteration:
|
|
||||||
pass
|
|
||||||
|
|
||||||
# Include the compiler flag defaults from the config files
|
|
||||||
# This ensures that spack will detect conflicts that stem from a change
|
|
||||||
# in default compiler flags.
|
|
||||||
try:
|
|
||||||
compiler = spack.compilers.compiler_for_spec(spec.compiler, spec.architecture)
|
|
||||||
except spack.compilers.NoCompilerForSpecError:
|
|
||||||
if self.check_for_compiler_existence:
|
|
||||||
raise
|
|
||||||
return ret
|
|
||||||
for flag in compiler.flags:
|
|
||||||
config_flags = compiler.flags.get(flag, [])
|
|
||||||
flags = spec.compiler_flags.get(flag, [])
|
|
||||||
spec.compiler_flags[flag] = list(llnl.util.lang.dedupe(config_flags + flags))
|
|
||||||
if set(config_flags) - set(flags):
|
|
||||||
ret = True
|
|
||||||
|
|
||||||
return ret
|
|
||||||
|
|
||||||
def adjust_target(self, spec):
|
|
||||||
"""Adjusts the target microarchitecture if the compiler is too old
|
|
||||||
to support the default one.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
spec: spec to be concretized
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if spec was modified, False otherwise
|
|
||||||
"""
|
|
||||||
# To minimize the impact on performance this function will attempt
|
|
||||||
# to adjust the target only at the very first call once necessary
|
|
||||||
# information is set. It will just return False on subsequent calls.
|
|
||||||
# The way this is achieved is by initializing a generator and making
|
|
||||||
# this function return the next answer.
|
|
||||||
if not (spec.architecture and spec.architecture.concrete):
|
|
||||||
# Not ready, but keep going because we have work to do later
|
|
||||||
return True
|
|
||||||
|
|
||||||
def _make_only_one_call(spec):
|
|
||||||
yield self._adjust_target(spec)
|
|
||||||
while True:
|
|
||||||
yield False
|
|
||||||
|
|
||||||
if self._adjust_target_answer_generator is None:
|
|
||||||
self._adjust_target_answer_generator = _make_only_one_call(spec)
|
|
||||||
|
|
||||||
return next(self._adjust_target_answer_generator)
|
|
||||||
|
|
||||||
def _adjust_target(self, spec):
|
|
||||||
"""Assumes that the architecture and the compiler have been
|
|
||||||
set already and checks if the current target microarchitecture
|
|
||||||
is the default and can be optimized by the compiler.
|
|
||||||
|
|
||||||
If not, downgrades the microarchitecture until a suitable one
|
|
||||||
is found. If none can be found raise an error.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
spec: spec to be concretized
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if any modification happened, False otherwise
|
|
||||||
"""
|
|
||||||
import archspec.cpu
|
|
||||||
|
|
||||||
# Try to adjust the target only if it is the default
|
|
||||||
# target for this platform
|
|
||||||
current_target = spec.architecture.target
|
|
||||||
current_platform = spack.platforms.by_name(spec.architecture.platform)
|
|
||||||
|
|
||||||
default_target = current_platform.target("default_target")
|
|
||||||
if PackagePrefs.has_preferred_targets(spec.name):
|
|
||||||
default_target = self.target_from_package_preferences(spec)
|
|
||||||
|
|
||||||
if current_target != default_target or (
|
|
||||||
self.abstract_spec
|
|
||||||
and self.abstract_spec.architecture
|
|
||||||
and self.abstract_spec.architecture.concrete
|
|
||||||
):
|
|
||||||
return False
|
|
||||||
|
|
||||||
try:
|
|
||||||
current_target.optimization_flags(spec.compiler)
|
|
||||||
except archspec.cpu.UnsupportedMicroarchitecture:
|
|
||||||
-microarchitecture = current_target.microarchitecture
-for ancestor in microarchitecture.ancestors:
-candidate = None
-try:
-candidate = spack.target.Target(ancestor)
-candidate.optimization_flags(spec.compiler)
-except archspec.cpu.UnsupportedMicroarchitecture:
-continue
-
-if candidate is not None:
-msg = (
-"{0.name}@{0.version} cannot build optimized "
-'binaries for "{1}". Using best target possible: '
-'"{2}"'
-)
-msg = msg.format(spec.compiler, current_target, candidate)
-tty.warn(msg)
-spec.architecture.target = candidate
-return True
-else:
-raise
-
-return False


@contextmanager

@@ -719,19 +82,6 @@ def find_spec(spec, condition, default=None):
return default  # Nothing matched the condition; return default.
-
-
-def _compiler_concretization_failure(compiler_spec, arch):
-# Distinguish between the case that there are compilers for
-# the arch but not with the given compiler spec and the case that
-# there are no compilers for the arch at all
-if not spack.compilers.compilers_for_arch(arch):
-available_os_targets = set(
-(c.operating_system, c.target) for c in spack.compilers.all_compilers()
-)
-raise NoCompilersForArchError(arch, available_os_targets)
-else:
-raise UnavailableCompilerVersionError(compiler_spec, arch)
-
-
def concretize_specs_together(*abstract_specs, **kwargs):
"""Given a number of specs as input, tries to concretize them together.

@@ -744,12 +94,6 @@ def concretize_specs_together(*abstract_specs, **kwargs):
Returns:
List of concretized specs
"""
-if spack.config.get("config:concretizer", "clingo") == "original":
-return _concretize_specs_together_original(*abstract_specs, **kwargs)
-return _concretize_specs_together_new(*abstract_specs, **kwargs)
-
-
-def _concretize_specs_together_new(*abstract_specs, **kwargs):
import spack.solver.asp

allow_deprecated = spack.config.get("config:deprecated", False)

@@ -760,51 +104,6 @@ def _concretize_specs_together_new(*abstract_specs, **kwargs):
return [s.copy() for s in result.specs]
-
-
-def _concretize_specs_together_original(*abstract_specs, **kwargs):
-abstract_specs = [spack.spec.Spec(s) for s in abstract_specs]
-tmpdir = tempfile.mkdtemp()
-builder = spack.repo.MockRepositoryBuilder(tmpdir)
-# Split recursive specs, as it seems the concretizer has issue
-# respecting conditions on dependents expressed like
-# depends_on('foo ^bar@1.0'), see issue #11160
-split_specs = [
-dep.copy(deps=False) for spec1 in abstract_specs for dep in spec1.traverse(root=True)
-]
-builder.add_package(
-"concretizationroot", dependencies=[(str(x), None, None) for x in split_specs]
-)
-
-with spack.repo.use_repositories(builder.root, override=False):
-# Spec from a helper package that depends on all the abstract_specs
-concretization_root = spack.spec.Spec("concretizationroot")
-concretization_root.concretize(tests=kwargs.get("tests", False))
-# Retrieve the direct dependencies
-concrete_specs = [concretization_root[spec.name].copy() for spec in abstract_specs]
-
-return concrete_specs
-
-
-class NoCompilersForArchError(spack.error.SpackError):
-def __init__(self, arch, available_os_targets):
-err_msg = (
-"No compilers found"
-" for operating system %s and target %s."
-"\nIf previous installations have succeeded, the"
-" operating system may have been updated." % (arch.os, arch.target)
-)
-
-available_os_target_strs = list()
-for operating_system, t in available_os_targets:
-os_target_str = "%s-%s" % (operating_system, t) if t else operating_system
-available_os_target_strs.append(os_target_str)
-err_msg += (
-"\nCompilers are defined for the following"
-" operating systems and targets:\n\t" + "\n\t".join(available_os_target_strs)
-)
-
-super().__init__(err_msg, "Run 'spack compiler find' to add compilers.")
-
-
class UnavailableCompilerVersionError(spack.error.SpackError):
"""Raised when there is no available compiler that satisfies a
compiler spec."""

@@ -820,37 +119,3 @@ def __init__(self, compiler_spec, arch=None):
"'spack compilers' to see which compilers are already recognized"
" by spack.",
)
-
-
-class NoValidVersionError(spack.error.SpackError):
-"""Raised when there is no way to have a concrete version for a
-particular spec."""
-
-def __init__(self, spec):
-super().__init__(
-"There are no valid versions for %s that match '%s'" % (spec.name, spec.versions)
-)
-
-
-class InsufficientArchitectureInfoError(spack.error.SpackError):
-"""Raised when details on architecture cannot be collected from the
-system"""
-
-def __init__(self, spec, archs):
-super().__init__(
-"Cannot determine necessary architecture information for '%s': %s"
-% (spec.name, str(archs))
-)
-
-
-class NoBuildError(spack.error.SpecError):
-"""Raised when a package is configured with the buildable option False, but
-no satisfactory external versions can be found
-"""
-
-def __init__(self, spec):
-msg = (
-"The spec\n '%s'\n is configured as not buildable, "
-"and no matching external installs were found"
-)
-super().__init__(msg % spec)
@@ -99,7 +99,6 @@
"dirty": False,
"build_jobs": min(16, cpus_available()),
"build_stage": "$tempdir/spack-stage",
-"concretizer": "clingo",
"license_dir": spack.paths.default_license_dir,
}
}
@@ -24,6 +24,7 @@
import llnl.util.tty

import spack.config
+import spack.error
import spack.operating_systems.windows_os as winOs
import spack.spec
import spack.util.spack_yaml

@@ -136,10 +137,10 @@ def path_to_dict(search_paths: List[str]):
# entry overrides later entries
for search_path in reversed(search_paths):
try:
-for lib in os.listdir(search_path):
-lib_path = os.path.join(search_path, lib)
-if llnl.util.filesystem.is_readable_file(lib_path):
-path_to_lib[lib_path] = lib
+with os.scandir(search_path) as entries:
+path_to_lib.update(
+{entry.path: entry.name for entry in entries if entry.is_file()}
+)
except OSError as e:
msg = f"cannot scan '{search_path}' for external software: {str(e)}"
llnl.util.tty.debug(msg)
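The path_to_dict hunk above swaps os.listdir plus a per-file check for a single os.scandir pass, which avoids an extra stat() call per directory entry. A minimal, self-contained sketch of that pattern, outside of Spack (the helper name files_by_path is ours, not part of this diff):

import os
from typing import Dict, List


def files_by_path(search_paths: List[str]) -> Dict[str, str]:
    """Map file path -> file name for every regular file in the given directories."""
    path_to_file: Dict[str, str] = {}
    # Reverse the order so a file found in an earlier search path overrides one
    # with the same name found in a later path, as the comment above describes.
    for search_path in reversed(search_paths):
        try:
            with os.scandir(search_path) as entries:
                # DirEntry caches file-type information, so is_file() does not
                # need an extra stat() per entry the way os.listdir() would.
                path_to_file.update(
                    {entry.path: entry.name for entry in entries if entry.is_file()}
                )
        except OSError:
            continue  # unreadable or missing directory: skip it
    return path_to_file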
@@ -12,16 +12,19 @@
import re
import sys
import warnings
-from typing import Dict, List, Optional, Set, Tuple, Type
+from typing import Dict, Iterable, List, Optional, Set, Tuple, Type

import llnl.util.filesystem
import llnl.util.lang
import llnl.util.tty

+import spack.package_base
+import spack.repo
import spack.util.elf as elf_utils
import spack.util.environment
import spack.util.environment as environment
import spack.util.ld_so_conf
+import spack.util.parallel

from .common import (
DetectedPackage,

@@ -79,26 +82,27 @@ def executables_in_path(path_hints: List[str]) -> Dict[str, str]:
path_hints: list of paths to be searched. If None the list will be
constructed based on the PATH environment variable.
"""
-search_paths = llnl.util.filesystem.search_paths_for_executables(*path_hints)
-return path_to_dict(search_paths)
+return path_to_dict(llnl.util.filesystem.search_paths_for_executables(*path_hints))


-def accept_elf(path, host_compat):
+def accept_elf(entry: os.DirEntry, host_compat: Tuple[bool, bool, int]):
"""Accept an ELF file if the header matches the given compat triplet. In case it's not an ELF
(e.g. static library, or some arbitrary file, fall back to is_readable_file)."""
# Fast path: assume libraries at least have .so in their basename.
# Note: don't replace with splitext, because of libsmth.so.1.2.3 file names.
-if ".so" not in os.path.basename(path):
-return llnl.util.filesystem.is_readable_file(path)
+if ".so" not in entry.name:
+return is_readable_file(entry)
try:
-return host_compat == elf_utils.get_elf_compat(path)
+return host_compat == elf_utils.get_elf_compat(entry.path)
except (OSError, elf_utils.ElfParsingError):
-return llnl.util.filesystem.is_readable_file(path)
+return is_readable_file(entry)


-def libraries_in_ld_and_system_library_path(
-path_hints: Optional[List[str]] = None,
-) -> Dict[str, str]:
+def is_readable_file(entry: os.DirEntry) -> bool:
+return entry.is_file() and os.access(entry.path, os.R_OK)

+def system_library_paths() -> List[str]:
"""Get the paths of all libraries available from ``path_hints`` or the
following defaults:

@@ -112,82 +116,57 @@ def libraries_in_ld_and_system_library_path(
(i.e. the basename of the library path).

There may be multiple paths with the same basename. In this case it is
-assumed there are two different instances of the library.
+assumed there are two different instances of the library."""

-Args:
-path_hints: list of paths to be searched. If None the list will be
-constructed based on the set of LD_LIBRARY_PATH, LIBRARY_PATH,
-DYLD_LIBRARY_PATH, and DYLD_FALLBACK_LIBRARY_PATH environment
-variables as well as the standard system library paths.
-path_hints (list): list of paths to be searched. If ``None``, the default
-system paths are used.
-"""
-if path_hints:
-search_paths = llnl.util.filesystem.search_paths_for_libraries(*path_hints)
-else:
-search_paths = []
+search_paths: List[str] = []

-# Environment variables
-if sys.platform == "darwin":
-search_paths.extend(environment.get_path("DYLD_LIBRARY_PATH"))
-search_paths.extend(environment.get_path("DYLD_FALLBACK_LIBRARY_PATH"))
-elif sys.platform.startswith("linux"):
-search_paths.extend(environment.get_path("LD_LIBRARY_PATH"))
+if sys.platform == "win32":
+search_hints = spack.util.environment.get_path("PATH")
+search_paths.extend(llnl.util.filesystem.search_paths_for_libraries(*search_hints))
+# on Windows, some libraries (.dlls) are found in the bin directory or sometimes
+# at the search root. Add both of those options to the search scheme
+search_paths.extend(llnl.util.filesystem.search_paths_for_executables(*search_hints))

-# Dynamic linker paths
-search_paths.extend(spack.util.ld_so_conf.host_dynamic_linker_search_paths())

-# Drop redundant paths
-search_paths = list(filter(os.path.isdir, search_paths))

-# Make use we don't doubly list /usr/lib and /lib etc
-search_paths = list(llnl.util.lang.dedupe(search_paths, key=file_identifier))

-try:
-host_compat = elf_utils.get_elf_compat(sys.executable)
-accept = lambda path: accept_elf(path, host_compat)
-except (OSError, elf_utils.ElfParsingError):
-accept = llnl.util.filesystem.is_readable_file

-path_to_lib = {}
-# Reverse order of search directories so that a lib in the first
-# search path entry overrides later entries
-for search_path in reversed(search_paths):
-for lib in os.listdir(search_path):
-lib_path = os.path.join(search_path, lib)
-if accept(lib_path):
-path_to_lib[lib_path] = lib
-return path_to_lib

-def libraries_in_windows_paths(path_hints: Optional[List[str]] = None) -> Dict[str, str]:
-"""Get the paths of all libraries available from the system PATH paths.
-
-For more details, see `libraries_in_ld_and_system_library_path` regarding
-return type and contents.
-
-Args:
-path_hints: list of paths to be searched. If None the list will be
-constructed based on the set of PATH environment
-variables as well as the standard system library paths.
-"""
-search_hints = (
-path_hints if path_hints is not None else spack.util.environment.get_path("PATH")
-)
-search_paths = llnl.util.filesystem.search_paths_for_libraries(*search_hints)
-# on Windows, some libraries (.dlls) are found in the bin directory or sometimes
-# at the search root. Add both of those options to the search scheme
-search_paths.extend(llnl.util.filesystem.search_paths_for_executables(*search_hints))
-if path_hints is None:
# if no user provided path was given, add defaults to the search
search_paths.extend(WindowsKitExternalPaths.find_windows_kit_lib_paths())
# SDK and WGL should be handled by above, however on occasion the WDK is in an atypical
# location, so we handle that case specifically.
search_paths.extend(WindowsKitExternalPaths.find_windows_driver_development_kit_paths())
-return path_to_dict(search_paths)
+elif sys.platform == "darwin":
+search_paths.extend(environment.get_path("DYLD_LIBRARY_PATH"))
+search_paths.extend(environment.get_path("DYLD_FALLBACK_LIBRARY_PATH"))
+search_paths.extend(spack.util.ld_so_conf.host_dynamic_linker_search_paths())
+elif sys.platform.startswith("linux"):
+search_paths.extend(environment.get_path("LD_LIBRARY_PATH"))
+search_paths.extend(spack.util.ld_so_conf.host_dynamic_linker_search_paths())

+# Drop redundant paths
+search_paths = list(filter(os.path.isdir, search_paths))

+# Make use we don't doubly list /usr/lib and /lib etc
+search_paths = list(llnl.util.lang.dedupe(search_paths, key=file_identifier))

+return search_paths


-def _group_by_prefix(paths: Set[str]) -> Dict[str, Set[str]]:
+def libraries_in_path(search_paths: List[str]) -> Dict[str, str]:
+try:
+host_compat = elf_utils.get_elf_compat(sys.executable)
+accept = lambda entry: accept_elf(entry, host_compat)
+except (OSError, elf_utils.ElfParsingError):
+accept = is_readable_file

+path_to_lib = {}
+# Reverse order of search directories so that a lib in the first
+# search path entry overrides later entries
+for search_path in reversed(search_paths):
+with os.scandir(search_path) as it:
+for entry in it:
+if accept(entry):
+path_to_lib[entry.path] = entry.name
+return path_to_lib


+def _group_by_prefix(paths: List[str]) -> Dict[str, Set[str]]:
groups = collections.defaultdict(set)
for p in paths:
groups[os.path.dirname(p)].add(p)
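Splitting path resolution (system_library_paths) from scanning (libraries_in_path) means the platform-specific search directories are computed once and the resulting scan can be reused for any number of packages. A rough sketch of the call flow, assuming the two helpers live in spack.detection.path as this hunk suggests; the regex is a hypothetical package search pattern:

import re

# Assumption: these helpers are importable from spack.detection.path.
from spack.detection.path import libraries_in_path, system_library_paths

search_paths = system_library_paths()           # ld.so.conf, LD_LIBRARY_PATH, etc.
path_to_name = libraries_in_path(search_paths)   # e.g. {"/usr/lib/libssl.so.3": "libssl.so.3"}

pattern = re.compile(r"libssl\.so")               # hypothetical package pattern
matches = sorted(p for p, name in path_to_name.items() if pattern.search(name))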
@@ -197,10 +176,13 @@ def _group_by_prefix(paths: Set[str]) -> Dict[str, Set[str]]:
class Finder:
"""Inspects the file-system looking for packages. Guesses places where to look using PATH."""

+def __init__(self, paths: Dict[str, str]):
+self.paths = paths
+
def default_path_hints(self) -> List[str]:
return []

-def search_patterns(self, *, pkg: Type["spack.package_base.PackageBase"]) -> List[str]:
+def search_patterns(self, *, pkg: Type[spack.package_base.PackageBase]) -> Optional[List[str]]:
"""Returns the list of patterns used to match candidate files.

Args:

@@ -208,15 +190,6 @@ def search_patterns(self, *, pkg: Type["spack.package_base.PackageBase"]) -> Lis
"""
raise NotImplementedError("must be implemented by derived classes")

-def candidate_files(self, *, patterns: List[str], paths: List[str]) -> List[str]:
-"""Returns a list of candidate files found on the system.
-
-Args:
-patterns: search patterns to be used for matching files
-paths: paths where to search for files
-"""
-raise NotImplementedError("must be implemented by derived classes")
-
def prefix_from_path(self, *, path: str) -> str:
"""Given a path where a file was found, returns the corresponding prefix.

@@ -226,7 +199,7 @@ def prefix_from_path(self, *, path: str) -> str:
raise NotImplementedError("must be implemented by derived classes")

def detect_specs(
-self, *, pkg: Type["spack.package_base.PackageBase"], paths: List[str]
+self, *, pkg: Type[spack.package_base.PackageBase], paths: List[str]
) -> List[DetectedPackage]:
"""Given a list of files matching the search patterns, returns a list of detected specs.

@@ -243,7 +216,9 @@ def detect_specs(
return []

result = []
-for candidate_path, items_in_prefix in sorted(_group_by_prefix(set(paths)).items()):
+for candidate_path, items_in_prefix in _group_by_prefix(
+llnl.util.lang.dedupe(paths)
+).items():
# TODO: multiple instances of a package can live in the same
# prefix, and a package implementation can return multiple specs
# for one prefix, but without additional details (e.g. about the

@@ -298,50 +273,36 @@ def detect_specs(

return result

-def find(
-self, *, pkg_name: str, initial_guess: Optional[List[str]] = None
-) -> List[DetectedPackage]:
+def find(self, *, pkg_name: str, repository: spack.repo.Repo) -> List[DetectedPackage]:
"""For a given package, returns a list of detected specs.

Args:
pkg_name: package being detected
-initial_guess: initial list of paths to search from the caller
-if None, default paths are searched. If this
-is an empty list, nothing will be searched.
+repository: repository to retrieve the package
"""
-import spack.repo
+pkg_cls = repository.get_pkg_class(pkg_name)

-pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
patterns = self.search_patterns(pkg=pkg_cls)
if not patterns:
return []
-if initial_guess is None:
-initial_guess = self.default_path_hints()
-initial_guess.extend(common_windows_package_paths(pkg_cls))
-candidates = self.candidate_files(patterns=patterns, paths=initial_guess)
-result = self.detect_specs(pkg=pkg_cls, paths=candidates)
-return result
+regex = re.compile("|".join(patterns))
+paths = [path for path, file in self.paths.items() if regex.search(file)]
+paths.sort()
+return self.detect_specs(pkg=pkg_cls, paths=paths)


class ExecutablesFinder(Finder):
-def default_path_hints(self) -> List[str]:
-return spack.util.environment.get_path("PATH")
+@classmethod
+def in_search_paths(cls, paths: List[str]):
+return cls(executables_in_path(paths))

-def search_patterns(self, *, pkg: Type["spack.package_base.PackageBase"]) -> List[str]:
-result = []
+@classmethod
+def in_default_paths(cls):
+return cls.in_search_paths(spack.util.environment.get_path("PATH"))

+def search_patterns(self, *, pkg: Type[spack.package_base.PackageBase]) -> Optional[List[str]]:
if hasattr(pkg, "executables") and hasattr(pkg, "platform_executables"):
-result = pkg.platform_executables()
-return result
+return pkg.platform_executables()
+return None

-def candidate_files(self, *, patterns: List[str], paths: List[str]) -> List[str]:
-executables_by_path = executables_in_path(path_hints=paths)
-patterns = [re.compile(x) for x in patterns]
-result = []
-for compiled_re in patterns:
-for path, exe in executables_by_path.items():
-if compiled_re.search(exe):
-result.append(path)
-return list(sorted(set(result)))
-
def prefix_from_path(self, *, path: str) -> str:
result = executable_prefix(path)

@@ -352,29 +313,18 @@ def prefix_from_path(self, *, path: str) -> str:


class LibrariesFinder(Finder):
-"""Finds libraries on the system, searching by LD_LIBRARY_PATH, LIBRARY_PATH,
-DYLD_LIBRARY_PATH, DYLD_FALLBACK_LIBRARY_PATH, and standard system library paths
-"""
+"""Finds libraries in the provided paths matching package search patterns."""

-def search_patterns(self, *, pkg: Type["spack.package_base.PackageBase"]) -> List[str]:
-result = []
-if hasattr(pkg, "libraries"):
-result = pkg.libraries
-return result
+@classmethod
+def in_search_paths(cls, paths: List[str]):
+return cls(libraries_in_path(paths))

-def candidate_files(self, *, patterns: List[str], paths: List[str]) -> List[str]:
-libraries_by_path = (
-libraries_in_ld_and_system_library_path(path_hints=paths)
-if sys.platform != "win32"
-else libraries_in_windows_paths(path_hints=paths)
-)
-patterns = [re.compile(x) for x in patterns]
-result = []
-for compiled_re in patterns:
-for path, exe in libraries_by_path.items():
-if compiled_re.search(exe):
-result.append(path)
-return result
+@classmethod
+def in_default_paths(cls):
+return cls.in_search_paths(system_library_paths())

+def search_patterns(self, *, pkg: Type[spack.package_base.PackageBase]) -> Optional[List[str]]:
+return getattr(pkg, "libraries", None)

def prefix_from_path(self, *, path: str) -> str:
result = library_prefix(path)

@@ -385,10 +335,7 @@ def prefix_from_path(self, *, path: str) -> str:


def by_path(
-packages_to_search: List[str],
-*,
-path_hints: Optional[List[str]] = None,
-max_workers: Optional[int] = None,
+packages_to_search: Iterable[str], *, path_hints: Optional[List[str]] = None
) -> Dict[str, List[DetectedPackage]]:
"""Return the list of packages that have been detected on the system, keyed by
unqualified package name.

@@ -397,22 +344,26 @@ def by_path(
packages_to_search: list of packages to be detected. Each package can be either unqualified
of fully qualified
path_hints: initial list of paths to be searched
-max_workers: maximum number of workers to search for packages in parallel
"""
# TODO: Packages should be able to define both .libraries and .executables in the future
# TODO: determine_spec_details should get all relevant libraries and executables in one call
-executables_finder, libraries_finder = ExecutablesFinder(), LibrariesFinder()
+if path_hints is None:
+exe_finder = ExecutablesFinder.in_default_paths()
+lib_finder = LibrariesFinder.in_default_paths()
+else:
+exe_finder = ExecutablesFinder.in_search_paths(path_hints)
+lib_finder = LibrariesFinder.in_search_paths(path_hints)

detected_specs_by_package: Dict[str, Tuple[concurrent.futures.Future, ...]] = {}

result = collections.defaultdict(list)
-with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
+repository = spack.repo.PATH.ensure_unwrapped()
+with spack.util.parallel.make_concurrent_executor() as executor:
for pkg in packages_to_search:
executable_future = executor.submit(
-executables_finder.find, pkg_name=pkg, initial_guess=path_hints
-)
-library_future = executor.submit(
-libraries_finder.find, pkg_name=pkg, initial_guess=path_hints
+exe_finder.find, pkg_name=pkg, repository=repository
)
+library_future = executor.submit(lib_finder.find, pkg_name=pkg, repository=repository)
detected_specs_by_package[pkg] = executable_future, library_future

for pkg_name, futures in detected_specs_by_package.items():

@@ -428,7 +379,7 @@ def by_path(
)
except Exception as e:
llnl.util.tty.debug(
-f"[EXTERNAL DETECTION] Skipping {pkg_name}: exception occured {e}"
+f"[EXTERNAL DETECTION] Skipping {pkg_name} due to: {e.__class__}: {e}"
)

return result
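After this refactor a Finder is built once from a pre-scanned path-to-filename map, and find() receives the package repository explicitly so it can run inside worker processes. A condensed usage sketch; the import path and the package names are illustrative assumptions, while the classmethods and the find() signature are the ones introduced above:

import spack.repo

# Assumption: the finders are importable from spack.detection.path.
from spack.detection.path import ExecutablesFinder, LibrariesFinder

repository = spack.repo.PATH.ensure_unwrapped()

# Scan PATH and the system library paths once...
exe_finder = ExecutablesFinder.in_default_paths()
lib_finder = LibrariesFinder.in_default_paths()

# ...then match any number of packages against the cached scans.
for pkg_name in ["cmake", "openssl"]:  # illustrative package names
    detected = exe_finder.find(pkg_name=pkg_name, repository=repository)
    detected += lib_finder.find(pkg_name=pkg_name, repository=repository)
    print(pkg_name, len(detected), "detected spec(s)")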
@@ -81,7 +81,17 @@ class OpenMpi(Package):
]

#: These are variant names used by Spack internally; packages can't use them
-reserved_names = ["patches", "dev_path"]
+reserved_names = [
+"arch",
+"architecture",
+"dev_path",
+"namespace",
+"operating_system",
+"os",
+"patches",
+"platform",
+"target",
+]

#: Names of possible directives. This list is mostly populated using the @directive decorator.
#: Some directives leverage others and in that case are not automatically added.
@@ -5,6 +5,7 @@
import collections
import collections.abc
import contextlib
+import errno
import os
import pathlib
import re

@@ -788,6 +789,23 @@ def regenerate(self, concrete_roots: List[Spec]) -> None:
root_dirname = os.path.dirname(self.root)
tmp_symlink_name = os.path.join(root_dirname, "._view_link")

+# Remove self.root if is it an empty dir, since we need a symlink there. Note that rmdir
+# fails if self.root is a symlink.
+try:
+os.rmdir(self.root)
+except (FileNotFoundError, NotADirectoryError):
+pass
+except OSError as e:
+if e.errno == errno.ENOTEMPTY:
+msg = "it is a non-empty directory"
+elif e.errno == errno.EACCES:
+msg = "of insufficient permissions"
+else:
+raise
+raise SpackEnvironmentViewError(
+f"The environment view in {self.root} cannot not be created because {msg}."
+) from e
+
# Create a new view
try:
fs.mkdirp(new_root)

@@ -1196,7 +1214,6 @@ def scope_name(self):
def include_concrete_envs(self):
"""Copy and save the included envs' specs internally"""

-lockfile_meta = None
root_hash_seen = set()
concrete_hash_seen = set()
self.included_concrete_spec_data = {}

@@ -1207,37 +1224,26 @@ def include_concrete_envs(self):
raise SpackEnvironmentError(f"Unable to find env at {env_path}")

env = Environment(env_path)
-with open(env.lock_path) as f:
-lockfile_as_dict = env._read_lockfile(f)
-
-# Lockfile_meta must match each env and use at least format version 5
-if lockfile_meta is None:
-lockfile_meta = lockfile_as_dict["_meta"]
-elif lockfile_meta != lockfile_as_dict["_meta"]:
-raise SpackEnvironmentError("All lockfile _meta values must match")
-elif lockfile_meta["lockfile-version"] < 5:
-raise SpackEnvironmentError("The lockfile format must be at version 5 or higher")
-
+self.included_concrete_spec_data[env_path] = {"roots": [], "concrete_specs": {}}

# Copy unique root specs from env
-self.included_concrete_spec_data[env_path] = {"roots": []}
-for root_dict in lockfile_as_dict["roots"]:
+for root_dict in env._concrete_roots_dict():
if root_dict["hash"] not in root_hash_seen:
self.included_concrete_spec_data[env_path]["roots"].append(root_dict)
root_hash_seen.add(root_dict["hash"])

# Copy unique concrete specs from env
-for concrete_spec in lockfile_as_dict["concrete_specs"]:
-if concrete_spec not in concrete_hash_seen:
-self.included_concrete_spec_data[env_path].update(
-{"concrete_specs": lockfile_as_dict["concrete_specs"]}
+for dag_hash, spec_details in env._concrete_specs_dict().items():
+if dag_hash not in concrete_hash_seen:
+self.included_concrete_spec_data[env_path]["concrete_specs"].update(
+{dag_hash: spec_details}
)
-concrete_hash_seen.add(concrete_spec)
+concrete_hash_seen.add(dag_hash)

-if "include_concrete" in lockfile_as_dict.keys():
-self.included_concrete_spec_data[env_path]["include_concrete"] = lockfile_as_dict[
-"include_concrete"
-]
+# Copy transitive include data
+transitive = env.included_concrete_spec_data
+if transitive:
+self.included_concrete_spec_data[env_path]["include_concrete"] = transitive

self._read_lockfile_dict(self._to_lockfile_dict())
self.write()

@@ -1626,9 +1632,8 @@ def _concretize_separately(self, tests=False):
i += 1

# Ensure we don't try to bootstrap clingo in parallel
-if spack.config.get("config:concretizer", "clingo") == "clingo":
-with spack.bootstrap.ensure_bootstrap_configuration():
-spack.bootstrap.ensure_clingo_importable_or_raise()
+with spack.bootstrap.ensure_bootstrap_configuration():
+spack.bootstrap.ensure_clingo_importable_or_raise()

# Ensure all the indexes have been built or updated, since
# otherwise the processes in the pool may timeout on waiting

@@ -2156,16 +2161,23 @@ def _get_environment_specs(self, recurse_dependencies=True):

return specs

-def _to_lockfile_dict(self):
-"""Create a dictionary to store a lockfile for this environment."""
+def _concrete_specs_dict(self):
concrete_specs = {}
for s in traverse.traverse_nodes(self.specs_by_hash.values(), key=traverse.by_dag_hash):
spec_dict = s.node_dict_with_hashes(hash=ht.dag_hash)
# Assumes no legacy formats, since this was just created.
spec_dict[ht.dag_hash.name] = s.dag_hash()
concrete_specs[s.dag_hash()] = spec_dict
+return concrete_specs
+
+def _concrete_roots_dict(self):
hash_spec_list = zip(self.concretized_order, self.concretized_user_specs)
+return [{"hash": h, "spec": str(s)} for h, s in hash_spec_list]
+
+def _to_lockfile_dict(self):
+"""Create a dictionary to store a lockfile for this environment."""
+concrete_specs = self._concrete_specs_dict()
+root_specs = self._concrete_roots_dict()

spack_dict = {"version": spack.spack_version}
spack_commit = spack.main.get_spack_commit()

@@ -2186,7 +2198,7 @@ def _to_lockfile_dict(self):
# spack version information
"spack": spack_dict,
# users specs + hashes are the 'roots' of the environment
-"roots": [{"hash": h, "spec": str(s)} for h, s in hash_spec_list],
+"roots": root_specs,
# Concrete specs by hash, including dependencies
"concrete_specs": concrete_specs,
}
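The lockfile serialization is now assembled from two helpers, _concrete_roots_dict and _concrete_specs_dict, so include_concrete_envs can reuse them without re-reading lockfiles from disk. A simplified stand-in for how the pieces compose, with hypothetical hashes, spec strings, and version; the real method also records _meta information and the Spack commit:

from typing import Dict, List


def to_lockfile_dict(
    roots: List[Dict[str, str]], concrete_specs: Dict[str, dict], spack_version: str
) -> dict:
    # Mirrors how _to_lockfile_dict() embeds the two helper results.
    return {
        "spack": {"version": spack_version},
        # user specs + hashes are the 'roots' of the environment
        "roots": roots,
        # concrete specs by hash, including dependencies
        "concrete_specs": concrete_specs,
    }


# Illustrative shapes only.
roots = [{"hash": "abcdef1234567890", "spec": "zlib@1.3"}]
concrete_specs = {"abcdef1234567890": {"name": "zlib", "version": "1.3"}}
lockfile = to_lockfile_dict(roots, concrete_specs, "0.23.0.dev0")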
@@ -30,6 +30,7 @@
import shutil
import urllib.error
import urllib.parse
+import urllib.request
from pathlib import PurePath
from typing import List, Optional

@@ -53,7 +54,7 @@
import spack.version
import spack.version.git_ref_lookup
from spack.util.compression import decompressor_for
-from spack.util.executable import CommandNotFoundError, which
+from spack.util.executable import CommandNotFoundError, Executable, which

#: List of all fetch strategies, created by FetchStrategy metaclass.
all_strategies = []

@@ -245,38 +246,30 @@ class URLFetchStrategy(FetchStrategy):

# these are checksum types. The generic 'checksum' is deprecated for
# specific hash names, but we need it for backward compatibility
-optional_attrs = list(crypto.hashes.keys()) + ["checksum"]
+optional_attrs = [*crypto.hashes.keys(), "checksum"]

-def __init__(self, url=None, checksum=None, **kwargs):
+def __init__(self, *, url: str, checksum: Optional[str] = None, **kwargs) -> None:
super().__init__(**kwargs)

-# Prefer values in kwargs to the positionals.
-self.url = kwargs.get("url", url)
+self.url = url
self.mirrors = kwargs.get("mirrors", [])

# digest can be set as the first argument, or from an explicit
# kwarg by the hash name.
-self.digest = kwargs.get("checksum", checksum)
+self.digest: Optional[str] = checksum
for h in self.optional_attrs:
if h in kwargs:
self.digest = kwargs[h]

-self.expand_archive = kwargs.get("expand", True)
-self.extra_options = kwargs.get("fetch_options", {})
-self._curl = None
-self.extension = kwargs.get("extension", None)
-
-if not self.url:
-raise ValueError("URLFetchStrategy requires a url for fetching.")
+self.expand_archive: bool = kwargs.get("expand", True)
+self.extra_options: dict = kwargs.get("fetch_options", {})
+self._curl: Optional[Executable] = None
+self.extension: Optional[str] = kwargs.get("extension", None)

@property
-def curl(self):
+def curl(self) -> Executable:
if not self._curl:
-try:
-self._curl = which("curl", required=True)
-except CommandNotFoundError as exc:
-tty.error(str(exc))
+self._curl = web_util.require_curl()
return self._curl

def source_id(self):
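With the keyword-only __init__, URLFetchStrategy can no longer be built from positional url/checksum arguments; call sites must name them. A small illustration with a placeholder URL and digest:

from spack.fetch_strategy import URLFetchStrategy

# Placeholder values; positional (url, checksum) arguments are no longer accepted.
fetcher = URLFetchStrategy(
    url="https://example.com/pkg-1.0.tar.gz",
    checksum="0" * 64,  # hypothetical sha256-style digest
)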
@@ -297,27 +290,23 @@ def candidate_urls(self):
@_needs_stage
def fetch(self):
if self.archive_file:
-tty.debug("Already downloaded {0}".format(self.archive_file))
+tty.debug(f"Already downloaded {self.archive_file}")
return

-url = None
-errors = []
+errors: List[Exception] = []
for url in self.candidate_urls:
-if not web_util.url_exists(url):
-tty.debug("URL does not exist: " + url)
-continue
-
try:
self._fetch_from_url(url)
break
except FailedDownloadError as e:
-errors.append(str(e))
-for msg in errors:
-tty.debug(msg)
+errors.extend(e.exceptions)
+else:
+raise FailedDownloadError(*errors)

if not self.archive_file:
-raise FailedDownloadError(url)
+raise FailedDownloadError(
+RuntimeError(f"Missing archive {self.archive_file} after fetching")
+)

def _fetch_from_url(self, url):
if spack.config.get("config:url_fetch_method") == "curl":

@@ -336,27 +325,28 @@ def _check_headers(self, headers):
@_needs_stage
def _fetch_urllib(self, url):
save_file = self.stage.save_filename
-tty.msg("Fetching {0}".format(url))

-# Run urllib but grab the mime type from the http headers
+request = urllib.request.Request(url, headers={"User-Agent": web_util.SPACK_USER_AGENT})

try:
-url, headers, response = web_util.read_from_url(url)
-except web_util.SpackWebError as e:
+response = web_util.urlopen(request)
+except (TimeoutError, urllib.error.URLError) as e:
# clean up archive on failure.
if self.archive_file:
os.remove(self.archive_file)
if os.path.lexists(save_file):
os.remove(save_file)
-msg = "urllib failed to fetch with error {0}".format(e)
-raise FailedDownloadError(url, msg)
+raise FailedDownloadError(e) from e
+
+tty.msg(f"Fetching {url}")

if os.path.lexists(save_file):
os.remove(save_file)

-with open(save_file, "wb") as _open_file:
-shutil.copyfileobj(response, _open_file)
+with open(save_file, "wb") as f:
+shutil.copyfileobj(response, f)

-self._check_headers(str(headers))
+self._check_headers(str(response.headers))

@_needs_stage
def _fetch_curl(self, url):
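The rewritten fetch() collects the underlying exceptions from every failed candidate URL and re-raises them together from the loop's else branch, which only runs when no break happened. A minimal standalone sketch of that control flow; fetch_first, download, mirrors, and DownloadFailed are our names, not Spack API:

class DownloadFailed(Exception):
    """Carries every underlying error from the attempted mirrors."""

    def __init__(self, *exceptions: Exception):
        super().__init__(f"{len(exceptions)} attempt(s) failed")
        self.exceptions = exceptions


def fetch_first(mirrors, download):
    errors = []
    for url in mirrors:
        try:
            data = download(url)  # may raise OSError, timeouts, HTTP errors...
            break  # success: the else clause below is skipped
        except Exception as e:
            errors.append(e)
    else:
        # Only reached when every candidate raised; re-raise them together.
        raise DownloadFailed(*errors)
    return data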
@@ -365,7 +355,7 @@ def _fetch_curl(self, url):
if self.stage.save_filename:
save_file = self.stage.save_filename
partial_file = self.stage.save_filename + ".part"
-tty.msg("Fetching {0}".format(url))
+tty.msg(f"Fetching {url}")
if partial_file:
save_args = [
"-C",

@@ -405,8 +395,8 @@ def _fetch_curl(self, url):

try:
web_util.check_curl_code(curl.returncode)
-except spack.error.FetchError as err:
-raise spack.fetch_strategy.FailedDownloadError(url, str(err))
+except spack.error.FetchError as e:
+raise FailedDownloadError(e) from e

self._check_headers(headers)

@@ -473,7 +463,7 @@ def check(self):
"""Check the downloaded archive against a checksum digest.
No-op if this stage checks code out of a repository."""
if not self.digest:
-raise NoDigestError("Attempt to check URLFetchStrategy with no digest.")
+raise NoDigestError(f"Attempt to check {self.__class__.__name__} with no digest.")

verify_checksum(self.archive_file, self.digest)

@@ -484,8 +474,8 @@ def reset(self):
"""
if not self.archive_file:
raise NoArchiveFileError(
-"Tried to reset URLFetchStrategy before fetching",
-"Failed on reset() for URL %s" % self.url,
+f"Tried to reset {self.__class__.__name__} before fetching",
+f"Failed on reset() for URL{self.url}",
)

# Remove everything but the archive from the stage

@@ -498,14 +488,10 @@ def reset(self):
self.expand()

def __repr__(self):
-url = self.url if self.url else "no url"
-return "%s<%s>" % (self.__class__.__name__, url)
+return f"{self.__class__.__name__}<{self.url}>"

def __str__(self):
-if self.url:
-return self.url
-else:
-return "[no url]"
+return self.url


@fetcher

@@ -518,7 +504,7 @@ def fetch(self):

# check whether the cache file exists.
if not os.path.isfile(path):
-raise NoCacheError("No cache of %s" % path)
+raise NoCacheError(f"No cache of {path}")

# remove old symlink if one is there.
filename = self.stage.save_filename

@@ -528,8 +514,8 @@ def fetch(self):
# Symlink to local cached archive.
symlink(path, filename)

-# Remove link if checksum fails, or subsequent fetchers
-# will assume they don't need to download.
+# Remove link if checksum fails, or subsequent fetchers will assume they don't need to
+# download.
if self.digest:
try:
self.check()

@@ -538,12 +524,12 @@ def fetch(self):
raise

# Notify the user how we fetched.
-tty.msg("Using cached archive: {0}".format(path))
+tty.msg(f"Using cached archive: {path}")


class OCIRegistryFetchStrategy(URLFetchStrategy):
-def __init__(self, url=None, checksum=None, **kwargs):
-super().__init__(url, checksum, **kwargs)
+def __init__(self, *, url: str, checksum: Optional[str] = None, **kwargs):
+super().__init__(url=url, checksum=checksum, **kwargs)

self._urlopen = kwargs.get("_urlopen", spack.oci.opener.urlopen)

@@ -554,13 +540,13 @@ def fetch(self):

try:
response = self._urlopen(self.url)
-except urllib.error.URLError as e:
+except (TimeoutError, urllib.error.URLError) as e:
# clean up archive on failure.
if self.archive_file:
os.remove(self.archive_file)
if os.path.lexists(file):
os.remove(file)
-raise FailedDownloadError(self.url, f"Failed to fetch {self.url}: {e}") from e
+raise FailedDownloadError(e) from e

if os.path.lexists(file):
os.remove(file)

@@ -725,6 +711,7 @@ class GitFetchStrategy(VCSFetchStrategy):
"submodules",
"get_full_repo",
"submodules_delete",
+"git_sparse_paths",
]

git_version_re = r"git version (\S+)"

@@ -740,6 +727,7 @@ def __init__(self, **kwargs):
self.submodules = kwargs.get("submodules", False)
self.submodules_delete = kwargs.get("submodules_delete", False)
self.get_full_repo = kwargs.get("get_full_repo", False)
+self.git_sparse_paths = kwargs.get("git_sparse_paths", None)

@property
def git_version(self):
@@ -807,38 +795,50 @@ def fetch(self):
tty.debug("Already fetched {0}".format(self.stage.source_path))
return

-self.clone(commit=self.commit, branch=self.branch, tag=self.tag)
+if self.git_sparse_paths:
+self._sparse_clone_src(commit=self.commit, branch=self.branch, tag=self.tag)
+else:
+self._clone_src(commit=self.commit, branch=self.branch, tag=self.tag)
+self.submodule_operations()

-def clone(self, dest=None, commit=None, branch=None, tag=None, bare=False):
+def bare_clone(self, dest):
"""
-Clone a repository to a path.
+Execute a bare clone for metadata only

-This method handles cloning from git, but does not require a stage.
-
-Arguments:
-dest (str or None): The path into which the code is cloned. If None,
-requires a stage and uses the stage's source path.
-commit (str or None): A commit to fetch from the remote. Only one of
-commit, branch, and tag may be non-None.
-branch (str or None): A branch to fetch from the remote.
-tag (str or None): A tag to fetch from the remote.
-bare (bool): Execute a "bare" git clone (--bare option to git)
+Requires a destination since bare cloning does not provide source
+and shouldn't be used for staging.
"""
# Default to spack source path
-dest = dest or self.stage.source_path
tty.debug("Cloning git repository: {0}".format(self._repo_info()))

git = self.git
debug = spack.config.get("config:debug")

-if bare:
-# We don't need to worry about which commit/branch/tag is checked out
-clone_args = ["clone", "--bare"]
-if not debug:
-clone_args.append("--quiet")
-clone_args.extend([self.url, dest])
-git(*clone_args)
-elif commit:
+# We don't need to worry about which commit/branch/tag is checked out
+clone_args = ["clone", "--bare"]
+if not debug:
+clone_args.append("--quiet")
+clone_args.extend([self.url, dest])
+git(*clone_args)
+
+def _clone_src(self, commit=None, branch=None, tag=None):
+"""
+Clone a repository to a path using git.
+
+Arguments:
+commit (str or None): A commit to fetch from the remote. Only one of
+commit, branch, and tag may be non-None.
+branch (str or None): A branch to fetch from the remote.
+tag (str or None): A tag to fetch from the remote.
+"""
+# Default to spack source path
+dest = self.stage.source_path
+tty.debug("Cloning git repository: {0}".format(self._repo_info()))
+
+git = self.git
+debug = spack.config.get("config:debug")
+
+if commit:
# Need to do a regular clone and check out everything if
# they asked for a particular commit.
clone_args = ["clone", self.url]

@@ -917,6 +917,85 @@ def clone(self, dest=None, commit=None, branch=None, tag=None, bare=False):
git(*pull_args, ignore_errors=1)
git(*co_args)

+def _sparse_clone_src(self, commit=None, branch=None, tag=None, **kwargs):
+"""
+Use git's sparse checkout feature to clone portions of a git repository
+
+Arguments:
+commit (str or None): A commit to fetch from the remote. Only one of
+commit, branch, and tag may be non-None.
+branch (str or None): A branch to fetch from the remote.
+tag (str or None): A tag to fetch from the remote.
+"""
+dest = self.stage.source_path
+git = self.git
+
+if self.git_version < spack.version.Version("2.25.0.0"):
+# code paths exist where the package is not set. Assure some indentifier for the
+# package that was configured for sparse checkout exists in the error message
+identifier = str(self.url)
+if self.package:
+identifier += f" ({self.package.name})"
+tty.warn(
+(
+f"{identifier} is configured for git sparse-checkout "
+"but the git version is too old to support sparse cloning. "
+"Cloning the full repository instead."
+)
+)
+self._clone_src(commit, branch, tag)
+else:
+# default to depth=2 to allow for retention of some git properties
+depth = kwargs.get("depth", 2)
+needs_fetch = branch or tag
+git_ref = branch or tag or commit
+
+assert git_ref
+
+clone_args = ["clone"]
+
+if needs_fetch:
+clone_args.extend(["--branch", git_ref])
+
+if self.get_full_repo:
+clone_args.append("--no-single-branch")
+else:
+clone_args.append("--single-branch")
+
+clone_args.extend(
+[f"--depth={depth}", "--no-checkout", "--filter=blob:none", self.url]
+)
+
+sparse_args = ["sparse-checkout", "set"]
+
+if callable(self.git_sparse_paths):
+sparse_args.extend(self.git_sparse_paths())
+else:
+sparse_args.extend([p for p in self.git_sparse_paths])
+
+sparse_args.append("--cone")
+
+checkout_args = ["checkout", git_ref]
+
+if not spack.config.get("config:debug"):
+clone_args.insert(1, "--quiet")
+checkout_args.insert(1, "--quiet")
+
+with temp_cwd():
+git(*clone_args)
+repo_name = get_single_file(".")
+if self.stage:
+self.stage.srcdir = repo_name
+shutil.move(repo_name, dest)
+
+with working_dir(dest):
+git(*sparse_args)
+git(*checkout_args)
+
+def submodule_operations(self):
+dest = self.stage.source_path
+git = self.git
+
if self.submodules_delete:
with working_dir(dest):
for submodule_to_delete in self.submodules_delete:

@@ -1293,7 +1372,7 @@ def reset(self):
shutil.move(scrubbed, source_path)

def __str__(self):
-return "[hg] %s" % self.url
+return f"[hg] {self.url}"


@fetcher
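For reference, the new _sparse_clone_src assembles its git invocations from argument lists; the sequence corresponds roughly to the following standalone commands, shown here with subprocess, a hypothetical repository URL, ref, and sparse paths, and assuming a git new enough for cone-mode sparse checkout (Spack drives the same steps through its git Executable wrapper and falls back to a full clone on git older than 2.25):

import subprocess

url = "https://example.com/some/repo.git"   # hypothetical
git_ref = "v1.2.3"                          # branch, tag, or commit
sparse_paths = ["docs", "src/lib"]          # the package's git_sparse_paths

# Shallow, blob-less clone without checking out any files yet.
subprocess.run(
    ["git", "clone", "--branch", git_ref, "--single-branch",
     "--depth=2", "--no-checkout", "--filter=blob:none", url, "repo"],
    check=True,
)
# Restrict the working tree to the requested directories, then check out the ref.
subprocess.run(["git", "sparse-checkout", "set", *sparse_paths, "--cone"], check=True, cwd="repo")
subprocess.run(["git", "checkout", git_ref], check=True, cwd="repo")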
@@ -1302,45 +1381,20 @@ class S3FetchStrategy(URLFetchStrategy):

    url_attr = "s3"

-    def __init__(self, *args, **kwargs):
-        try:
-            super().__init__(*args, **kwargs)
-        except ValueError:
-            if not kwargs.get("url"):
-                raise ValueError("S3FetchStrategy requires a url for fetching.")
-
    @_needs_stage
    def fetch(self):
-        if self.archive_file:
-            tty.debug("Already downloaded {0}".format(self.archive_file))
-            return
-
-        parsed_url = urllib.parse.urlparse(self.url)
-        if parsed_url.scheme != "s3":
-            raise spack.error.FetchError("S3FetchStrategy can only fetch from s3:// urls.")
-
-        tty.debug("Fetching {0}".format(self.url))
-
-        basename = os.path.basename(parsed_url.path)
-
-        with working_dir(self.stage.path):
-            _, headers, stream = web_util.read_from_url(self.url)
-
-            with open(basename, "wb") as f:
-                shutil.copyfileobj(stream, f)
-
-            content_type = web_util.get_header(headers, "Content-type")
-
-        if content_type == "text/html":
-            warn_content_type_mismatch(self.archive_file or "the archive")
-
-        if self.stage.save_filename:
-            llnl.util.filesystem.rename(
-                os.path.join(self.stage.path, basename), self.stage.save_filename
-            )
+        if not self.url.startswith("s3://"):
+            raise spack.error.FetchError(
+                f"{self.__class__.__name__} can only fetch from s3:// urls."
+            )
+        if self.archive_file:
+            tty.debug(f"Already downloaded {self.archive_file}")
+            return
+        self._fetch_urllib(self.url)

        if not self.archive_file:
-            raise FailedDownloadError(self.url)
+            raise FailedDownloadError(
+                RuntimeError(f"Missing archive {self.archive_file} after fetching")
+            )


@fetcher
@@ -1349,43 +1403,22 @@ class GCSFetchStrategy(URLFetchStrategy):

    url_attr = "gs"

-    def __init__(self, *args, **kwargs):
-        try:
-            super().__init__(*args, **kwargs)
-        except ValueError:
-            if not kwargs.get("url"):
-                raise ValueError("GCSFetchStrategy requires a url for fetching.")
-
    @_needs_stage
    def fetch(self):
+        if not self.url.startswith("gs"):
+            raise spack.error.FetchError(
+                f"{self.__class__.__name__} can only fetch from gs:// urls."
+            )
        if self.archive_file:
-            tty.debug("Already downloaded {0}".format(self.archive_file))
+            tty.debug(f"Already downloaded {self.archive_file}")
            return

-        parsed_url = urllib.parse.urlparse(self.url)
-        if parsed_url.scheme != "gs":
-            raise spack.error.FetchError("GCSFetchStrategy can only fetch from gs:// urls.")
-
-        tty.debug("Fetching {0}".format(self.url))
-
-        basename = os.path.basename(parsed_url.path)
-
-        with working_dir(self.stage.path):
-            _, headers, stream = web_util.read_from_url(self.url)
-
-            with open(basename, "wb") as f:
-                shutil.copyfileobj(stream, f)
-
-            content_type = web_util.get_header(headers, "Content-type")
-
-        if content_type == "text/html":
-            warn_content_type_mismatch(self.archive_file or "the archive")
-
-        if self.stage.save_filename:
-            os.rename(os.path.join(self.stage.path, basename), self.stage.save_filename)
+        self._fetch_urllib(self.url)

        if not self.archive_file:
-            raise FailedDownloadError(self.url)
+            raise FailedDownloadError(
+                RuntimeError(f"Missing archive {self.archive_file} after fetching")
+            )


@fetcher
@@ -1394,7 +1427,7 @@ class FetchAndVerifyExpandedFile(URLFetchStrategy):
    as well as after expanding it."""

    def __init__(self, url, archive_sha256: str, expanded_sha256: str):
-        super().__init__(url, archive_sha256)
+        super().__init__(url=url, checksum=archive_sha256)
        self.expanded_sha256 = expanded_sha256

    def expand(self):
@@ -1436,14 +1469,14 @@ def stable_target(fetcher):
    return False


-def from_url(url):
+def from_url(url: str) -> URLFetchStrategy:
    """Given a URL, find an appropriate fetch strategy for it.

    Currently just gives you a URLFetchStrategy that uses curl.

    TODO: make this return appropriate fetch strategies for other
    types of URLs.
    """
-    return URLFetchStrategy(url)
+    return URLFetchStrategy(url=url)


def from_kwargs(**kwargs):
@@ -1512,10 +1545,12 @@ def _check_version_attributes(fetcher, pkg, version):
def _extrapolate(pkg, version):
    """Create a fetcher from an extrapolated URL for this version."""
    try:
-        return URLFetchStrategy(pkg.url_for_version(version), fetch_options=pkg.fetch_options)
+        return URLFetchStrategy(url=pkg.url_for_version(version), fetch_options=pkg.fetch_options)
    except spack.package_base.NoURLError:
-        msg = "Can't extrapolate a URL for version %s " "because package %s defines no URLs"
-        raise ExtrapolationError(msg % (version, pkg.name))
+        raise ExtrapolationError(
+            f"Can't extrapolate a URL for version {version} because "
+            f"package {pkg.name} defines no URLs"
+        )


def _from_merged_attrs(fetcher, pkg, version):
@@ -1532,8 +1567,11 @@ def _from_merged_attrs(fetcher, pkg, version):
    attrs["fetch_options"] = pkg.fetch_options
    attrs.update(pkg.versions[version])

-    if fetcher.url_attr == "git" and hasattr(pkg, "submodules"):
-        attrs.setdefault("submodules", pkg.submodules)
+    if fetcher.url_attr == "git":
+        pkg_attr_list = ["submodules", "git_sparse_paths"]
+        for pkg_attr in pkg_attr_list:
+            if hasattr(pkg, pkg_attr):
+                attrs.setdefault(pkg_attr, getattr(pkg, pkg_attr))

    return fetcher(**attrs)
@@ -1628,11 +1666,9 @@ def for_package_version(pkg, version=None):
        raise InvalidArgsError(pkg, version, **args)


-def from_url_scheme(url, *args, **kwargs):
+def from_url_scheme(url: str, **kwargs):
    """Finds a suitable FetchStrategy by matching its url_attr with the scheme
    in the given url."""
-
-    url = kwargs.get("url", url)
    parsed_url = urllib.parse.urlparse(url, scheme="file")

    scheme_mapping = kwargs.get("scheme_mapping") or {
@@ -1649,11 +1685,9 @@ def from_url_scheme(url, *args, **kwargs):
    for fetcher in all_strategies:
        url_attr = getattr(fetcher, "url_attr", None)
        if url_attr and url_attr == scheme:
-            return fetcher(url, *args, **kwargs)
+            return fetcher(url=url, **kwargs)

-    raise ValueError(
-        'No FetchStrategy found for url with scheme: "{SCHEME}"'.format(SCHEME=parsed_url.scheme)
-    )
+    raise ValueError(f'No FetchStrategy found for url with scheme: "{parsed_url.scheme}"')


def from_list_url(pkg):
@@ -1678,7 +1712,9 @@ def from_list_url(pkg):
            )

            # construct a fetcher
-            return URLFetchStrategy(url_from_list, checksum, fetch_options=pkg.fetch_options)
+            return URLFetchStrategy(
+                url=url_from_list, checksum=checksum, fetch_options=pkg.fetch_options
+            )
        except KeyError as e:
            tty.debug(e)
            tty.msg("Cannot find version %s in url_list" % pkg.version)
@@ -1706,10 +1742,10 @@ def store(self, fetcher, relative_dest):
        mkdirp(os.path.dirname(dst))
        fetcher.archive(dst)

-    def fetcher(self, target_path, digest, **kwargs):
+    def fetcher(self, target_path: str, digest: Optional[str], **kwargs) -> CacheURLFetchStrategy:
        path = os.path.join(self.root, target_path)
        url = url_util.path_to_file_url(path)
-        return CacheURLFetchStrategy(url, digest, **kwargs)
+        return CacheURLFetchStrategy(url=url, checksum=digest, **kwargs)

    def destroy(self):
        shutil.rmtree(self.root, ignore_errors=True)
@@ -1722,9 +1758,9 @@ class NoCacheError(spack.error.FetchError):
class FailedDownloadError(spack.error.FetchError):
    """Raised when a download fails."""

-    def __init__(self, url, msg=""):
-        super().__init__("Failed to fetch file from URL: %s" % url, msg)
-        self.url = url
+    def __init__(self, *exceptions: Exception):
+        super().__init__("Failed to download")
+        self.exceptions = exceptions


class NoArchiveFileError(spack.error.FetchError):
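Note the changed contract: FailedDownloadError no longer carries a URL and message, it wraps the underlying exception objects instead. A small illustrative sketch of constructing and inspecting it under the new signature (the wrapped errors here are made up):

    err = FailedDownloadError(
        ConnectionError("mirror unreachable"),
        RuntimeError("Missing archive after fetching"),
    )
    for cause in err.exceptions:
        print(type(cause).__name__, cause)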
@@ -37,6 +37,12 @@ def __call__(self, spec):
        """Run this hash on the provided spec."""
        return spec.spec_hash(self)

+    def __repr__(self):
+        return (
+            f"SpecHashDescriptor(depflag={self.depflag!r}, "
+            f"package_hash={self.package_hash!r}, name={self.name!r}, override={self.override!r})"
+        )


#: Spack's deployment hash. Includes all inputs that can affect how a package is built.
dag_hash = SpecHashDescriptor(depflag=dt.BUILD | dt.LINK | dt.RUN, package_hash=True, name="hash")
@@ -23,9 +23,6 @@ def post_install(spec, explicit):

    # Push the package to all autopush mirrors
    for mirror in spack.mirror.MirrorCollection(binary=True, autopush=True).values():
-        bindist.push_or_raise(
-            spec,
-            mirror.push_url,
-            bindist.PushOptions(force=True, regenerate_index=False, unsigned=not mirror.signed),
-        )
+        signing_key = bindist.select_signing_key() if mirror.signed else None
+        bindist.push_or_raise([spec], out_url=mirror.push_url, signing_key=signing_key, force=True)
        tty.msg(f"{spec.name}: Pushed to build cache: '{mirror.name}'")
@@ -757,6 +757,10 @@ def test_process(pkg: Pb, kwargs):
            pkg.tester.status(pkg.spec.name, TestStatus.SKIPPED)
            return

+        # Make sure properly named build-time test methods actually run as
+        # stand-alone tests.
+        pkg.run_tests = True
+
        # run test methods from the package and all virtuals it provides
        v_names = virtuals(pkg)
        test_specs = [pkg.spec] + [spack.spec.Spec(v_name) for v_name in sorted(v_names)]
@@ -426,48 +426,36 @@ def _determine_extension(fetcher):
    return ext


-class MirrorReference:
-    """A ``MirrorReference`` stores the relative paths where you can store a
-    package/resource in a mirror directory.
-
-    The appropriate storage location is given by ``storage_path``. The
-    ``cosmetic_path`` property provides a reference that a human could generate
-    themselves based on reading the details of the package.
-
-    A user can iterate over a ``MirrorReference`` object to get all the
-    possible names that might be used to refer to the resource in a mirror;
-    this includes names generated by previous naming schemes that are no-longer
-    reported by ``storage_path`` or ``cosmetic_path``.
-    """
-
-    def __init__(self, cosmetic_path, global_path=None):
+class MirrorLayout:
+    """A ``MirrorLayout`` stores the relative locations of files in a mirror directory. The main
+    storage location is ``storage_path``. An additional, human-readable path may be obtained as the
+    second entry when iterating this object."""
+
+    def __init__(self, storage_path: str) -> None:
+        self.storage_path = storage_path
+
+    def __iter__(self):
+        yield self.storage_path
+
+
+class DefaultLayout(MirrorLayout):
+    def __init__(self, cosmetic_path: str, global_path: Optional[str] = None) -> None:
+        super().__init__(global_path or cosmetic_path)
        self.global_path = global_path
        self.cosmetic_path = cosmetic_path

-    @property
-    def storage_path(self):
-        if self.global_path:
-            return self.global_path
-        else:
-            return self.cosmetic_path
-
    def __iter__(self):
        if self.global_path:
            yield self.global_path
        yield self.cosmetic_path


-class OCIImageLayout:
-    """Follow the OCI Image Layout Specification to archive blobs
-
-    Paths are of the form `blobs/<algorithm>/<digest>`
-    """
+class OCILayout(MirrorLayout):
+    """Follow the OCI Image Layout Specification to archive blobs where paths are of the form
+    ``blobs/<algorithm>/<digest>``"""

    def __init__(self, digest: spack.oci.image.Digest) -> None:
-        self.storage_path = os.path.join("blobs", digest.algorithm, digest.digest)
-
-    def __iter__(self):
-        yield self.storage_path
+        super().__init__(os.path.join("blobs", digest.algorithm, digest.digest))


def mirror_archive_paths(fetcher, per_package_ref, spec=None):
@@ -494,7 +482,7 @@ def mirror_archive_paths(fetcher, per_package_ref, spec=None):
    if global_ref and ext:
        global_ref += ".%s" % ext

-    return MirrorReference(per_package_ref, global_ref)
+    return DefaultLayout(per_package_ref, global_ref)


def get_all_versions(specs):
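To illustrate the new hierarchy: iterating a DefaultLayout yields the preferred (global) storage path first and the human-readable cosmetic path second, matching the docstring above. A short usage sketch with made-up paths:

    layout = DefaultLayout(
        "zlib/zlib-1.3.tar.gz",                        # cosmetic, per-package path
        global_path="_source-cache/archive/ab/abc1.tar.gz",
    )
    assert layout.storage_path == "_source-cache/archive/ab/abc1.tar.gz"
    assert list(layout) == [layout.global_path, layout.cosmetic_path]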
@@ -6,7 +6,6 @@
import hashlib
import json
import os
-import time
import urllib.error
import urllib.parse
import urllib.request
@@ -43,11 +42,6 @@ def create_tarball(spec: spack.spec.Spec, tarfile_path):
    return spack.binary_distribution._do_create_tarball(tarfile_path, spec.prefix, buildinfo)


-def _log_upload_progress(digest: Digest, size: int, elapsed: float):
-    elapsed = max(elapsed, 0.001)  # guard against division by zero
-    tty.info(f"Uploaded {digest} ({elapsed:.2f}s, {size / elapsed / 1024 / 1024:.2f} MB/s)")
-
-
def with_query_param(url: str, param: str, value: str) -> str:
    """Add a query parameter to a URL

@@ -141,8 +135,6 @@ def upload_blob(
    if not force and blob_exists(ref, digest, _urlopen):
        return False

-    start = time.time()
-
    with open(file, "rb") as f:
        file_size = os.fstat(f.fileno()).st_size

@@ -167,7 +159,6 @@ def upload_blob(

    # Created the blob in one go.
    if response.status == 201:
-        _log_upload_progress(digest, file_size, time.time() - start)
        return True

    # Otherwise, do another PUT request.
@@ -191,8 +182,6 @@ def upload_blob(

    spack.oci.opener.ensure_status(request, response, 201)

-    # print elapsed time and # MB/s
-    _log_upload_progress(digest, file_size, time.time() - start)
    return True


@@ -401,15 +390,12 @@ def make_stage(
) -> spack.stage.Stage:
    _urlopen = _urlopen or spack.oci.opener.urlopen
    fetch_strategy = spack.fetch_strategy.OCIRegistryFetchStrategy(
-        url, checksum=digest.digest, _urlopen=_urlopen
+        url=url, checksum=digest.digest, _urlopen=_urlopen
    )
    # Use blobs/<alg>/<encoded> as the cache path, which follows
    # the OCI Image Layout Specification. What's missing though,
    # is the `oci-layout` and `index.json` files, which are
    # required by the spec.
    return spack.stage.Stage(
-        fetch_strategy,
-        mirror_paths=spack.mirror.OCIImageLayout(digest),
-        name=digest.digest,
-        keep=keep,
+        fetch_strategy, mirror_paths=spack.mirror.OCILayout(digest), name=digest.digest, keep=keep
    )
@@ -197,13 +197,12 @@ def __init__(cls, name, bases, attr_dict):
            # that "foo" was a possible executable.

            # If a package has the executables or libraries attribute then it's
-            # assumed to be detectable
+            # assumed to be detectable. Add a tag, so finding them is faster
            if hasattr(cls, "executables") or hasattr(cls, "libraries"):
-                # Append a tag to each detectable package, so that finding them is faster
-                if not hasattr(cls, "tags"):
-                    setattr(cls, "tags", [DetectablePackageMeta.TAG])
-                elif DetectablePackageMeta.TAG not in cls.tags:
-                    cls.tags.append(DetectablePackageMeta.TAG)
+                # To add the tag, we need to copy the tags attribute, and attach it to
+                # the current class. We don't use append, since it might modify base classes,
+                # if "tags" is retrieved following the MRO.
+                cls.tags = getattr(cls, "tags", []) + [DetectablePackageMeta.TAG]

            @classmethod
            def platform_executables(cls):
@@ -1102,6 +1101,7 @@ def _make_resource_stage(self, root_stage, resource):
            mirror_paths=spack.mirror.mirror_archive_paths(
                resource.fetcher, os.path.join(self.name, pretty_resource_name)
            ),
+            mirrors=spack.mirror.MirrorCollection(source=True).values(),
            path=self.path,
        )

@@ -1122,6 +1122,7 @@ def _make_root_stage(self, fetcher):
        stage = Stage(
            fetcher,
            mirror_paths=mirror_paths,
+            mirrors=spack.mirror.MirrorCollection(source=True).values(),
            name=stage_name,
            path=self.path,
            search_fn=self._download_search,
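The comment above is the whole point of the tagging change: copying the list instead of appending keeps a "tags" attribute inherited through the MRO from being mutated on a base class. A standalone sketch of the difference (class names are illustrative):

    class Base:
        tags = ["build-tools"]

    class Detectable(Base):
        pass

    # copy-and-extend, as the metaclass now does: Base.tags stays untouched
    Detectable.tags = getattr(Detectable, "tags", []) + ["detectable"]
    assert Base.tags == ["build-tools"]

    # whereas Detectable.tags.append("detectable") would have modified Base.tags,
    # since the attribute lookup resolves to the base class list.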
@@ -328,19 +328,26 @@ def next_spec(
        if not self.ctx.next_token:
            return initial_spec

+        def add_dependency(dep, **edge_properties):
+            """wrapper around root_spec._add_dependency"""
+            try:
+                root_spec._add_dependency(dep, **edge_properties)
+            except spack.error.SpecError as e:
+                raise SpecParsingError(str(e), self.ctx.current_token, self.literal_str) from e
+
        initial_spec = initial_spec or spack.spec.Spec()
-        root_spec = SpecNodeParser(self.ctx).parse(initial_spec)
+        root_spec = SpecNodeParser(self.ctx, self.literal_str).parse(initial_spec)
        while True:
            if self.ctx.accept(TokenType.START_EDGE_PROPERTIES):
                edge_properties = EdgeAttributeParser(self.ctx, self.literal_str).parse()
                edge_properties.setdefault("depflag", 0)
                edge_properties.setdefault("virtuals", ())
                dependency = self._parse_node(root_spec)
-                root_spec._add_dependency(dependency, **edge_properties)
+                add_dependency(dependency, **edge_properties)

            elif self.ctx.accept(TokenType.DEPENDENCY):
                dependency = self._parse_node(root_spec)
-                root_spec._add_dependency(dependency, depflag=0, virtuals=())
+                add_dependency(dependency, depflag=0, virtuals=())

            else:
                break
@@ -348,7 +355,7 @@ def next_spec(
        return root_spec

    def _parse_node(self, root_spec):
-        dependency = SpecNodeParser(self.ctx).parse()
+        dependency = SpecNodeParser(self.ctx, self.literal_str).parse()
        if dependency is None:
            msg = (
                "the dependency sigil and any optional edge attributes must be followed by a "
@@ -367,10 +374,11 @@ def all_specs(self) -> List["spack.spec.Spec"]:
class SpecNodeParser:
    """Parse a single spec node from a stream of tokens"""

-    __slots__ = "ctx", "has_compiler", "has_version"
+    __slots__ = "ctx", "has_compiler", "has_version", "literal_str"

-    def __init__(self, ctx):
+    def __init__(self, ctx, literal_str):
        self.ctx = ctx
+        self.literal_str = literal_str
        self.has_compiler = False
        self.has_version = False

@@ -388,7 +396,8 @@ def parse(
        if not self.ctx.next_token or self.ctx.expect(TokenType.DEPENDENCY):
            return initial_spec

-        initial_spec = initial_spec or spack.spec.Spec()
+        if initial_spec is None:
+            initial_spec = spack.spec.Spec()

        # If we start with a package name we have a named spec, we cannot
        # accept another package name afterwards in a node
@@ -405,12 +414,21 @@ def parse(
        elif self.ctx.accept(TokenType.FILENAME):
            return FileParser(self.ctx).parse(initial_spec)

+        def raise_parsing_error(string: str, cause: Optional[Exception] = None):
+            """Raise a spec parsing error with token context."""
+            raise SpecParsingError(string, self.ctx.current_token, self.literal_str) from cause
+
+        def add_flag(name: str, value: str, propagate: bool):
+            """Wrapper around ``Spec._add_flag()`` that adds parser context to errors raised."""
+            try:
+                initial_spec._add_flag(name, value, propagate)
+            except Exception as e:
+                raise_parsing_error(str(e), e)
+
        while True:
            if self.ctx.accept(TokenType.COMPILER):
                if self.has_compiler:
-                    raise spack.spec.DuplicateCompilerSpecError(
-                        f"{initial_spec} cannot have multiple compilers"
-                    )
+                    raise_parsing_error("Spec cannot have multiple compilers")

                compiler_name = self.ctx.current_token.value[1:]
                initial_spec.compiler = spack.spec.CompilerSpec(compiler_name.strip(), ":")
@@ -418,9 +436,7 @@ def parse(

            elif self.ctx.accept(TokenType.COMPILER_AND_VERSION):
                if self.has_compiler:
-                    raise spack.spec.DuplicateCompilerSpecError(
-                        f"{initial_spec} cannot have multiple compilers"
-                    )
+                    raise_parsing_error("Spec cannot have multiple compilers")

                compiler_name, compiler_version = self.ctx.current_token.value[1:].split("@")
                initial_spec.compiler = spack.spec.CompilerSpec(
@@ -434,9 +450,8 @@ def parse(
                or self.ctx.accept(TokenType.VERSION)
            ):
                if self.has_version:
-                    raise spack.spec.MultipleVersionError(
-                        f"{initial_spec} cannot have multiple versions"
-                    )
+                    raise_parsing_error("Spec cannot have multiple versions")
                initial_spec.versions = spack.version.VersionList(
                    [spack.version.from_string(self.ctx.current_token.value[1:])]
                )
@@ -445,29 +460,25 @@ def parse(

            elif self.ctx.accept(TokenType.BOOL_VARIANT):
                variant_value = self.ctx.current_token.value[0] == "+"
-                initial_spec._add_flag(
-                    self.ctx.current_token.value[1:].strip(), variant_value, propagate=False
-                )
+                add_flag(self.ctx.current_token.value[1:].strip(), variant_value, propagate=False)

            elif self.ctx.accept(TokenType.PROPAGATED_BOOL_VARIANT):
                variant_value = self.ctx.current_token.value[0:2] == "++"
-                initial_spec._add_flag(
-                    self.ctx.current_token.value[2:].strip(), variant_value, propagate=True
-                )
+                add_flag(self.ctx.current_token.value[2:].strip(), variant_value, propagate=True)

            elif self.ctx.accept(TokenType.KEY_VALUE_PAIR):
                match = SPLIT_KVP.match(self.ctx.current_token.value)
                assert match, "SPLIT_KVP and KEY_VALUE_PAIR do not agree."

-                name, delim, value = match.groups()
-                initial_spec._add_flag(name, strip_quotes_and_unescape(value), propagate=False)
+                name, _, value = match.groups()
+                add_flag(name, strip_quotes_and_unescape(value), propagate=False)

            elif self.ctx.accept(TokenType.PROPAGATED_KEY_VALUE_PAIR):
                match = SPLIT_KVP.match(self.ctx.current_token.value)
                assert match, "SPLIT_KVP and PROPAGATED_KEY_VALUE_PAIR do not agree."

-                name, delim, value = match.groups()
-                initial_spec._add_flag(name, strip_quotes_and_unescape(value), propagate=True)
+                name, _, value = match.groups()
+                add_flag(name, strip_quotes_and_unescape(value), propagate=True)

            elif self.ctx.expect(TokenType.DAG_HASH):
                if initial_spec.abstract_hash:
|
|||||||
import os.path
|
import os.path
|
||||||
import pathlib
|
import pathlib
|
||||||
import sys
|
import sys
|
||||||
|
import zipfile
|
||||||
from typing import Any, Dict, Optional, Tuple, Type, Union
|
from typing import Any, Dict, Optional, Tuple, Type, Union
|
||||||
|
|
||||||
import llnl.util.filesystem
|
import llnl.util.filesystem
|
||||||
@@ -21,7 +22,7 @@
|
|||||||
import spack.repo
|
import spack.repo
|
||||||
import spack.stage
|
import spack.stage
|
||||||
import spack.util.spack_json as sjson
|
import spack.util.spack_json as sjson
|
||||||
from spack.util.crypto import Checker, checksum
|
from spack.util.crypto import Checker, checksum_stream
|
||||||
from spack.util.executable import which, which_string
|
from spack.util.executable import which, which_string
|
||||||
|
|
||||||
|
|
||||||
@@ -155,6 +156,9 @@ def __hash__(self) -> int:
|
|||||||
return hash(self.sha256)
|
return hash(self.sha256)
|
||||||
|
|
||||||
|
|
||||||
|
zipfilecache = {}
|
||||||
|
|
||||||
|
|
||||||
class FilePatch(Patch):
|
class FilePatch(Patch):
|
||||||
"""Describes a patch that is retrieved from a file in the repository."""
|
"""Describes a patch that is retrieved from a file in the repository."""
|
||||||
|
|
||||||
@@ -194,9 +198,27 @@ def __init__(
|
|||||||
# Cannot use pkg.package_dir because it's a property and we have
|
# Cannot use pkg.package_dir because it's a property and we have
|
||||||
# classes, not instances.
|
# classes, not instances.
|
||||||
pkg_dir = os.path.abspath(os.path.dirname(cls.module.__file__))
|
pkg_dir = os.path.abspath(os.path.dirname(cls.module.__file__))
|
||||||
path = os.path.join(pkg_dir, self.relative_path)
|
path = pathlib.Path(os.path.join(pkg_dir, self.relative_path))
|
||||||
if os.path.exists(path):
|
|
||||||
abs_path = path
|
if "packages.zip" in path.parts:
|
||||||
|
# check if it exists in the zip file.
|
||||||
|
idx = path.parts.index("packages.zip")
|
||||||
|
zip_path, entry_path = pathlib.PurePath(*path.parts[: idx + 1]), pathlib.PurePath(
|
||||||
|
*path.parts[idx + 1 :]
|
||||||
|
)
|
||||||
|
|
||||||
|
lookup = zipfilecache.get(zip_path)
|
||||||
|
if lookup is None:
|
||||||
|
zip = zipfile.ZipFile(zip_path, "r")
|
||||||
|
namelist = set(zip.namelist())
|
||||||
|
zipfilecache[zip_path] = (zip, namelist)
|
||||||
|
else:
|
||||||
|
zip, namelist = lookup
|
||||||
|
if str(entry_path) in namelist:
|
||||||
|
abs_path = str(path)
|
||||||
|
break
|
||||||
|
elif path.exists():
|
||||||
|
abs_path = str(path)
|
||||||
break
|
break
|
||||||
|
|
||||||
if abs_path is None:
|
if abs_path is None:
|
||||||
@@ -216,7 +238,24 @@ def sha256(self) -> str:
|
|||||||
The sha256 of the patch file.
|
The sha256 of the patch file.
|
||||||
"""
|
"""
|
||||||
if self._sha256 is None and self.path is not None:
|
if self._sha256 is None and self.path is not None:
|
||||||
self._sha256 = checksum(hashlib.sha256, self.path)
|
path = pathlib.PurePath(self.path)
|
||||||
|
if "packages.zip" in path.parts:
|
||||||
|
print("yes")
|
||||||
|
# split in path to packages.zip and the path within the zip
|
||||||
|
idx = path.parts.index("packages.zip")
|
||||||
|
path_to_zip, path_in_zip = pathlib.PurePath(
|
||||||
|
*path.parts[: idx + 1]
|
||||||
|
), pathlib.PurePath(*path.parts[idx + 1 :])
|
||||||
|
zip = zipfilecache.get(path_to_zip)
|
||||||
|
if not zip:
|
||||||
|
zip = zipfile.ZipFile(path_to_zip, "r")
|
||||||
|
zipfilecache[path_to_zip] = zip
|
||||||
|
f = zip.open(str(path_in_zip), "r")
|
||||||
|
else:
|
||||||
|
f = open(self.path, "rb")
|
||||||
|
self._sha256 = checksum_stream(hashlib.sha256, f)
|
||||||
|
f.close()
|
||||||
|
|
||||||
assert isinstance(self._sha256, str)
|
assert isinstance(self._sha256, str)
|
||||||
return self._sha256
|
return self._sha256
|
||||||
|
|
||||||
@@ -319,7 +358,7 @@ def stage(self) -> "spack.stage.Stage":
|
|||||||
self.url, archive_sha256=self.archive_sha256, expanded_sha256=self.sha256
|
self.url, archive_sha256=self.archive_sha256, expanded_sha256=self.sha256
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
fetcher = fs.URLFetchStrategy(self.url, sha256=self.sha256, expand=False)
|
fetcher = fs.URLFetchStrategy(url=self.url, sha256=self.sha256, expand=False)
|
||||||
|
|
||||||
# The same package can have multiple patches with the same name but
|
# The same package can have multiple patches with the same name but
|
||||||
# with different contents, therefore apply a subset of the hash.
|
# with different contents, therefore apply a subset of the hash.
|
||||||
@@ -331,6 +370,7 @@ def stage(self) -> "spack.stage.Stage":
|
|||||||
fetcher,
|
fetcher,
|
||||||
name=f"{spack.stage.stage_prefix}patch-{fetch_digest}",
|
name=f"{spack.stage.stage_prefix}patch-{fetch_digest}",
|
||||||
mirror_paths=mirror_ref,
|
mirror_paths=mirror_ref,
|
||||||
|
mirrors=spack.mirror.MirrorCollection(source=True).values(),
|
||||||
)
|
)
|
||||||
return self._stage
|
return self._stage
|
||||||
|
|
||||||
|
|||||||
@@ -26,6 +26,7 @@
import types
import uuid
import warnings
+import zipimport
from typing import Any, Dict, Generator, List, Optional, Set, Tuple, Type, Union

import llnl.path
@@ -100,32 +101,6 @@ def get_data(self, path):
        return self.prepend.encode() + b"\n" + data


-class RepoLoader(_PrependFileLoader):
-    """Loads a Python module associated with a package in specific repository"""
-
-    #: Code in ``_package_prepend`` is prepended to imported packages.
-    #:
-    #: Spack packages are expected to call `from spack.package import *`
-    #: themselves, but we are allowing a deprecation period before breaking
-    #: external repos that don't do this yet.
-    _package_prepend = "from spack.package import *"
-
-    def __init__(self, fullname, repo, package_name):
-        self.repo = repo
-        self.package_name = package_name
-        self.package_py = repo.filename_for_package_name(package_name)
-        self.fullname = fullname
-        super().__init__(self.fullname, self.package_py, prepend=self._package_prepend)
-
-
-class SpackNamespaceLoader:
-    def create_module(self, spec):
-        return SpackNamespace(spec.name)
-
-    def exec_module(self, module):
-        module.__loader__ = self
-
-
class ReposFinder:
    """MetaPathFinder class that loads a Python module corresponding to a Spack package.

@@ -149,12 +124,12 @@ def current_repository(self, value):
    @contextlib.contextmanager
    def switch_repo(self, substitute: "RepoType"):
        """Switch the current repository list for the duration of the context manager."""
-        old = self.current_repository
+        old = self._repo
        try:
-            self.current_repository = substitute
+            self._repo = substitute
            yield
        finally:
-            self.current_repository = old
+            self._repo = old

    def find_spec(self, fullname, python_path, target=None):
        # "target" is not None only when calling importlib.reload()
@@ -165,10 +140,11 @@ def find_spec(self, fullname, python_path, target=None):
        if not fullname.startswith(ROOT_PYTHON_NAMESPACE):
            return None

-        loader = self.compute_loader(fullname)
-        if loader is None:
+        result = self.compute_loader(fullname)
+        if result is None:
            return None
-        return importlib.util.spec_from_loader(fullname, loader)
+        loader, actual_fullname = result
+        return importlib.util.spec_from_loader(actual_fullname, loader)

    def compute_loader(self, fullname):
        # namespaces are added to repo, and package modules are leaves.
@@ -187,16 +163,29 @@ def compute_loader(self, fullname):
            # With 2 nested conditionals we can call "repo.real_name" only once
            package_name = repo.real_name(module_name)
            if package_name:
-                return RepoLoader(fullname, repo, package_name)
+                # annoyingly there is a many to one mapping for pkg module to file, have to
+                # figure out how to deal with this properly.
+                return (
+                    (repo.zipimporter, f"{namespace}.{package_name}")
+                    if repo.zipimporter
+                    else (
+                        _PrependFileLoader(
+                            fullname=fullname,
+                            path=repo.filename_for_package_name(package_name),
+                            prepend="from spack.package import *",
+                        ),
+                        fullname,
+                    )
+                )

            # We are importing a full namespace like 'spack.pkg.builtin'
            if fullname == repo.full_namespace:
-                return SpackNamespaceLoader()
+                return SpackNamespaceLoader(), fullname

        # No repo provides the namespace, but it is a valid prefix of
        # something in the RepoPath.
        if is_repo_path and self.current_repository.by_namespace.is_prefix(fullname):
-            return SpackNamespaceLoader()
+            return SpackNamespaceLoader(), fullname

        return None
@@ -207,6 +196,7 @@ def compute_loader(self, fullname):
repo_config_name = "repo.yaml"  # Top-level filename for repo config.
repo_index_name = "index.yaml"  # Top-level filename for repository index.
packages_dir_name = "packages"  # Top-level repo directory containing pkgs.
+packages_zip_name = "packages.zip"  # Top-level filename for zipped packages.
package_file_name = "package.py"  # Filename for packages in a repository.

#: Guaranteed unused default value for some functions.
@@ -216,9 +206,9 @@ def compute_loader(self, fullname):
def packages_path():
    """Get the test repo if it is active, otherwise the builtin repo."""
    try:
-        return spack.repo.PATH.get_repo("builtin.mock").packages_path
-    except spack.repo.UnknownNamespaceError:
-        return spack.repo.PATH.get_repo("builtin").packages_path
+        return PATH.get_repo("builtin.mock").packages_path
+    except UnknownNamespaceError:
+        return PATH.get_repo("builtin").packages_path


class GitExe:
@@ -683,7 +673,7 @@ class RepoPath:
    def __init__(
        self,
        *repos: Union[str, "Repo"],
-        cache: "spack.caches.FileCacheType",
+        cache: Optional["spack.caches.FileCacheType"],
        overrides: Optional[Dict[str, Any]] = None,
    ) -> None:
        self.repos: List[Repo] = []
@@ -696,6 +686,7 @@ def __init__(
        for repo in repos:
            try:
                if isinstance(repo, str):
+                    assert cache is not None, "cache must hold a value, when repo is a string"
                    repo = Repo(repo, cache=cache, overrides=overrides)
                repo.finder(self)
                self.put_last(repo)
@@ -707,6 +698,10 @@ def __init__(
                    f"  spack repo rm {repo}",
                )

+    def ensure_unwrapped(self) -> "RepoPath":
+        """Ensure we unwrap this object from any dynamic wrapper (like Singleton)"""
+        return self
+
    def put_first(self, repo: "Repo") -> None:
        """Add repo first in the search path."""
        if isinstance(repo, RepoPath):
@@ -930,6 +925,16 @@ def is_virtual_safe(self, pkg_name: str) -> bool:
    def __contains__(self, pkg_name):
        return self.exists(pkg_name)

+    def marshal(self):
+        return (self.repos,)
+
+    @staticmethod
+    def unmarshal(repos):
+        return RepoPath(*repos, cache=None)
+
+    def __reduce__(self):
+        return RepoPath.unmarshal, self.marshal()
+

class Repo:
    """Class representing a package repository in the filesystem.
@@ -994,9 +999,14 @@ def check(condition, msg):
        self._names = self.full_namespace.split(".")

        packages_dir = config.get("subdirectory", packages_dir_name)
+        packages_zip = os.path.join(self.root, "packages.zip")
+        self.zipimporter = (
+            zipimport.zipimporter(packages_zip) if os.path.exists(packages_zip) else None
+        )
        self.packages_path = os.path.join(self.root, packages_dir)
        check(
-            os.path.isdir(self.packages_path), f"No directory '{packages_dir}' found in '{root}'"
+            self.zipimporter or os.path.isdir(self.packages_path),
+            f"No '{self.packages_path}' or '{packages_zip}' found in '{root}'",
        )

        # Class attribute overrides by package name
@@ -1319,6 +1329,20 @@ def __repr__(self) -> str:
    def __contains__(self, pkg_name: str) -> bool:
        return self.exists(pkg_name)

+    @staticmethod
+    def unmarshal(root, cache, overrides):
+        """Helper method to unmarshal keyword arguments"""
+        return Repo(root, cache=cache, overrides=overrides)
+
+    def marshal(self):
+        cache = self._cache
+        if isinstance(cache, llnl.util.lang.Singleton):
+            cache = cache.instance
+        return self.root, cache, self.overrides
+
+    def __reduce__(self):
+        return Repo.unmarshal, self.marshal()
+

RepoType = Union[Repo, RepoPath]
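The marshal/unmarshal/__reduce__ trio above exists so repositories can cross process boundaries (for example, being shipped to multiprocessing workers). A minimal sketch of what it enables; note that, per unmarshal() above, a RepoPath is rebuilt with cache=None on the receiving side (repo_path below stands for any existing RepoPath instance, an assumption for the example):

    import pickle

    payload = pickle.dumps(repo_path)
    restored = pickle.loads(payload)
    assert [r.root for r in restored.repos] == [r.root for r in repo_path.repos]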
@@ -1478,6 +1502,14 @@ def use_repositories(
        PATH = saved


+class SpackNamespaceLoader:
+    def create_module(self, spec):
+        return SpackNamespace(spec.name)
+
+    def exec_module(self, module):
+        module.__loader__ = self
+
+
class MockRepositoryBuilder:
    """Build a mock repository in a directory"""

@@ -11,6 +26,7 @@
import spack.schema.environment

+flags: Dict[str, Any] = {
+    "type": "object",
+    "additionalProperties": False,
+    "properties": {
+        "cflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
+        "cxxflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
+        "fflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
+        "cppflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
+        "ldflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
+        "ldlibs": {"anyOf": [{"type": "string"}, {"type": "null"}]},
+    },
+}
+
+extra_rpaths: Dict[str, Any] = {"type": "array", "default": [], "items": {"type": "string"}}
+
+implicit_rpaths: Dict[str, Any] = {
+    "anyOf": [{"type": "array", "items": {"type": "string"}}, {"type": "boolean"}]
+}
+
#: Properties for inclusion in other schemas
properties: Dict[str, Any] = {
    "compilers": {
@@ -35,18 +55,7 @@
                        "fc": {"anyOf": [{"type": "string"}, {"type": "null"}]},
                    },
                },
-                "flags": {
-                    "type": "object",
-                    "additionalProperties": False,
-                    "properties": {
-                        "cflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
-                        "cxxflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
-                        "fflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
-                        "cppflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
-                        "ldflags": {"anyOf": [{"type": "string"}, {"type": "null"}]},
-                        "ldlibs": {"anyOf": [{"type": "string"}, {"type": "null"}]},
-                    },
-                },
+                "flags": flags,
                "spec": {"type": "string"},
                "operating_system": {"type": "string"},
                "target": {"type": "string"},
@@ -54,18 +63,9 @@
                "modules": {
                    "anyOf": [{"type": "string"}, {"type": "null"}, {"type": "array"}]
                },
-                "implicit_rpaths": {
-                    "anyOf": [
-                        {"type": "array", "items": {"type": "string"}},
-                        {"type": "boolean"},
-                    ]
-                },
+                "implicit_rpaths": implicit_rpaths,
                "environment": spack.schema.environment.definition,
-                "extra_rpaths": {
-                    "type": "array",
-                    "default": [],
-                    "items": {"type": "string"},
-                },
+                "extra_rpaths": extra_rpaths,
            },
        }
    },

@@ -84,7 +84,6 @@
"build_language": {"type": "string"},
|
"build_language": {"type": "string"},
|
||||||
"build_jobs": {"type": "integer", "minimum": 1},
|
"build_jobs": {"type": "integer", "minimum": 1},
|
||||||
"ccache": {"type": "boolean"},
|
"ccache": {"type": "boolean"},
|
||||||
"concretizer": {"type": "string", "enum": ["original", "clingo"]},
|
|
||||||
"db_lock_timeout": {"type": "integer", "minimum": 1},
|
"db_lock_timeout": {"type": "integer", "minimum": 1},
|
||||||
"package_lock_timeout": {
|
"package_lock_timeout": {
|
||||||
"anyOf": [{"type": "integer", "minimum": 1}, {"type": "null"}]
|
"anyOf": [{"type": "integer", "minimum": 1}, {"type": "null"}]
|
||||||
@@ -98,9 +97,9 @@
|
|||||||
"aliases": {"type": "object", "patternProperties": {r"\w[\w-]*": {"type": "string"}}},
|
"aliases": {"type": "object", "patternProperties": {r"\w[\w-]*": {"type": "string"}}},
|
||||||
},
|
},
|
||||||
"deprecatedProperties": {
|
"deprecatedProperties": {
|
||||||
"properties": ["terminal_title"],
|
"properties": ["concretizer"],
|
||||||
"message": "config:terminal_title has been replaced by "
|
"message": "Spack supports only clingo as a concretizer from v0.23. "
|
||||||
"install_status and is ignored",
|
"The config:concretizer config option is ignored.",
|
||||||
"error": False,
|
"error": False,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,6 +11,8 @@
|
|||||||
|
|
||||||
import spack.schema.environment

+from .compilers import extra_rpaths, flags, implicit_rpaths
+
permissions = {
    "type": "object",
    "additionalProperties": False,
@@ -184,7 +186,16 @@
                    "type": "object",
                    "additionalProperties": True,
                    "properties": {
-                        "environment": spack.schema.environment.definition
+                        "compilers": {
+                            "type": "object",
+                            "patternProperties": {
+                                r"(^\w[\w-]*)": {"type": "string"}
+                            },
+                        },
+                        "environment": spack.schema.environment.definition,
+                        "extra_rpaths": extra_rpaths,
+                        "implicit_rpaths": implicit_rpaths,
+                        "flags": flags,
                    },
                },
            },

@@ -23,6 +23,7 @@
import llnl.util.lang
import llnl.util.tty as tty
+from llnl.util.lang import elide_list

import spack
import spack.binary_distribution
@@ -621,8 +622,9 @@ def _external_config_with_implicit_externals(configuration):


class ErrorHandler:
-    def __init__(self, model):
+    def __init__(self, model, input_specs: List[spack.spec.Spec]):
        self.model = model
+        self.input_specs = input_specs
        self.full_model = None

    def multiple_values_error(self, attribute, pkg):
@@ -709,12 +711,13 @@ def handle_error(self, msg, *args):
        return msg

    def message(self, errors) -> str:
-        messages = [
-            f"  {idx+1: 2}. {self.handle_error(msg, *args)}"
+        input_specs = ", ".join(elide_list([f"`{s}`" for s in self.input_specs], 5))
+        header = f"failed to concretize {input_specs} for the following reasons:"
+        messages = (
+            f"  {idx+1:2}. {self.handle_error(msg, *args)}"
            for idx, (_, msg, args) in enumerate(errors)
-        ]
-        header = "concretization failed for the following reasons:\n"
-        return "\n".join([header] + messages)
+        )
+        return "\n".join((header, *messages))

    def raise_if_errors(self):
        initial_error_args = extract_args(self.model, "error")
@@ -750,7 +753,7 @@ def on_model(model):
            f"unexpected error during concretization [{str(e)}]. "
            f"Please report a bug at https://github.com/spack/spack/issues"
        )
-        raise spack.error.SpackError(msg)
+        raise spack.error.SpackError(msg) from e
    raise UnsatisfiableSpecError(msg)


@@ -894,7 +897,7 @@ def on_model(model):
min_cost, best_model = min(models)
|
min_cost, best_model = min(models)
|
||||||
|
|
||||||
# first check for errors
|
# first check for errors
|
||||||
error_handler = ErrorHandler(best_model)
|
error_handler = ErrorHandler(best_model, specs)
|
||||||
error_handler.raise_if_errors()
|
error_handler.raise_if_errors()
|
||||||
|
|
||||||
# build specs from spec attributes in the model
|
# build specs from spec attributes in the model
|
||||||
@@ -1435,16 +1438,14 @@ def condition(
         # caller, we won't emit partial facts.
 
         condition_id = next(self._id_counter)
-        self.gen.fact(fn.pkg_fact(required_spec.name, fn.condition(condition_id)))
-        self.gen.fact(fn.condition_reason(condition_id, msg))
-
         trigger_id = self._get_condition_id(
             required_spec, cache=self._trigger_cache, body=True, transform=transform_required
         )
+        self.gen.fact(fn.pkg_fact(required_spec.name, fn.condition(condition_id)))
+        self.gen.fact(fn.condition_reason(condition_id, msg))
         self.gen.fact(
             fn.pkg_fact(required_spec.name, fn.condition_trigger(condition_id, trigger_id))
         )
 
         if not imposed_spec:
             return condition_id
-
@@ -1693,19 +1694,43 @@ def external_packages(self):
             spack.spec.parse_with_version_concrete(x["spec"]) for x in externals
         ]
 
-        external_specs = []
+        selected_externals = set()
         if spec_filters:
             for current_filter in spec_filters:
                 current_filter.factory = lambda: candidate_specs
-                external_specs.extend(current_filter.selected_specs())
-        else:
-            external_specs.extend(candidate_specs)
+                selected_externals.update(current_filter.selected_specs())
+
+        # Emit facts for externals specs. Note that "local_idx" is the index of the spec
+        # in packages:<pkg_name>:externals. This means:
+        #
+        #  packages:<pkg_name>:externals[local_idx].spec == spec
+        external_versions = []
+        for local_idx, spec in enumerate(candidate_specs):
+            msg = f"{spec.name} available as external when satisfying {spec}"
+
+            if spec_filters and spec not in selected_externals:
+                continue
+
+            if not spec.versions.concrete:
+                warnings.warn(f"cannot use the external spec {spec}: needs a concrete version")
+                continue
+
+            def external_imposition(input_spec, requirements):
+                return requirements + [
+                    fn.attr("external_conditions_hold", input_spec.name, local_idx)
+                ]
+
+            try:
+                self.condition(spec, spec, msg=msg, transform_imposed=external_imposition)
+            except (spack.error.SpecError, RuntimeError) as e:
+                warnings.warn(f"while setting up external spec {spec}: {e}")
+                continue
+            external_versions.append((spec.version, local_idx))
+            self.possible_versions[spec.name].add(spec.version)
+            self.gen.newline()
 
         # Order the external versions to prefer more recent versions
         # even if specs in packages.yaml are not ordered that way
-        external_versions = [
-            (x.version, external_id) for external_id, x in enumerate(external_specs)
-        ]
         external_versions = [
             (v, idx, external_id)
             for idx, (v, external_id) in enumerate(sorted(external_versions, reverse=True))
@@ -1715,19 +1740,6 @@ def external_packages(self):
                 DeclaredVersion(version=version, idx=idx, origin=Provenance.EXTERNAL)
             )
 
-        # Declare external conditions with a local index into packages.yaml
-        for local_idx, spec in enumerate(external_specs):
-            msg = "%s available as external when satisfying %s" % (spec.name, spec)
-
-            def external_imposition(input_spec, requirements):
-                return requirements + [
-                    fn.attr("external_conditions_hold", input_spec.name, local_idx)
-                ]
-
-            self.condition(spec, spec, msg=msg, transform_imposed=external_imposition)
-            self.possible_versions[spec.name].add(spec.version)
-            self.gen.newline()
-
         self.trigger_rules()
         self.effect_rules()
 
@@ -1839,6 +1851,8 @@ def _spec_clauses(
 
         if spec.name:
             clauses.append(f.node(spec.name) if not spec.virtual else f.virtual_node(spec.name))
+        if spec.namespace:
+            clauses.append(f.namespace(spec.name, spec.namespace))
 
         clauses.extend(self.spec_versions(spec))
 
@@ -2736,6 +2750,7 @@ class _Head:
     """ASP functions used to express spec clauses in the HEAD of a rule"""
 
     node = fn.attr("node")
+    namespace = fn.attr("namespace_set")
     virtual_node = fn.attr("virtual_node")
     node_platform = fn.attr("node_platform_set")
     node_os = fn.attr("node_os_set")
@@ -2751,6 +2766,7 @@ class _Body:
     """ASP functions used to express spec clauses in the BODY of a rule"""
 
     node = fn.attr("node")
+    namespace = fn.attr("namespace")
    virtual_node = fn.attr("virtual_node")
    node_platform = fn.attr("node_platform")
    node_os = fn.attr("node_os")
@@ -18,38 +18,79 @@
 { attr("virtual_node", node(0..X-1, Package)) } :- max_dupes(Package, X), virtual(Package).
 
 % Integrity constraints on DAG nodes
-:- attr("root", PackageNode), not attr("node", PackageNode).
-:- attr("version", PackageNode, _), not attr("node", PackageNode), not attr("virtual_node", PackageNode).
-:- attr("node_version_satisfies", PackageNode, _), not attr("node", PackageNode), not attr("virtual_node", PackageNode).
-:- attr("hash", PackageNode, _), not attr("node", PackageNode).
-:- attr("node_platform", PackageNode, _), not attr("node", PackageNode).
-:- attr("node_os", PackageNode, _), not attr("node", PackageNode).
-:- attr("node_target", PackageNode, _), not attr("node", PackageNode).
-:- attr("node_compiler_version", PackageNode, _, _), not attr("node", PackageNode).
-:- attr("variant_value", PackageNode, _, _), not attr("node", PackageNode).
-:- attr("node_flag_compiler_default", PackageNode), not attr("node", PackageNode).
-:- attr("node_flag", PackageNode, _, _), not attr("node", PackageNode).
-:- attr("external_spec_selected", PackageNode, _), not attr("node", PackageNode).
-:- attr("depends_on", ParentNode, _, _), not attr("node", ParentNode).
-:- attr("depends_on", _, ChildNode, _), not attr("node", ChildNode).
-:- attr("node_flag_source", ParentNode, _, _), not attr("node", ParentNode).
-:- attr("node_flag_source", _, _, ChildNode), not attr("node", ChildNode).
-:- attr("virtual_node", VirtualNode), not provider(_, VirtualNode), internal_error("virtual node with no provider").
-:- provider(_, VirtualNode), not attr("virtual_node", VirtualNode), internal_error("provider with no virtual node").
-:- provider(PackageNode, _), not attr("node", PackageNode), internal_error("provider with no real node").
+:- attr("root", PackageNode),
+  not attr("node", PackageNode),
+  internal_error("Every root must be a node").
+:- attr("version", PackageNode, _),
+  not attr("node", PackageNode),
+  not attr("virtual_node", PackageNode),
+  internal_error("Only nodes and virtual_nodes can have versions").
+:- attr("node_version_satisfies", PackageNode, _),
+  not attr("node", PackageNode),
+  not attr("virtual_node", PackageNode),
+  internal_error("Only nodes and virtual_nodes can have version satisfaction").
+:- attr("hash", PackageNode, _),
+  not attr("node", PackageNode),
+  internal_error("Only nodes can have hashes").
+:- attr("node_platform", PackageNode, _),
+  not attr("node", PackageNode),
+  internal_error("Only nodes can have platforms").
+:- attr("node_os", PackageNode, _), not attr("node", PackageNode),
+  internal_error("Only nodes can have node_os").
+:- attr("node_target", PackageNode, _), not attr("node", PackageNode),
+  internal_error("Only nodes can have node_target").
+:- attr("node_compiler_version", PackageNode, _, _), not attr("node", PackageNode),
+  internal_error("Only nodes can have node_compiler_version").
+:- attr("variant_value", PackageNode, _, _), not attr("node", PackageNode),
+  internal_error("variant_value true for a non-node").
+:- attr("node_flag_compiler_default", PackageNode), not attr("node", PackageNode),
+  internal_error("node_flag_compiler_default true for non-node").
+:- attr("node_flag", PackageNode, _, _), not attr("node", PackageNode),
+  internal_error("node_flag assigned for non-node").
+:- attr("external_spec_selected", PackageNode, _), not attr("node", PackageNode),
+  internal_error("external_spec_selected for non-node").
+:- attr("depends_on", ParentNode, _, _), not attr("node", ParentNode),
+  internal_error("non-node depends on something").
+:- attr("depends_on", _, ChildNode, _), not attr("node", ChildNode),
+  internal_error("something depends_on a non-node").
+:- attr("node_flag_source", Node, _, _), not attr("node", Node),
+  internal_error("node_flag_source assigned for a non-node").
+:- attr("node_flag_source", _, _, SourceNode), not attr("node", SourceNode),
+  internal_error("node_flag_source assigned with a non-node source").
+:- attr("virtual_node", VirtualNode), not provider(_, VirtualNode),
+  internal_error("virtual node with no provider").
+:- provider(_, VirtualNode), not attr("virtual_node", VirtualNode),
+  internal_error("provider with no virtual node").
+:- provider(PackageNode, _), not attr("node", PackageNode),
+  internal_error("provider with no real node").
 
-:- attr("root", node(ID, PackageNode)), ID > min_dupe_id, internal_error("root with a non-minimal duplicate ID").
+:- attr("root", node(ID, PackageNode)), ID > min_dupe_id,
+  internal_error("root with a non-minimal duplicate ID").
 
 % Nodes in the "root" unification set cannot depend on non-root nodes if the dependency is "link" or "run"
 :- attr("depends_on", node(min_dupe_id, Package), node(ID, _), "link"), ID != min_dupe_id, unification_set("root", node(min_dupe_id, Package)), internal_error("link dependency out of the root unification set").
 :- attr("depends_on", node(min_dupe_id, Package), node(ID, _), "run"), ID != min_dupe_id, unification_set("root", node(min_dupe_id, Package)), internal_error("run dependency out of the root unification set").
 
-% Namespaces are statically assigned by a package fact
-attr("namespace", node(ID, Package), Namespace) :- attr("node", node(ID, Package)), pkg_fact(Package, namespace(Namespace)).
+% Namespaces are statically assigned by a package fact if not otherwise set
+error(100, "{0} does not have a namespace", Package) :- attr("node", node(ID, Package)),
+  not attr("namespace", node(ID, Package), _),
+  internal_error("A node must have a namespace").
+error(100, "{0} cannot come from both {1} and {2} namespaces", Package, NS1, NS2) :- attr("node", node(ID, Package)),
+  attr("namespace", node(ID, Package), NS1),
+  attr("namespace", node(ID, Package), NS2),
+  NS1 != NS2,
+  internal_error("A node cannot have two namespaces").
+
+attr("namespace", node(ID, Package), Namespace) :- attr("namespace_set", node(ID, Package), Namespace).
+attr("namespace", node(ID, Package), Namespace)
+  :- attr("node", node(ID, Package)),
+     not attr("namespace_set", node(ID, Package), _),
+     pkg_fact(Package, namespace(Namespace)).
 
 % Rules on "unification sets", i.e. on sets of nodes allowing a single configuration of any given package
 unify(SetID, PackageName) :- unification_set(SetID, node(_, PackageName)).
-:- 2 { unification_set(SetID, node(_, PackageName)) }, unify(SetID, PackageName).
+:- 2 { unification_set(SetID, node(_, PackageName)) }, unify(SetID, PackageName),
+  internal_error("Cannot have multiple unification sets IDs for one set").
 
 unification_set("root", PackageNode) :- attr("root", PackageNode).
 unification_set(SetID, ChildNode) :- attr("depends_on", ParentNode, ChildNode, Type), Type != "build", unification_set(SetID, ParentNode).
@@ -75,7 +116,8 @@ unification_set(SetID, VirtualNode)
 % as a build dependency.
 %
 % We'll need to relax the rule before we get to actual cross-compilation
-:- depends_on(ParentNode, node(X, Dependency)), depends_on(ParentNode, node(Y, Dependency)), X < Y.
+:- depends_on(ParentNode, node(X, Dependency)), depends_on(ParentNode, node(Y, Dependency)), X < Y,
+  internal_error("Cannot split link/build deptypes for a single edge (yet)").
 
 
 #defined multiple_unification_sets/1.
@@ -131,7 +173,8 @@ mentioned_in_literal(Root, Mentioned) :- mentioned_in_literal(TriggerID, Root, M
 condition_set(node(min_dupe_id, Root), node(min_dupe_id, Root)) :- mentioned_in_literal(Root, Root).
 
 1 { condition_set(node(min_dupe_id, Root), node(0..Y-1, Mentioned)) : max_dupes(Mentioned, Y) } 1 :-
-  mentioned_in_literal(Root, Mentioned), Mentioned != Root.
+  mentioned_in_literal(Root, Mentioned), Mentioned != Root,
+  internal_error("must have exactly one condition_set for literals").
 
 % Discriminate between "roots" that have been explicitly requested, and roots that are deduced from "virtual roots"
 explicitly_requested_root(node(min_dupe_id, Package)) :-
@@ -151,7 +194,8 @@ associated_with_root(RootNode, ChildNode) :-
 :- attr("root", RootNode),
   condition_set(RootNode, node(X, Package)),
   not virtual(Package),
-  not associated_with_root(RootNode, node(X, Package)).
+  not associated_with_root(RootNode, node(X, Package)),
+  internal_error("nodes in root condition set must be associated with root").
 
 #defined concretize_everything/0.
 #defined literal/1.
@@ -385,8 +429,10 @@ imposed_nodes(ConditionID, PackageNode, node(X, A1))
   condition_set(PackageNode, node(X, A1)),
   attr("hash", PackageNode, ConditionID).
 
-:- imposed_packages(ID, A1), impose(ID, PackageNode), not condition_set(PackageNode, node(_, A1)).
-:- imposed_packages(ID, A1), impose(ID, PackageNode), not imposed_nodes(ID, PackageNode, node(_, A1)).
+:- imposed_packages(ID, A1), impose(ID, PackageNode), not condition_set(PackageNode, node(_, A1)),
+  internal_error("Imposing constraint outside of condition set").
+:- imposed_packages(ID, A1), impose(ID, PackageNode), not imposed_nodes(ID, PackageNode, node(_, A1)),
+  internal_error("Imposing constraint outside of imposed_nodes").
 
 % Conditions that hold impose may impose constraints on other specs
 attr(Name, node(X, A1)) :- impose(ID, PackageNode), imposed_constraint(ID, Name, A1), imposed_nodes(ID, PackageNode, node(X, A1)).
@@ -416,7 +462,8 @@ provider(ProviderNode, VirtualNode) :- attr("provider_set", ProviderNode, Virtua
 % satisfy the dependency.
 1 { attr("depends_on", node(X, A1), node(0..Y-1, A2), A3) : max_dupes(A2, Y) } 1
   :- impose(ID, node(X, A1)),
-     imposed_constraint(ID, "depends_on", A1, A2, A3).
+     imposed_constraint(ID, "depends_on", A1, A2, A3),
+     internal_error("Build deps must land in exactly one duplicate").
 
 % Reconstruct virtual dependencies for reused specs
 attr("virtual_on_edge", node(X, A1), node(Y, A2), Virtual)
@@ -1155,8 +1202,11 @@ target_weight(Target, 0)
 node_target_weight(PackageNode, MinWeight)
   :- attr("node", PackageNode),
      attr("node_target", PackageNode, Target),
+     target(Target),
      MinWeight = #min { Weight : target_weight(Target, Weight) }.
 
+:- attr("node_target", PackageNode, Target), not node_target_weight(PackageNode, _).
+
 % compatibility rules for targets among nodes
 node_target_match(ParentNode, DependencyNode)
   :- attr("depends_on", ParentNode, DependencyNode, Type), Type != "build",
@@ -70,7 +70,6 @@
 import spack.compiler
 import spack.compilers
 import spack.config
-import spack.dependency as dp
 import spack.deptypes as dt
 import spack.error
 import spack.hash_types as ht
@@ -99,7 +98,7 @@
     "CompilerSpec",
     "Spec",
     "SpecParseError",
-    "ArchitecturePropagationError",
+    "UnsupportedPropagationError",
     "DuplicateDependencyError",
     "DuplicateCompilerSpecError",
     "UnsupportedCompilerError",
@@ -129,7 +128,7 @@
     r"|"  # or
     # OPTION 2: an actual format string
     r"{"  # non-escaped open brace {
-    r"([%@/]|arch=)?"  # optional sigil (to print sigil in color)
+    r"([%@/]|[\w ][\w -]*=)?"  # optional sigil (or identifier or space) to print sigil in color
     r"(?:\^([^}\.]+)\.)?"  # optional ^depname. (to get attr from dependency)
     # after the sigil or depname, we can have a hash expression or another attribute
     r"(?:"  # one of
@@ -163,14 +162,14 @@
 DEFAULT_FORMAT = (
     "{name}{@versions}"
     "{%compiler.name}{@compiler.versions}{compiler_flags}"
-    "{variants}{arch=architecture}{/abstract_hash}"
+    "{variants}{ namespace=namespace_if_anonymous}{ arch=architecture}{/abstract_hash}"
 )
 
 #: Display format, which eliminates extra `@=` in the output, for readability.
 DISPLAY_FORMAT = (
     "{name}{@version}"
     "{%compiler.name}{@compiler.version}{compiler_flags}"
-    "{variants}{arch=architecture}{/abstract_hash}"
+    "{variants}{ namespace=namespace_if_anonymous}{ arch=architecture}{/abstract_hash}"
 )
 
 #: Regular expression to pull spec contents out of clearsigned signature
||||||
@@ -1640,19 +1639,9 @@ def _add_flag(self, name, value, propagate):
|
|||||||
Known flags currently include "arch"
|
Known flags currently include "arch"
|
||||||
"""
|
"""
|
||||||
|
|
||||||
# If the == syntax is used to propagate the spec architecture
|
if propagate and name in spack.directives.reserved_names:
|
||||||
# This is an error
|
raise UnsupportedPropagationError(
|
||||||
architecture_names = [
|
f"Propagation with '==' is not supported for '{name}'."
|
||||||
"arch",
|
|
||||||
"architecture",
|
|
||||||
"platform",
|
|
||||||
"os",
|
|
||||||
"operating_system",
|
|
||||||
"target",
|
|
||||||
]
|
|
||||||
if propagate and name in architecture_names:
|
|
||||||
raise ArchitecturePropagationError(
|
|
||||||
"Unable to propagate the architecture failed." " Use a '=' instead."
|
|
||||||
)
|
)
|
||||||
|
|
||||||
valid_flags = FlagMap.valid_compiler_flags()
|
valid_flags = FlagMap.valid_compiler_flags()
|
||||||
@@ -1666,6 +1655,8 @@ def _add_flag(self, name, value, propagate):
             self._set_architecture(os=value)
         elif name == "target":
             self._set_architecture(target=value)
+        elif name == "namespace":
+            self.namespace = value
         elif name in valid_flags:
             assert self.compiler_flags is not None
             flags_and_propagation = spack.compiler.tokenize_flags(value, propagate)
@@ -1685,9 +1676,7 @@ def _set_architecture(self, **kwargs):
         """Called by the parser to set the architecture."""
         arch_attrs = ["platform", "os", "target"]
         if self.architecture and self.architecture.concrete:
-            raise DuplicateArchitectureError(
-                "Spec for '%s' cannot have two architectures." % self.name
-            )
+            raise DuplicateArchitectureError("Spec cannot have two architectures.")
 
         if not self.architecture:
             new_vals = tuple(kwargs.get(arg, None) for arg in arch_attrs)
@@ -1696,10 +1685,7 @@ def _set_architecture(self, **kwargs):
             new_attrvals = [(a, v) for a, v in kwargs.items() if a in arch_attrs]
             for new_attr, new_value in new_attrvals:
                 if getattr(self.architecture, new_attr):
-                    raise DuplicateArchitectureError(
-                        "Spec for '%s' cannot have two '%s' specified "
-                        "for its architecture" % (self.name, new_attr)
-                    )
+                    raise DuplicateArchitectureError(f"Cannot specify '{new_attr}' twice")
                 else:
                     setattr(self.architecture, new_attr, new_value)
 
@@ -1894,14 +1880,14 @@ def short_spec(self):
         """Returns a version of the spec with the dependencies hashed
         instead of completely enumerated."""
         spec_format = "{name}{@version}{%compiler.name}{@compiler.version}"
-        spec_format += "{variants}{arch=architecture}{/hash:7}"
+        spec_format += "{variants}{ arch=architecture}{/hash:7}"
         return self.format(spec_format)
 
     @property
     def cshort_spec(self):
         """Returns an auto-colorized version of ``self.short_spec``."""
         spec_format = "{name}{@version}{%compiler.name}{@compiler.version}"
-        spec_format += "{variants}{arch=architecture}{/hash:7}"
+        spec_format += "{variants}{ arch=architecture}{/hash:7}"
         return self.cformat(spec_format)
 
     @property
@@ -2628,294 +2614,6 @@ def validate_detection(self):
         validate_fn = getattr(pkg_cls, "validate_detected_spec", lambda x, y: None)
         validate_fn(self, self.extra_attributes)
 
-    def _concretize_helper(self, concretizer, presets=None, visited=None):
-        """Recursive helper function for concretize().
-        This concretizes everything bottom-up. As things are
-        concretized, they're added to the presets, and ancestors
-        will prefer the settings of their children.
-        """
-        if presets is None:
-            presets = {}
-        if visited is None:
-            visited = set()
-
-        if self.name in visited:
-            return False
-
-        if self.concrete:
-            visited.add(self.name)
-            return False
-
-        changed = False
-
-        # Concretize deps first -- this is a bottom-up process.
-        for name in sorted(self._dependencies):
-            # WARNING: This function is an implementation detail of the
-            # WARNING: original concretizer. Since with that greedy
-            # WARNING: algorithm we don't allow multiple nodes from
-            # WARNING: the same package in a DAG, here we hard-code
-            # WARNING: using index 0 i.e. we assume that we have only
-            # WARNING: one edge from package "name"
-            changed |= self._dependencies[name][0].spec._concretize_helper(
-                concretizer, presets, visited
-            )
-
-        if self.name in presets:
-            changed |= self.constrain(presets[self.name])
-        else:
-            # Concretize virtual dependencies last. Because they're added
-            # to presets below, their constraints will all be merged, but we'll
-            # still need to select a concrete package later.
-            if not self.virtual:
-                changed |= any(
-                    (
-                        concretizer.concretize_develop(self),  # special variant
-                        concretizer.concretize_architecture(self),
-                        concretizer.concretize_compiler(self),
-                        concretizer.adjust_target(self),
-                        # flags must be concretized after compiler
-                        concretizer.concretize_compiler_flags(self),
-                        concretizer.concretize_version(self),
-                        concretizer.concretize_variants(self),
-                    )
-                )
-            presets[self.name] = self
-
-        visited.add(self.name)
-        return changed
-
-    def _replace_with(self, concrete):
-        """Replace this virtual spec with a concrete spec."""
-        assert self.virtual
-        virtuals = (self.name,)
-        for dep_spec in itertools.chain.from_iterable(self._dependents.values()):
-            dependent = dep_spec.parent
-            depflag = dep_spec.depflag
-
-            # remove self from all dependents, unless it is already removed
-            if self.name in dependent._dependencies:
-                del dependent._dependencies.edges[self.name]
-
-            # add the replacement, unless it is already a dep of dependent.
-            if concrete.name not in dependent._dependencies:
-                dependent._add_dependency(concrete, depflag=depflag, virtuals=virtuals)
-            else:
-                dependent.edges_to_dependencies(name=concrete.name)[0].update_virtuals(
-                    virtuals=virtuals
-                )
-
-    def _expand_virtual_packages(self, concretizer):
-        """Find virtual packages in this spec, replace them with providers,
-        and normalize again to include the provider's (potentially virtual)
-        dependencies. Repeat until there are no virtual deps.
-
-        Precondition: spec is normalized.
-
-        .. todo::
-
-           If a provider depends on something that conflicts with
-           other dependencies in the spec being expanded, this can
-           produce a conflicting spec. For example, if mpich depends
-           on hwloc@:1.3 but something in the spec needs hwloc1.4:,
-           then we should choose an MPI other than mpich. Cases like
-           this are infrequent, but should implement this before it is
-           a problem.
-        """
-        # Make an index of stuff this spec already provides
-        self_index = spack.provider_index.ProviderIndex(
-            repository=spack.repo.PATH, specs=self.traverse(), restrict=True
-        )
-        changed = False
-        done = False
-
-        while not done:
-            done = True
-            for spec in list(self.traverse()):
-                replacement = None
-                if spec.external:
-                    continue
-                if spec.virtual:
-                    replacement = self._find_provider(spec, self_index)
-                    if replacement:
-                        # TODO: may break if in-place on self but
-                        # shouldn't happen if root is traversed first.
-                        spec._replace_with(replacement)
-                        done = False
-                        break
-
-                if not replacement:
-                    # Get a list of possible replacements in order of
-                    # preference.
-                    candidates = concretizer.choose_virtual_or_external(spec)
-
-                    # Try the replacements in order, skipping any that cause
-                    # satisfiability problems.
-                    for replacement in candidates:
-                        if replacement is spec:
-                            break
-
-                        # Replace spec with the candidate and normalize
-                        copy = self.copy()
-                        copy[spec.name]._dup(replacement, deps=False)
-
-                        try:
-                            # If there are duplicate providers or duplicate
-                            # provider deps, consolidate them and merge
-                            # constraints.
-                            copy.normalize(force=True)
-                            break
-                        except spack.error.SpecError:
-                            # On error, we'll try the next replacement.
-                            continue
-
-                # If replacement is external then trim the dependencies
-                if replacement.external:
-                    if spec._dependencies:
-                        for dep in spec.dependencies():
-                            del dep._dependents.edges[spec.name]
-                        changed = True
-                        spec.clear_dependencies()
-                    replacement.clear_dependencies()
-                    replacement.architecture = self.architecture
-
-                # TODO: could this and the stuff in _dup be cleaned up?
-                def feq(cfield, sfield):
-                    return (not cfield) or (cfield == sfield)
-
-                if replacement is spec or (
-                    feq(replacement.name, spec.name)
-                    and feq(replacement.versions, spec.versions)
-                    and feq(replacement.compiler, spec.compiler)
-                    and feq(replacement.architecture, spec.architecture)
-                    and feq(replacement._dependencies, spec._dependencies)
-                    and feq(replacement.variants, spec.variants)
-                    and feq(replacement.external_path, spec.external_path)
-                    and feq(replacement.external_modules, spec.external_modules)
-                ):
-                    continue
-                # Refine this spec to the candidate. This uses
-                # replace_with AND dup so that it can work in
-                # place. TODO: make this more efficient.
-                if spec.virtual:
-                    spec._replace_with(replacement)
-                    changed = True
-                if spec._dup(replacement, deps=False, cleardeps=False):
-                    changed = True
-
-                self_index.update(spec)
-                done = False
-                break
-
-        return changed
-
-    def _old_concretize(self, tests=False, deprecation_warning=True):
-        """A spec is concrete if it describes one build of a package uniquely.
-        This will ensure that this spec is concrete.
-
-        Args:
-            tests (list or bool): list of packages that will need test
-                dependencies, or True/False for test all/none
-            deprecation_warning (bool): enable or disable the deprecation
-                warning for the old concretizer
-
-        If this spec could describe more than one version, variant, or build
-        of a package, this will add constraints to make it concrete.
-
-        Some rigorous validation and checks are also performed on the spec.
-        Concretizing ensures that it is self-consistent and that it's
-        consistent with requirements of its packages. See flatten() and
-        normalize() for more details on this.
-        """
-        import spack.concretize
-
-        # Add a warning message to inform users that the original concretizer
-        # will be removed
-        if deprecation_warning:
-            msg = (
-                "the original concretizer is currently being used.\n\tUpgrade to "
-                '"clingo" at your earliest convenience. The original concretizer '
-                "will be removed from Spack in a future version."
-            )
-            warnings.warn(msg)
-
-        self.replace_hash()
-
-        if not self.name:
-            raise spack.error.SpecError("Attempting to concretize anonymous spec")
-
-        if self._concrete:
-            return
-
-        # take the spec apart once before starting the main concretization loop and resolving
-        # deps, but don't break dependencies during concretization as the spec is built.
-        user_spec_deps = self.flat_dependencies(disconnect=True)
-
-        changed = True
-        force = False
-        concretizer = spack.concretize.Concretizer(self.copy())
-        while changed:
-            changes = (
-                self.normalize(force, tests, user_spec_deps, disconnect=False),
-                self._expand_virtual_packages(concretizer),
-                self._concretize_helper(concretizer),
-            )
-            changed = any(changes)
-            force = True
-
-        visited_user_specs = set()
-        for dep in self.traverse():
-            visited_user_specs.add(dep.name)
-            pkg_cls = spack.repo.PATH.get_pkg_class(dep.name)
-            visited_user_specs.update(pkg_cls(dep).provided_virtual_names())
-
-        extra = set(user_spec_deps.keys()).difference(visited_user_specs)
-        if extra:
-            raise InvalidDependencyError(self.name, extra)
-
-        Spec.inject_patches_variant(self)
-
-        for s in self.traverse():
-            # TODO: Refactor this into a common method to build external specs
-            # TODO: or turn external_path into a lazy property
-            Spec.ensure_external_path_if_external(s)
-
-        # assign hashes and mark concrete
-        self._finalize_concretization()
-
-        # If any spec in the DAG is deprecated, throw an error
-        Spec.ensure_no_deprecated(self)
-
-        # Update externals as needed
-        for dep in self.traverse():
-            if dep.external:
-                dep.package.update_external_dependencies()
-
-        # Now that the spec is concrete we should check if
-        # there are declared conflicts
-        #
-        # TODO: this needs rethinking, as currently we can only express
-        # TODO: internal configuration conflicts within one package.
-        matches = []
-        for x in self.traverse():
-            if x.external:
-                # external specs are already built, don't worry about whether
-                # it's possible to build that configuration with Spack
-                continue
-
-            for when_spec, conflict_list in x.package_class.conflicts.items():
-                if x.satisfies(when_spec):
-                    for conflict_spec, msg in conflict_list:
-                        if x.satisfies(conflict_spec):
-                            when = when_spec.copy()
-                            when.name = x.name
-                            matches.append((x, conflict_spec, when, msg))
-        if matches:
-            raise ConflictsInSpecError(self, matches)
-
-        # Check if we can produce an optimized binary (will throw if
-        # there are declared inconsistencies)
-        self.architecture.target.optimization_flags(self.compiler)
-
     def _patches_assigned(self):
         """Whether patches have been assigned to this spec by the concretizer."""
         # FIXME: _patches_in_order_of_appearance is attached after concretization
@@ -3045,7 +2743,13 @@ def ensure_no_deprecated(root):
             msg += " For each package listed, choose another spec\n"
             raise SpecDeprecatedError(msg)
 
-    def _new_concretize(self, tests=False):
+    def concretize(self, tests: Union[bool, List[str]] = False) -> None:
+        """Concretize the current spec.
+
+        Args:
+            tests: if False disregard 'test' dependencies, if a list of names activate them for
+                the packages in the list, if True activate 'test' dependencies for all packages.
+        """
         import spack.solver.asp
 
         self.replace_hash()
@@ -3079,19 +2783,6 @@ def _new_concretize(self, tests=False):
             concretized = answer[node]
             self._dup(concretized)
 
-    def concretize(self, tests=False):
-        """Concretize the current spec.
-
-        Args:
-            tests (bool or list): if False disregard 'test' dependencies,
-                if a list of names activate them for the packages in the list,
-                if True activate 'test' dependencies for all packages.
-        """
-        if spack.config.get("config:concretizer", "clingo") == "clingo":
-            self._new_concretize(tests)
-        else:
-            self._old_concretize(tests)
-
     def _mark_root_concrete(self, value=True):
         """Mark just this spec (not dependencies) concrete."""
         if (not value) and self.concrete and self.installed:
@@ -3195,34 +2886,6 @@ def concretized(self, tests=False):
         clone.concretize(tests=tests)
         return clone
 
-    def flat_dependencies(self, disconnect: bool = False):
-        """Build DependencyMap of all of this spec's dependencies with their constraints merged.
-
-        Arguments:
-            disconnect: if True, disconnect all dependents and dependencies among nodes in this
-                spec's DAG.
-        """
-        flat_deps = {}
-        deptree = self.traverse(root=False)
-
-        for spec in deptree:
-            if spec.name not in flat_deps:
-                flat_deps[spec.name] = spec
-            else:
-                try:
-                    flat_deps[spec.name].constrain(spec)
-                except spack.error.UnsatisfiableSpecError as e:
-                    # DAG contains two instances of the same package with inconsistent constraints.
-                    raise InconsistentSpecError("Invalid Spec DAG: %s" % e.message) from e
-
-        if disconnect:
-            for spec in flat_deps.values():
-                if not spec.concrete:
-                    spec.clear_edges()
-            self.clear_dependencies()
-
-        return flat_deps
-
     def index(self, deptype="all"):
         """Return a dictionary that points to all the dependencies in this
         spec.
||||||
@@ -3232,312 +2895,6 @@ def index(self, deptype="all"):
|
|||||||
dm[spec.name].append(spec)
|
dm[spec.name].append(spec)
|
||||||
return dm
|
return dm
|
||||||
|
|
||||||
def _evaluate_dependency_conditions(self, name):
|
|
||||||
"""Evaluate all the conditions on a dependency with this name.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
name (str): name of dependency to evaluate conditions on.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
(Dependency): new Dependency object combining all constraints.
|
|
||||||
|
|
||||||
If the package depends on <name> in the current spec
|
|
||||||
configuration, return the constrained dependency and
|
|
||||||
corresponding dependency types.
|
|
||||||
|
|
||||||
If no conditions are True (and we don't depend on it), return
|
|
||||||
``(None, None)``.
|
|
||||||
"""
|
|
||||||
vt.substitute_abstract_variants(self)
|
|
||||||
# evaluate when specs to figure out constraints on the dependency.
|
|
||||||
dep = None
|
|
||||||
for when_spec, deps_by_name in self.package_class.dependencies.items():
|
|
||||||
if not self.satisfies(when_spec):
|
|
||||||
continue
|
|
||||||
|
|
||||||
for dep_name, dependency in deps_by_name.items():
|
|
||||||
if dep_name != name:
|
|
||||||
continue
|
|
||||||
|
|
||||||
if dep is None:
|
|
||||||
dep = dp.Dependency(Spec(self.name), Spec(name), depflag=0)
|
|
||||||
try:
|
|
||||||
dep.merge(dependency)
|
|
||||||
except spack.error.UnsatisfiableSpecError as e:
|
|
||||||
e.message = (
|
|
||||||
"Conflicting conditional dependencies for spec"
|
|
||||||
"\n\n\t{0}\n\n"
|
|
||||||
"Cannot merge constraint"
|
|
||||||
"\n\n\t{1}\n\n"
|
|
||||||
"into"
|
|
||||||
"\n\n\t{2}".format(self, dependency.spec, dep.spec)
|
|
||||||
)
|
|
||||||
raise e
|
|
||||||
|
|
||||||
return dep
|
|
||||||
|
|
||||||
def _find_provider(self, vdep, provider_index):
|
|
||||||
"""Find provider for a virtual spec in the provider index.
|
|
||||||
Raise an exception if there is a conflicting virtual
|
|
||||||
dependency already in this spec.
|
|
||||||
"""
|
|
||||||
assert spack.repo.PATH.is_virtual_safe(vdep.name), vdep
|
|
||||||
|
|
||||||
# note that this defensively copies.
|
|
||||||
providers = provider_index.providers_for(vdep)
|
|
||||||
|
|
||||||
# If there is a provider for the vpkg, then use that instead of
|
|
||||||
# the virtual package.
|
|
||||||
if providers:
|
|
||||||
# Remove duplicate providers that can concretize to the same
|
|
||||||
# result.
|
|
||||||
for provider in providers:
|
|
||||||
for spec in providers:
|
|
||||||
if spec is not provider and provider.intersects(spec):
|
|
||||||
providers.remove(spec)
|
|
||||||
# Can't have multiple providers for the same thing in one spec.
|
|
||||||
if len(providers) > 1:
|
|
||||||
raise MultipleProviderError(vdep, providers)
|
|
||||||
return providers[0]
|
|
||||||
else:
|
|
||||||
# The user might have required something insufficient for
|
|
||||||
# pkg_dep -- so we'll get a conflict. e.g., user asked for
|
|
||||||
# mpi@:1.1 but some package required mpi@2.1:.
|
|
||||||
required = provider_index.providers_for(vdep.name)
|
|
||||||
if len(required) > 1:
|
|
||||||
raise MultipleProviderError(vdep, required)
|
|
||||||
elif required:
|
|
||||||
raise UnsatisfiableProviderSpecError(required[0], vdep)
|
|
||||||
|
|
||||||
def _merge_dependency(self, dependency, visited, spec_deps, provider_index, tests):
|
|
||||||
"""Merge dependency information from a Package into this Spec.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
dependency (Dependency): dependency metadata from a package;
|
|
||||||
this is typically the result of merging *all* matching
|
|
||||||
dependency constraints from the package.
|
|
||||||
visited (set): set of dependency nodes already visited by
|
|
||||||
``normalize()``.
|
|
||||||
spec_deps (dict): ``dict`` of all dependencies from the spec
|
|
||||||
being normalized.
|
|
||||||
provider_index (dict): ``provider_index`` of virtual dep
|
|
||||||
providers in the ``Spec`` as normalized so far.
|
|
||||||
|
|
||||||
NOTE: Caller should assume that this routine owns the
|
|
||||||
``dependency`` parameter, i.e., it needs to be a copy of any
|
|
||||||
internal structures.
|
|
||||||
|
|
||||||
This is the core of ``normalize()``. There are some basic steps:
|
|
||||||
|
|
||||||
* If dep is virtual, evaluate whether it corresponds to an
|
|
||||||
existing concrete dependency, and merge if so.
|
|
||||||
|
|
||||||
* If it's real and it provides some virtual dep, see if it provides
|
|
||||||
what some virtual dependency wants and merge if so.
|
|
||||||
|
|
||||||
* Finally, if none of the above, merge dependency and its
|
|
||||||
constraints into this spec.
|
|
||||||
|
|
||||||
This method returns True if the spec was changed, False otherwise.
|
|
||||||
|
|
||||||
"""
|
|
||||||
changed = False
|
|
||||||
dep = dependency.spec
|
|
||||||
|
|
||||||
# If it's a virtual dependency, try to find an existing
|
|
||||||
# provider in the spec, and merge that.
|
|
||||||
virtuals = ()
|
|
||||||
if spack.repo.PATH.is_virtual_safe(dep.name):
|
|
||||||
virtuals = (dep.name,)
|
|
||||||
visited.add(dep.name)
|
|
||||||
provider = self._find_provider(dep, provider_index)
|
|
||||||
if provider:
|
|
||||||
dep = provider
|
|
||||||
else:
|
|
||||||
index = spack.provider_index.ProviderIndex(
|
|
||||||
repository=spack.repo.PATH, specs=[dep], restrict=True
|
|
||||||
)
|
|
||||||
items = list(spec_deps.items())
|
|
||||||
for name, vspec in items:
|
|
||||||
if not spack.repo.PATH.is_virtual_safe(vspec.name):
|
|
||||||
continue
|
|
||||||
|
|
||||||
if index.providers_for(vspec):
|
|
||||||
vspec._replace_with(dep)
|
|
||||||
del spec_deps[vspec.name]
|
|
||||||
changed = True
|
|
||||||
else:
|
|
||||||
required = index.providers_for(vspec.name)
|
|
||||||
if required:
|
|
||||||
raise UnsatisfiableProviderSpecError(required[0], dep)
|
|
||||||
provider_index.update(dep)
|
|
||||||
|
|
||||||
# If the spec isn't already in the set of dependencies, add it.
|
|
||||||
# Note: dep is always owned by this method. If it's from the
|
|
||||||
# caller, it's a copy from _evaluate_dependency_conditions. If it
|
|
||||||
# comes from a vdep, it's a defensive copy from _find_provider.
|
|
||||||
if dep.name not in spec_deps:
|
|
||||||
if self.concrete:
|
|
||||||
return False
|
|
||||||
|
|
||||||
spec_deps[dep.name] = dep
|
|
||||||
changed = True
|
|
||||||
else:
|
|
||||||
# merge package/vdep information into spec
|
|
||||||
try:
|
|
||||||
tty.debug("{0} applying constraint {1}".format(self.name, str(dep)))
|
|
||||||
changed |= spec_deps[dep.name].constrain(dep)
|
|
||||||
except spack.error.UnsatisfiableSpecError as e:
|
|
||||||
fmt = "An unsatisfiable {0}".format(e.constraint_type)
|
|
||||||
fmt += " constraint has been detected for spec:"
|
|
||||||
fmt += "\n\n{0}\n\n".format(spec_deps[dep.name].tree(indent=4))
|
|
||||||
fmt += "while trying to concretize the partial spec:"
|
|
||||||
fmt += "\n\n{0}\n\n".format(self.tree(indent=4))
|
|
||||||
fmt += "{0} requires {1} {2} {3}, but spec asked for {4}"
|
|
||||||
|
|
||||||
e.message = fmt.format(
|
|
||||||
self.name, dep.name, e.constraint_type, e.required, e.provided
|
|
||||||
)
|
|
||||||
|
|
||||||
raise
|
|
||||||
|
|
||||||
# Add merged spec to my deps and recurse
|
|
||||||
spec_dependency = spec_deps[dep.name]
|
|
||||||
if dep.name not in self._dependencies:
|
|
||||||
self._add_dependency(spec_dependency, depflag=dependency.depflag, virtuals=virtuals)
|
|
||||||
|
|
||||||
changed |= spec_dependency._normalize_helper(visited, spec_deps, provider_index, tests)
|
|
||||||
return changed
|
|
||||||
|
|
||||||
def _normalize_helper(self, visited, spec_deps, provider_index, tests):
|
|
||||||
"""Recursive helper function for _normalize."""
|
|
||||||
if self.name in visited:
|
|
||||||
return False
|
|
||||||
visited.add(self.name)
|
|
||||||
|
|
||||||
# If we descend into a virtual spec, there's nothing more
|
|
||||||
# to normalize. Concretize will finish resolving it later.
|
|
||||||
if self.virtual or self.external:
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Avoid recursively adding constraints for already-installed packages:
|
|
||||||
# these may include build dependencies which are not needed for this
|
|
||||||
# install (since this package is already installed).
|
|
||||||
if self.concrete and self.installed:
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Combine constraints from package deps with constraints from
|
|
||||||
# the spec, until nothing changes.
|
|
||||||
any_change = False
|
|
||||||
changed = True
|
|
||||||
|
|
||||||
while changed:
|
|
||||||
changed = False
|
|
||||||
for dep_name in self.package_class.dependency_names():
|
|
||||||
# Do we depend on dep_name? If so pkg_dep is not None.
|
|
||||||
dep = self._evaluate_dependency_conditions(dep_name)
|
|
||||||
|
|
||||||
# If dep is a needed dependency, merge it.
|
|
||||||
if dep:
|
|
||||||
merge = (
|
|
||||||
# caller requested test dependencies
|
|
||||||
tests is True
|
|
||||||
or (tests and self.name in tests)
|
|
||||||
or
|
|
||||||
# this is not a test-only dependency
|
|
||||||
(dep.depflag & ~dt.TEST)
|
|
||||||
)
|
|
||||||
|
|
||||||
if merge:
|
|
||||||
changed |= self._merge_dependency(
|
|
||||||
dep, visited, spec_deps, provider_index, tests
|
|
||||||
)
|
|
||||||
any_change |= changed
|
|
||||||
|
|
||||||
return any_change
|
|
||||||
|
|
||||||
def normalize(self, force=False, tests=False, user_spec_deps=None, disconnect=True):
|
|
||||||
"""When specs are parsed, any dependencies specified are hanging off
|
|
||||||
the root, and ONLY the ones that were explicitly provided are there.
|
|
||||||
Normalization turns a partial flat spec into a DAG, where:
|
|
||||||
|
|
||||||
1. Known dependencies of the root package are in the DAG.
|
|
||||||
2. Each node's dependencies dict only contains its known direct
|
|
||||||
deps.
|
|
||||||
3. There is only ONE unique spec for each package in the DAG.
|
|
||||||
|
|
||||||
* This includes virtual packages. If there a non-virtual
|
|
||||||
package that provides a virtual package that is in the spec,
|
|
||||||
then we replace the virtual package with the non-virtual one.
|
|
||||||
|
|
||||||
TODO: normalize should probably implement some form of cycle
|
|
||||||
detection, to ensure that the spec is actually a DAG.
|
|
||||||
"""
|
|
||||||
if not self.name:
|
|
||||||
raise spack.error.SpecError("Attempting to normalize anonymous spec")
|
|
||||||
|
|
||||||
# Set _normal and _concrete to False when forced
|
|
||||||
if force and not self._concrete:
|
|
||||||
self._normal = False
|
|
||||||
|
|
||||||
if self._normal:
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Ensure first that all packages & compilers in the DAG exist.
|
|
||||||
self.validate_or_raise()
|
|
||||||
# Clear the DAG and collect all dependencies in the DAG, which will be
|
|
||||||
# reapplied as constraints. All dependencies collected this way will
|
|
||||||
# have been created by a previous execution of 'normalize'.
|
|
||||||
# A dependency extracted here will only be reintegrated if it is
|
|
||||||
# discovered to apply according to _normalize_helper, so
|
|
||||||
# user-specified dependencies are recorded separately in case they
|
|
||||||
# refer to specs which take several normalization passes to
|
|
||||||
# materialize.
|
|
||||||
all_spec_deps = self.flat_dependencies(disconnect=disconnect)
|
|
||||||
|
|
||||||
if user_spec_deps:
|
|
||||||
for name, spec in user_spec_deps.items():
|
|
||||||
if not name:
|
|
||||||
msg = "Attempted to normalize anonymous dependency spec"
|
|
||||||
msg += " %s" % spec
|
|
||||||
raise InvalidSpecDetected(msg)
|
|
||||||
if name not in all_spec_deps:
|
|
||||||
all_spec_deps[name] = spec
|
|
||||||
else:
|
|
||||||
all_spec_deps[name].constrain(spec)
|
|
||||||
|
|
||||||
# Initialize index of virtual dependency providers if
|
|
||||||
# concretize didn't pass us one already
|
|
||||||
provider_index = spack.provider_index.ProviderIndex(
|
|
||||||
repository=spack.repo.PATH, specs=[s for s in all_spec_deps.values()], restrict=True
|
|
||||||
)
|
|
||||||
|
|
||||||
# traverse the package DAG and fill out dependencies according
|
|
||||||
# to package files & their 'when' specs
|
|
||||||
visited = set()
|
|
||||||
|
|
||||||
any_change = self._normalize_helper(visited, all_spec_deps, provider_index, tests)
|
|
||||||
|
|
||||||
# remove any leftover dependents outside the spec from, e.g., pruning externals
|
|
||||||
valid = {id(spec) for spec in all_spec_deps.values()} | {id(self)}
|
|
||||||
for spec in all_spec_deps.values():
|
|
||||||
remove = [dep for dep in spec.dependents() if id(dep) not in valid]
|
|
||||||
for dep in remove:
|
|
||||||
del spec._dependents.edges[dep.name]
|
|
||||||
del dep._dependencies.edges[spec.name]
|
|
||||||
|
|
||||||
# Mark the spec as normal once done.
|
|
||||||
self._normal = True
|
|
||||||
return any_change
|
|
||||||
|
|
||||||
def normalized(self):
|
|
||||||
"""
|
|
||||||
Return a normalized copy of this spec without modifying this spec.
|
|
||||||
"""
|
|
||||||
clone = self.copy()
|
|
||||||
clone.normalize()
|
|
||||||
return clone
|
|
||||||
|
|
||||||
def validate_or_raise(self):
|
def validate_or_raise(self):
|
||||||
"""Checks that names and values in this spec are real. If they're not,
|
"""Checks that names and values in this spec are real. If they're not,
|
||||||
it will raise an appropriate exception.
|
it will raise an appropriate exception.
|
||||||
@@ -4386,14 +3743,19 @@ def deps():
 
             yield deps
 
-    def format(self, format_string: str = DEFAULT_FORMAT, color: Optional[bool] = False) -> str:
-        r"""Prints out particular pieces of a spec, depending on what is
-        in the format string.
+    @property
+    def namespace_if_anonymous(self):
+        return self.namespace if not self.name else None
 
-        Using the ``{attribute}`` syntax, any field of the spec can be
-        selected. Those attributes can be recursive. For example,
-        ``s.format({compiler.version})`` will print the version of the
-        compiler.
+    def format(self, format_string: str = DEFAULT_FORMAT, color: Optional[bool] = False) -> str:
+        r"""Prints out attributes of a spec according to a format string.
+
+        Using an ``{attribute}`` format specifier, any field of the spec can be
+        selected. Those attributes can be recursive. For example,
+        ``s.format({compiler.version})`` will print the version of the compiler.
+
+        If the attribute in a format specifier evaluates to ``None``, then the format
+        specifier will evaluate to the empty string, ``""``.
 
         Commonly used attributes of the Spec for format strings include::
 
@@ -4409,6 +3771,7 @@ def format(self, format_string: str = DEFAULT_FORMAT, color: Optional[bool] = Fa
|
|||||||
architecture.os
|
architecture.os
|
||||||
architecture.target
|
architecture.target
|
||||||
prefix
|
prefix
|
||||||
|
namespace
|
||||||
|
|
||||||
Some additional special-case properties can be added::
|
Some additional special-case properties can be added::
|
||||||
|
|
||||||
@@ -4417,40 +3780,51 @@ def format(self, format_string: str = DEFAULT_FORMAT, color: Optional[bool] = Fa
|
|||||||
spack_install The spack install directory
|
spack_install The spack install directory
|
||||||
|
|
||||||
The ``^`` sigil can be used to access dependencies by name.
|
The ``^`` sigil can be used to access dependencies by name.
|
||||||
``s.format({^mpi.name})`` will print the name of the MPI
|
``s.format({^mpi.name})`` will print the name of the MPI implementation in the
|
||||||
implementation in the spec.
|
spec.
|
||||||
|
|
||||||
The ``@``, ``%``, ``arch=``, and ``/`` sigils
|
The ``@``, ``%``, and ``/`` sigils can be used to include the sigil with the
|
||||||
can be used to include the sigil with the printed
|
printed string. These sigils may only be used with the appropriate attributes,
|
||||||
string. These sigils may only be used with the appropriate
|
listed below::
|
||||||
attributes, listed below::
|
|
||||||
|
|
||||||
@ ``{@version}``, ``{@compiler.version}``
|
@ ``{@version}``, ``{@compiler.version}``
|
||||||
% ``{%compiler}``, ``{%compiler.name}``
|
% ``{%compiler}``, ``{%compiler.name}``
|
||||||
arch= ``{arch=architecture}``
|
|
||||||
/ ``{/hash}``, ``{/hash:7}``, etc
|
/ ``{/hash}``, ``{/hash:7}``, etc
|
||||||
|
|
||||||
The ``@`` sigil may also be used for any other property named
|
The ``@`` sigil may also be used for any other property named ``version``.
|
||||||
``version``. Sigils printed with the attribute string are only
|
Sigils printed with the attribute string are only printed if the attribute
|
||||||
printed if the attribute string is non-empty, and are colored
|
string is non-empty, and are colored according to the color of the attribute.
|
||||||
according to the color of the attribute.
|
|
||||||
|
|
||||||
Sigils are not used for printing variants. Variants listed by
|
Variants listed by name naturally print with their sigil. For example,
|
||||||
name naturally print with their sigil. For example,
|
``spec.format('{variants.debug}')`` prints either ``+debug`` or ``~debug``
|
||||||
``spec.format('{variants.debug}')`` would print either
|
depending on the name of the variant. Non-boolean variants print as
|
||||||
``+debug`` or ``~debug`` depending on the name of the
|
``name=value``. To print variant names or values independently, use
|
||||||
variant. Non-boolean variants print as ``name=value``. To
|
|
||||||
print variant names or values independently, use
|
|
||||||
``spec.format('{variants.<name>.name}')`` or
|
``spec.format('{variants.<name>.name}')`` or
|
||||||
``spec.format('{variants.<name>.value}')``.
|
``spec.format('{variants.<name>.value}')``.
|
||||||
|
|
||||||
Spec format strings use ``\`` as the escape character. Use
|
There are a few attributes on specs that can be specified as key-value pairs
|
||||||
``\{`` and ``\}`` for literal braces, and ``\\`` for the
|
that are *not* variants, e.g.: ``os``, ``arch``, ``architecture``, ``target``,
|
||||||
literal ``\`` character.
|
``namespace``, etc. You can format these with an optional ``key=`` prefix, e.g.
|
||||||
|
``{namespace=namespace}`` or ``{arch=architecture}``, etc. The ``key=`` prefix
|
||||||
|
will be colorized along with the value.
|
||||||
|
|
||||||
|
When formatting specs, key-value pairs are separated from preceding parts of the
|
||||||
|
spec by whitespace. To avoid printing extra whitespace when the formatted
|
||||||
|
attribute is not set, you can add whitespace to the key *inside* the braces of
|
||||||
|
the format string, e.g.:
|
||||||
|
|
||||||
|
{ namespace=namespace}
|
||||||
|
|
||||||
|
This evaluates to `` namespace=builtin`` if ``namespace`` is set to ``builtin``,
|
||||||
|
and to ``""`` if ``namespace`` is ``None``.
|
||||||
|
|
||||||
|
Spec format strings use ``\`` as the escape character. Use ``\{`` and ``\}`` for
|
||||||
|
literal braces, and ``\\`` for the literal ``\`` character.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
format_string: string containing the format to be expanded
|
format_string: string containing the format to be expanded
|
||||||
color: True for colorized result; False for no color; None for auto color.
|
color: True for colorized result; False for no color; None for auto color.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
ensure_modern_format_string(format_string)
|
ensure_modern_format_string(format_string)
|
||||||
|
|
||||||
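Aside, not part of the diff: a minimal usage sketch of the format-string behavior documented above. The package name and the printed values are illustrative; only ``Spec.format`` and the specifiers described in the docstring are assumed.

# Illustrative only; assumes a Spack session where "zlib" concretizes.
import spack.spec

s = spack.spec.Spec("zlib").concretized()

# Attribute selection with sigils; each piece is colored per its attribute.
print(s.format("{name}{@version}{/hash:7}"))

# Non-variant key-value attributes take an optional ``key=`` prefix. Putting the
# separating space *inside* the braces means nothing extra is printed when the
# attribute evaluates to None.
print(s.format("{name}{ namespace=namespace}{ arch=architecture}"))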
@@ -4504,10 +3878,6 @@ def format_attribute(match_object: Match) -> str:
                 raise SpecFormatSigilError(sig, "compilers", attribute)
             elif sig == "/" and attribute != "abstract_hash":
                 raise SpecFormatSigilError(sig, "DAG hashes", attribute)
-            elif sig == "arch=":
-                if attribute not in ("architecture", "arch"):
-                    raise SpecFormatSigilError(sig, "the architecture", attribute)
-                sig = " arch="  # include space as separator

             # Iterate over components using getattr to get next element
             for idx, part in enumerate(parts):
@@ -4552,15 +3922,19 @@ def format_attribute(match_object: Match) -> str:

             # Set color codes for various attributes
             color = None
-            if "variants" in parts:
-                color = VARIANT_COLOR
-            elif "architecture" in parts:
+            if "architecture" in parts:
                 color = ARCHITECTURE_COLOR
+            elif "variants" in parts or sig.endswith("="):
+                color = VARIANT_COLOR
             elif "compiler" in parts or "compiler_flags" in parts:
                 color = COMPILER_COLOR
             elif "version" in parts or "versions" in parts:
                 color = VERSION_COLOR

+            # return empty string if the value of the attribute is None.
+            if current is None:
+                return ""
+
             # return colored output
             return safe_color(sig, str(current), color)

@@ -5390,10 +4764,8 @@ def long_message(self):
         )


-class ArchitecturePropagationError(spack.error.SpecError):
-    """Raised when the double equal symbols are used to assign
-    the spec's architecture.
-    """
+class UnsupportedPropagationError(spack.error.SpecError):
+    """Raised when propagation (==) is used with reserved variant names."""


 class DuplicateDependencyError(spack.error.SpecError):
@@ -5523,7 +4895,7 @@ def __init__(self, spec):
 class AmbiguousHashError(spack.error.SpecError):
     def __init__(self, msg, *specs):
         spec_fmt = "{namespace}.{name}{@version}{%compiler}{compiler_flags}"
-        spec_fmt += "{variants}{arch=architecture}{/hash:7}"
+        spec_fmt += "{variants}{ arch=architecture}{/hash:7}"
         specs_str = "\n  " + "\n  ".join(spec.format(spec_fmt) for spec in specs)
         super().__init__(msg + specs_str)

@@ -13,7 +13,7 @@
 import stat
 import sys
 import tempfile
-from typing import Callable, Dict, Iterable, Optional, Set
+from typing import Callable, Dict, Generator, Iterable, List, Optional, Set

 import llnl.string
 import llnl.util.lang
@@ -40,6 +40,7 @@
 import spack.resource
 import spack.spec
 import spack.stage
+import spack.util.crypto
 import spack.util.lock
 import spack.util.path as sup
 import spack.util.pattern as pattern
@@ -351,8 +352,10 @@ class Stage(LockableStagingDir):
     def __init__(
         self,
         url_or_fetch_strategy,
+        *,
         name=None,
-        mirror_paths=None,
+        mirror_paths: Optional[spack.mirror.MirrorLayout] = None,
+        mirrors: Optional[Iterable[spack.mirror.Mirror]] = None,
         keep=False,
         path=None,
         lock=True,
@@ -406,12 +409,18 @@ def __init__(
         # self.fetcher can change with mirrors.
         self.default_fetcher = self.fetcher
         self.search_fn = search_fn
-        # used for mirrored archives of repositories.
-        self.skip_checksum_for_mirror = True
+        # If we fetch from a mirror, but the original data is from say git, we can currently not
+        # prove that they are equal (we don't even have a tree hash in package.py). This bool is
+        # used to skip checksum verification and instead warn the user.
+        if isinstance(self.default_fetcher, fs.URLFetchStrategy):
+            self.skip_checksum_for_mirror = not bool(self.default_fetcher.digest)
+        else:
+            self.skip_checksum_for_mirror = True

         self.srcdir = None

         self.mirror_paths = mirror_paths
+        self.mirrors = list(mirrors) if mirrors else []

     @property
     def expected_archive_files(self):
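Aside, not part of the diff: the checksum-skipping rule introduced above, restated as a stand-alone helper for clarity. Only ``fs.URLFetchStrategy`` and its ``digest`` attribute (both used in the diff) are assumed; the helper itself is hypothetical.

import spack.fetch_strategy as fs


def skips_checksum_for_mirror(default_fetcher) -> bool:
    """Hypothetical restatement of the rule above: URL archives carry a digest from
    package.py, so a mirrored copy can still be verified; VCS checkouts have no such
    digest, so verification is skipped and the user is warned instead."""
    if isinstance(default_fetcher, fs.URLFetchStrategy):
        return not bool(default_fetcher.digest)
    return True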
@@ -466,100 +475,87 @@ def disable_mirrors(self):
         """The Stage will not attempt to look for the associated fetcher
         target in any of Spack's mirrors (including the local download cache).
         """
-        self.mirror_paths = []
+        self.mirror_paths = None

-    def fetch(self, mirror_only=False, err_msg=None):
-        """Retrieves the code or archive
-
-        Args:
-            mirror_only (bool): only fetch from a mirror
-            err_msg (str or None): the error message to display if all fetchers
-                fail or ``None`` for the default fetch failure message
-        """
+    def _generate_fetchers(self, mirror_only=False) -> Generator[fs.FetchStrategy, None, None]:
         fetchers = []
         if not mirror_only:
             fetchers.append(self.default_fetcher)

-        # TODO: move mirror logic out of here and clean it up!
-        # TODO: Or @alalazo may have some ideas about how to use a
-        # TODO: CompositeFetchStrategy here.
-        self.skip_checksum_for_mirror = True
-        if self.mirror_paths:
-            # Join URLs of mirror roots with mirror paths. Because
-            # urljoin() will strip everything past the final '/' in
-            # the root, so we add a '/' if it is not present.
-            mirror_urls = [
-                url_util.join(mirror.fetch_url, rel_path)
-                for mirror in spack.mirror.MirrorCollection(source=True).values()
-                if not mirror.fetch_url.startswith("oci://")
-                for rel_path in self.mirror_paths
-            ]
-
-            # If this archive is normally fetched from a tarball URL,
-            # then use the same digest. `spack mirror` ensures that
-            # the checksum will be the same.
+        # If this archive is normally fetched from a URL, then use the same digest.
+        if isinstance(self.default_fetcher, fs.URLFetchStrategy):
+            digest = self.default_fetcher.digest
+            expand = self.default_fetcher.expand_archive
+            extension = self.default_fetcher.extension
+        else:
             digest = None
             expand = True
             extension = None
-            if isinstance(self.default_fetcher, fs.URLFetchStrategy):
-                digest = self.default_fetcher.digest
-                expand = self.default_fetcher.expand_archive
-                extension = self.default_fetcher.extension
-
-            # Have to skip the checksum for things archived from
-            # repositories. How can this be made safer?
-            self.skip_checksum_for_mirror = not bool(digest)

+        # TODO: move mirror logic out of here and clean it up!
+        # TODO: Or @alalazo may have some ideas about how to use a
+        # TODO: CompositeFetchStrategy here.
+        if self.mirror_paths and self.mirrors:
             # Add URL strategies for all the mirrors with the digest
             # Insert fetchers in the order that the URLs are provided.
-            for url in reversed(mirror_urls):
-                fetchers.insert(
-                    0, fs.from_url_scheme(url, digest, expand=expand, extension=extension)
+            fetchers[:0] = (
+                fs.from_url_scheme(
+                    url_util.join(mirror.fetch_url, rel_path),
+                    checksum=digest,
+                    expand=expand,
+                    extension=extension,
                 )
+                for mirror in self.mirrors
+                if not mirror.fetch_url.startswith("oci://")
+                for rel_path in self.mirror_paths
+            )

-            if self.default_fetcher.cachable:
-                for rel_path in reversed(list(self.mirror_paths)):
-                    cache_fetcher = spack.caches.FETCH_CACHE.fetcher(
-                        rel_path, digest, expand=expand, extension=extension
-                    )
-                    fetchers.insert(0, cache_fetcher)
+        if self.mirror_paths and self.default_fetcher.cachable:
+            fetchers[:0] = (
+                spack.caches.FETCH_CACHE.fetcher(
+                    rel_path, digest, expand=expand, extension=extension
+                )
+                for rel_path in self.mirror_paths
+            )

-        def generate_fetchers():
-            for fetcher in fetchers:
-                yield fetcher
-            # The search function may be expensive, so wait until now to
-            # call it so the user can stop if a prior fetcher succeeded
-            if self.search_fn and not mirror_only:
-                dynamic_fetchers = self.search_fn()
-                for fetcher in dynamic_fetchers:
-                    yield fetcher
+        yield from fetchers

-        def print_errors(errors):
-            for msg in errors:
-                tty.debug(msg)
+        # The search function may be expensive, so wait until now to call it so the user can stop
+        # if a prior fetcher succeeded
+        if self.search_fn and not mirror_only:
+            yield from self.search_fn()

-        errors = []
-        for fetcher in generate_fetchers():
+    def fetch(self, mirror_only: bool = False, err_msg: Optional[str] = None) -> None:
+        """Retrieves the code or archive
+
+        Args:
+            mirror_only: only fetch from a mirror
+            err_msg: the error message to display if all fetchers fail or ``None`` for the default
+                fetch failure message
+        """
+        errors: List[str] = []
+        for fetcher in self._generate_fetchers(mirror_only):
             try:
                 fetcher.stage = self
                 self.fetcher = fetcher
                 self.fetcher.fetch()
                 break
-            except spack.fetch_strategy.NoCacheError:
+            except fs.NoCacheError:
                 # Don't bother reporting when something is not cached.
                 continue
+            except fs.FailedDownloadError as f:
+                errors.extend(f"{fetcher}: {e.__class__.__name__}: {e}" for e in f.exceptions)
+                continue
             except spack.error.SpackError as e:
-                errors.append("Fetching from {0} failed.".format(fetcher))
-                tty.debug(e)
+                errors.append(f"{fetcher}: {e.__class__.__name__}: {e}")
                 continue
         else:
-            print_errors(errors)
-
             self.fetcher = self.default_fetcher
-            default_msg = "All fetchers failed for {0}".format(self.name)
-            raise spack.error.FetchError(err_msg or default_msg, None)
-
-        print_errors(errors)
+            if err_msg:
+                raise spack.error.FetchError(err_msg)
+            raise spack.error.FetchError(
+                f"All fetchers failed for {self.name}", "\n".join(f"  {e}" for e in errors)
+            )

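Aside, not part of the diff: a toy illustration of the fetcher ordering produced by ``_generate_fetchers`` above. Because the later ``fetchers[:0] = ...`` assignments prepend, the download cache ends up first, then mirrors, then the package's own fetcher, with any ``search_fn`` results yielded last; ``fetch()`` then stops at the first fetcher that succeeds. The strings below are stand-ins for fetcher objects.

# Toy model of the prepend ordering; runnable as-is.
fetchers = ["default"]                    # appended only when mirror_only is False
fetchers[:0] = ["mirror-1", "mirror-2"]   # mirror fetchers are prepended
fetchers[:0] = ["download-cache"]         # the cache fetcher is prepended after that
print(fetchers)  # ['download-cache', 'mirror-1', 'mirror-2', 'default']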
     def steal_source(self, dest):
         """Copy the source_path directory in its entirety to directory dest
@@ -597,16 +593,19 @@ def steal_source(self, dest):
         self.destroy()

     def check(self):
-        """Check the downloaded archive against a checksum digest.
-        No-op if this stage checks code out of a repository."""
+        """Check the downloaded archive against a checksum digest."""
         if self.fetcher is not self.default_fetcher and self.skip_checksum_for_mirror:
+            cache = isinstance(self.fetcher, fs.CacheURLFetchStrategy)
+            if cache:
+                secure_msg = "your download cache is in a secure location"
+            else:
+                secure_msg = "you trust this mirror and have a secure connection"
             tty.warn(
-                "Fetching from mirror without a checksum!",
-                "This package is normally checked out from a version "
-                "control system, but it has been archived on a spack "
-                "mirror. This means we cannot know a checksum for the "
-                "tarball in advance. Be sure that your connection to "
-                "this mirror is secure!",
+                f"Using {'download cache' if cache else 'a mirror'} instead of version control",
+                "The required sources are normally checked out from a version control system, "
+                f"but have been archived {'in download cache' if cache else 'on a mirror'}: "
+                f"{self.fetcher}. Spack lacks a tree hash to verify the integrity of this "
+                f"archive. Make sure {secure_msg}.",
             )
         elif spack.config.get("config:checksum"):
             self.fetcher.check()
@@ -928,7 +927,7 @@ def interactive_version_filter(
     orig_url_dict = url_dict  # only copy when using editor to modify
     print_header = True
     VERSION_COLOR = spack.spec.VERSION_COLOR
-    while True:
+    while sys.stdin.isatty():
         if print_header:
             has_filter = version_filter != VersionList([":"])
             header = []
@@ -945,7 +944,9 @@ def interactive_version_filter(
             num_new = sum(1 for v in sorted_and_filtered if v not in known_versions)
             header.append(f"{llnl.string.plural(num_new, 'new version')}")
             if has_filter:
-                header.append(colorize(f"Filtered by {VERSION_COLOR}@@{version_filter}@."))
+                header.append(
+                    colorize(f"Filtered by {VERSION_COLOR}@@{version_filter}@. (clear with c)")
+                )

             version_with_url = [
                 colorize(
@@ -1175,7 +1176,7 @@ def _fetch_and_checksum(url, options, keep_stage, action_fn=None):
     try:
         url_or_fs = url
         if options:
-            url_or_fs = fs.URLFetchStrategy(url, fetch_options=options)
+            url_or_fs = fs.URLFetchStrategy(url=url, fetch_options=options)

         with Stage(url_or_fs, keep=keep_stage) as stage:
             # Fetch the archive
@@ -1188,7 +1189,7 @@ def _fetch_and_checksum(url, options, keep_stage, action_fn=None):
             # Checksum the archive and add it to the list
             checksum = spack.util.crypto.checksum(hashlib.sha256, stage.archive_file)
             return checksum, None
-    except FailedDownloadError:
+    except fs.FailedDownloadError:
         return None, f"[WORKER] Failed to fetch {url}"
     except Exception as e:
         return None, f"[WORKER] Something failed on {url}, skipping. ({e})"
@@ -1208,7 +1209,3 @@ class RestageError(StageError):

 class VersionFetchError(StageError):
     """Raised when we can't determine a URL to fetch a package."""
-
-
-# Keep this in namespace for convenience
-FailedDownloadError = fs.FailedDownloadError

@@ -371,7 +371,6 @@ def use_store(
     data.update(extra_data)

     # Swap the store with the one just constructed and return it
-    ensure_singleton_created()
     spack.config.CONFIG.push_scope(
         spack.config.InternalConfigScope(name=scope_name, data={"config": {"install_tree": data}})
     )

@@ -79,9 +79,11 @@ def restore(self):
         self.test_state.restore()
         spack.main.spack_working_dir = self.spack_working_dir
         env = pickle.load(self.serialized_env) if _SERIALIZE else self.env
-        pkg = pickle.load(self.serialized_pkg) if _SERIALIZE else self.pkg
         if env:
             spack.environment.activate(env)
+        # Order of operation is important, since the package might be retrieved
+        # from a repo defined within the environment configuration
+        pkg = pickle.load(self.serialized_pkg) if _SERIALIZE else self.pkg
         return pkg

@@ -208,7 +208,6 @@ def test_satisfy_strict_constraint_when_not_concrete(architecture_tuple, constra
     ],
 )
 @pytest.mark.usefixtures("mock_packages", "config")
-@pytest.mark.only_clingo("Fixing the parser broke this test for the original concretizer.")
 @pytest.mark.skipif(
     str(archspec.cpu.host().family) != "x86_64", reason="tests are for x86_64 uarch ranges"
 )

@@ -337,7 +337,7 @@ def test_relative_rpaths_install_nondefault(mirror_dir):
     buildcache_cmd("install", "-uf", cspec.name)


-def test_push_and_fetch_keys(mock_gnupghome):
+def test_push_and_fetch_keys(mock_gnupghome, tmp_path):
     testpath = str(mock_gnupghome)

     mirror = os.path.join(testpath, "mirror")
@@ -357,7 +357,7 @@ def test_push_and_fetch_keys(mock_gnupghome):
     assert len(keys) == 1
     fpr = keys[0]

-    bindist.push_keys(mirror, keys=[fpr], regenerate_index=True)
+    bindist.push_keys(mirror, keys=[fpr], tmpdir=str(tmp_path), update_index=True)

     # dir 2: import the key from the mirror, and confirm that its fingerprint
     # matches the one created above
@@ -464,7 +464,7 @@ def test_generate_index_missing(monkeypatch, tmpdir, mutable_config):
     assert "libelf" not in cache_list


-def test_generate_key_index_failure(monkeypatch):
+def test_generate_key_index_failure(monkeypatch, tmp_path):
     def list_url(url, recursive=False):
         if "fails-listing" in url:
             raise Exception("Couldn't list the directory")
@@ -477,13 +477,13 @@ def push_to_url(*args, **kwargs):
     monkeypatch.setattr(web_util, "push_to_url", push_to_url)

     with pytest.raises(CannotListKeys, match="Encountered problem listing keys"):
-        bindist.generate_key_index("s3://non-existent/fails-listing")
+        bindist.generate_key_index("s3://non-existent/fails-listing", str(tmp_path))

     with pytest.raises(GenerateIndexError, match="problem pushing .* Couldn't upload"):
-        bindist.generate_key_index("s3://non-existent/fails-uploading")
+        bindist.generate_key_index("s3://non-existent/fails-uploading", str(tmp_path))


-def test_generate_package_index_failure(monkeypatch, capfd):
+def test_generate_package_index_failure(monkeypatch, tmp_path, capfd):
     def mock_list_url(url, recursive=False):
         raise Exception("Some HTTP error")

@@ -492,15 +492,16 @@ def mock_list_url(url, recursive=False):
     test_url = "file:///fake/keys/dir"

     with pytest.raises(GenerateIndexError, match="Unable to generate package index"):
-        bindist.generate_package_index(test_url)
+        bindist.generate_package_index(test_url, str(tmp_path))

     assert (
-        f"Warning: Encountered problem listing packages at {test_url}: Some HTTP error"
+        "Warning: Encountered problem listing packages at "
+        f"{test_url}/{bindist.BUILD_CACHE_RELATIVE_PATH}: Some HTTP error"
         in capfd.readouterr().err
     )


-def test_generate_indices_exception(monkeypatch, capfd):
+def test_generate_indices_exception(monkeypatch, tmp_path, capfd):
     def mock_list_url(url, recursive=False):
         raise Exception("Test Exception handling")

@@ -509,10 +510,10 @@ def mock_list_url(url, recursive=False):
     url = "file:///fake/keys/dir"

     with pytest.raises(GenerateIndexError, match=f"Encountered problem listing keys at {url}"):
-        bindist.generate_key_index(url)
+        bindist.generate_key_index(url, str(tmp_path))

     with pytest.raises(GenerateIndexError, match="Unable to generate package index"):
-        bindist.generate_package_index(url)
+        bindist.generate_package_index(url, str(tmp_path))

     assert f"Encountered problem listing packages at {url}" in capfd.readouterr().err

@@ -228,3 +228,25 @@ def test_source_is_disabled(mutable_config):
     spack.config.add("bootstrap:trusted:{0}:{1}".format(conf["name"], False))
     with pytest.raises(ValueError):
         spack.bootstrap.core.source_is_enabled_or_raise(conf)
+
+
+@pytest.mark.regression("45247")
+def test_use_store_does_not_try_writing_outside_root(tmp_path, monkeypatch, mutable_config):
+    """Tests that when we use the 'use_store' context manager, there is no attempt at creating
+    a Store outside the given root.
+    """
+    initial_store = mutable_config.get("config:install_tree:root")
+    user_store = tmp_path / "store"
+
+    fn = spack.store.Store.__init__
+
+    def _checked_init(self, root, *args, **kwargs):
+        fn(self, root, *args, **kwargs)
+        assert self.root == str(user_store)
+
+    monkeypatch.setattr(spack.store.Store, "__init__", _checked_init)
+
+    spack.store.reinitialize()
+    with spack.store.use_store(user_store):
+        assert spack.config.CONFIG.get("config:install_tree:root") == str(user_store)
+    assert spack.config.CONFIG.get("config:install_tree:root") == initial_store

@@ -13,34 +13,34 @@
 import spack.spec
 import spack.util.url

-install = spack.main.SpackCommand("install")
-
 pytestmark = pytest.mark.not_on_windows("does not run on windows")


-def test_build_tarball_overwrite(install_mockery, mock_fetch, monkeypatch, tmpdir):
-    with tmpdir.as_cwd():
-        spec = spack.spec.Spec("trivial-install-test-package").concretized()
-        install(str(spec))
-
-        # Runs fine the first time, throws the second time
-        out_url = spack.util.url.path_to_file_url(str(tmpdir))
-        bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))
-        with pytest.raises(bd.NoOverwriteException):
-            bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))
-
-        # Should work fine with force=True
-        bd.push_or_raise(spec, out_url, bd.PushOptions(force=True, unsigned=True))
-
-        # Remove the tarball and try again.
-        # This must *also* throw, because of the existing .spec.json file
-        os.remove(
-            os.path.join(
-                bd.build_cache_prefix("."),
-                bd.tarball_directory_name(spec),
-                bd.tarball_name(spec, ".spack"),
-            )
-        )
-
-        with pytest.raises(bd.NoOverwriteException):
-            bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))
+def test_build_tarball_overwrite(install_mockery, mock_fetch, monkeypatch, tmp_path):
+    spec = spack.spec.Spec("trivial-install-test-package").concretized()
+    spec.package.do_install(fake=True)
+
+    specs = [spec]
+
+    # Runs fine the first time, second time it's a no-op
+    out_url = spack.util.url.path_to_file_url(str(tmp_path))
+    skipped = bd.push_or_raise(specs, out_url, signing_key=None)
+    assert not skipped
+
+    skipped = bd.push_or_raise(specs, out_url, signing_key=None)
+    assert skipped == specs
+
+    # Should work fine with force=True
+    skipped = bd.push_or_raise(specs, out_url, signing_key=None, force=True)
+    assert not skipped
+
+    # Remove the tarball, which should cause push to push.
+    os.remove(
+        tmp_path
+        / bd.BUILD_CACHE_RELATIVE_PATH
+        / bd.tarball_directory_name(spec)
+        / bd.tarball_name(spec, ".spack")
+    )
+
+    skipped = bd.push_or_raise(specs, out_url, signing_key=None)
+    assert not skipped

@@ -6,6 +6,7 @@
 import os
 import platform
 import posixpath
+import sys

 import pytest

@@ -287,6 +288,25 @@ def platform_pathsep(pathlist):
     assert name not in os.environ


+def test_compiler_custom_env(config, mock_packages, monkeypatch, working_env):
+    if sys.platform == "win32":
+        test_path = r"C:\test\path\element\custom-env" + "\\"
+    else:
+        test_path = r"/test/path/element/custom-env/"
+
+    def custom_env(pkg, env):
+        env.prepend_path("PATH", test_path)
+        env.append_flags("ENV_CUSTOM_CC_FLAGS", "--custom-env-flag1")
+
+    pkg = spack.spec.Spec("cmake").concretized().package
+    monkeypatch.setattr(pkg.compiler, "setup_custom_environment", custom_env)
+    spack.build_environment.setup_package(pkg, False)
+
+    # Note: trailing slash may be stripped by internal logic
+    assert test_path[:-1] in os.environ["PATH"]
+    assert "--custom-env-flag1" in os.environ["ENV_CUSTOM_CC_FLAGS"]
+
+
 def test_external_config_env(mock_packages, mutable_config, working_env):
     cmake_config = {
         "externals": [

@@ -355,6 +355,15 @@ def test_fc_flags(wrapper_environment, wrapper_flags):
     )


+def test_always_cflags(wrapper_environment, wrapper_flags):
+    with set_env(SPACK_ALWAYS_CFLAGS="-always1 -always2"):
+        check_args(
+            cc,
+            ["-v", "--cmd-line-v-opt"],
+            [real_cc] + ["-always1", "-always2"] + ["-v", "--cmd-line-v-opt"],
+        )
+
+
 def test_Wl_parsing(wrapper_environment):
     check_args(
         cc,

@@ -286,7 +286,7 @@ def _fail(self, args):
 def test_ci_create_buildcache(tmpdir, working_env, config, mock_packages, monkeypatch):
     """Test that create_buildcache returns a list of objects with the correct
     keys and types."""
-    monkeypatch.setattr(spack.ci, "_push_to_build_cache", lambda a, b, c: True)
+    monkeypatch.setattr(ci, "push_to_build_cache", lambda a, b, c: True)

     results = ci.create_buildcache(
         None, destination_mirror_urls=["file:///fake-url-one", "file:///fake-url-two"]

@@ -12,7 +12,9 @@
 import spack.binary_distribution
 import spack.cmd.buildcache
+import spack.deptypes
 import spack.environment as ev
+import spack.error
 import spack.main
 import spack.spec
 import spack.util.url
@@ -382,11 +384,14 @@ def test_correct_specs_are_pushed(

     packages_to_push = []

-    def fake_push(node, push_url, options):
-        assert isinstance(node, Spec)
-        packages_to_push.append(node.name)
+    def fake_push(specs, *args, **kwargs):
+        assert all(isinstance(s, Spec) for s in specs)
+        packages_to_push.extend(s.name for s in specs)
+        skipped = []
+        errors = []
+        return skipped, errors

-    monkeypatch.setattr(spack.binary_distribution, "push_or_raise", fake_push)
+    monkeypatch.setattr(spack.binary_distribution, "_push", fake_push)

     buildcache_create_args = ["create", "--unsigned"]

@@ -443,3 +448,54 @@ def test_skip_no_redistribute(mock_packages, config):
     filtered = spack.cmd.buildcache._skip_no_redistribute_for_public(specs)
     assert not any(s.name == "no-redistribute" for s in filtered)
     assert any(s.name == "no-redistribute-dependent" for s in filtered)
+
+
+def test_best_effort_vs_fail_fast_when_dep_not_installed(tmp_path, mutable_database):
+    """When --fail-fast is passed, the push command should fail if it immediately finds an
+    uninstalled dependency. Otherwise, failure to push one dependency shouldn't prevent the
+    others from being pushed."""
+
+    mirror("add", "--unsigned", "my-mirror", str(tmp_path))
+
+    # Uninstall mpich so that its dependent mpileaks can't be pushed
+    for s in mutable_database.query_local("mpich"):
+        s.package.do_uninstall(force=True)
+
+    with pytest.raises(spack.cmd.buildcache.PackagesAreNotInstalledError, match="mpich"):
+        buildcache("push", "--update-index", "--fail-fast", "my-mirror", "mpileaks^mpich")
+
+    # nothing should be pushed due to --fail-fast.
+    assert not os.listdir(tmp_path)
+    assert not spack.binary_distribution.update_cache_and_get_specs()
+
+    with pytest.raises(spack.cmd.buildcache.PackageNotInstalledError):
+        buildcache("push", "--update-index", "my-mirror", "mpileaks^mpich")
+
+    specs = spack.binary_distribution.update_cache_and_get_specs()
+
+    # everything but mpich should be pushed
+    mpileaks = mutable_database.query_local("mpileaks^mpich")[0]
+    assert set(specs) == {s for s in mpileaks.traverse() if s.name != "mpich"}
+
+
+def test_push_without_build_deps(tmp_path, temporary_store, mock_packages, mutable_config):
+    """Spack should not error when build deps are uninstalled and --without-build-dependencies is
+    passed."""
+
+    mirror("add", "--unsigned", "my-mirror", str(tmp_path))
+
+    s = spack.spec.Spec("dtrun3").concretized()
+    s.package.do_install(fake=True)
+    s["dtbuild3"].package.do_uninstall()
+
+    # fails when build deps are required
+    with pytest.raises(spack.error.SpackError, match="package not installed"):
+        buildcache(
+            "push", "--update-index", "--with-build-dependencies", "my-mirror", f"/{s.dag_hash()}"
+        )
+
+    # succeeds when build deps are not required
+    buildcache(
+        "push", "--update-index", "--without-build-dependencies", "my-mirror", f"/{s.dag_hash()}"
+    )
+    assert spack.binary_distribution.update_cache_and_get_specs() == [s]

@@ -83,7 +83,6 @@ def test_checksum_args(arguments, expected):
     assert check == expected


-@pytest.mark.not_on_windows("Not supported on Windows (yet)")
 @pytest.mark.parametrize(
     "arguments,expected",
     [

(One file's diff was suppressed because it is too large.)
@@ -6,7 +6,6 @@
 import filecmp
 import os
 import shutil
-import subprocess

 import pytest

@@ -156,22 +155,6 @@ def test_update_with_header(tmpdir):
     commands("--update", str(update_file), "--header", str(filename))


-@pytest.mark.xfail
-def test_no_pipe_error():
-    """Make sure we don't see any pipe errors when piping output."""
-
-    proc = subprocess.Popen(
-        ["spack", "commands", "--format=rst"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
-    )
-
-    # Call close() on stdout to cause a broken pipe
-    proc.stdout.close()
-    proc.wait()
-    stderr = proc.stderr.read().decode("utf-8")
-
-    assert "Broken pipe" not in stderr
-
-
 def test_bash_completion():
     """Test the bash completion writer."""
     out1 = commands("--format=bash")

@@ -16,8 +16,6 @@

 debug = SpackCommand("debug")

-pytestmark = pytest.mark.not_on_windows("does not run on windows")
-

 @pytest.mark.db
 def test_create_db_tarball(tmpdir, database):
@@ -60,4 +58,3 @@ def test_report():
     assert get_version() in out
     assert platform.python_version() in out
     assert str(architecture) in out
-    assert spack.config.get("config:concretizer") in out

@@ -14,8 +14,6 @@
 deprecate = SpackCommand("deprecate")
 find = SpackCommand("find")

-pytestmark = pytest.mark.not_on_windows("does not run on windows")
-

 def test_deprecate(mock_packages, mock_archive, mock_fetch, install_mockery):
     install("libelf@0.8.13")

@@ -181,7 +181,6 @@ def test_diff_cmd(install_mockery, mock_fetch, mock_archive, mock_packages):
     assert ["hash", "mpileaks %s" % specB.dag_hash()] in c["b_not_a"]


-@pytest.mark.not_on_windows("Not supported on Windows (yet)")
 def test_load_first(install_mockery, mock_fetch, mock_archive, mock_packages):
     """Test with and without the --first option"""
     install_cmd("mpileaks")

@@ -1057,7 +1057,6 @@ def test_env_with_included_config_file(mutable_mock_env_path, packages_file):
     assert any(x.satisfies("mpileaks@2.2") for x in e._get_environment_specs())


-@pytest.mark.only_clingo("original concretizer does not support requirements")
 def test_config_change_existing(mutable_mock_env_path, tmp_path, mock_packages, mutable_config):
     """Test ``config change`` with config in the ``spack.yaml`` as well as an
     included file scope.
@@ -1133,7 +1132,6 @@ def test_config_change_existing(mutable_mock_env_path, tmp_path, mock_packages,
     spack.spec.Spec("bowtie@1.2.2").concretized()


-@pytest.mark.only_clingo("original concretizer does not support requirements")
 def test_config_change_new(mutable_mock_env_path, tmp_path, mock_packages, mutable_config):
     spack_yaml = tmp_path / ev.manifest_name
     spack_yaml.write_text(
@@ -1736,6 +1734,17 @@ def test_env_include_concrete_env_yaml(env_name):
     assert test.path in combined_yaml["include_concrete"]


+@pytest.mark.regression("45766")
+@pytest.mark.parametrize("format", ["v1", "v2", "v3"])
+def test_env_include_concrete_old_env(format, tmpdir):
+    lockfile = os.path.join(spack.paths.test_path, "data", "legacy_env", f"{format}.lock")
+    # create an env from old .lock file -- this does not update the format
+    env("create", "old-env", lockfile)
+    env("create", "--include-concrete", "old-env", "test")
+
+    assert ev.read("old-env").all_specs() == ev.read("test").all_specs()
+
+
 def test_env_bad_include_concrete_env():
     with pytest.raises(ev.SpackEnvironmentError):
         env("create", "--include-concrete", "nonexistant_env", "combined_env")
@@ -2332,8 +2341,6 @@ def test_stack_concretize_extraneous_deps(tmpdir, mock_packages):
     # FIXME: constraints for stacks
     # FIXME: This now works for statically-determinable invalid deps
     # FIXME: But it still does not work for dynamically determined invalid deps
-    # if spack.config.get('config:concretizer') == 'clingo':
-    #    pytest.skip('Clingo concretizer does not support soft constraints')

     filename = str(tmpdir.join("spack.yaml"))
     with open(filename, "w") as f:
@@ -3180,9 +3187,7 @@ def test_concretize_user_specs_together():
     e.remove("mpich")
     e.add("mpich2")

-    exc_cls = spack.error.SpackError
-    if spack.config.get("config:concretizer") == "clingo":
-        exc_cls = spack.error.UnsatisfiableSpecError
+    exc_cls = spack.error.UnsatisfiableSpecError

     # Concretizing without invalidating the concrete spec for mpileaks fails
     with pytest.raises(exc_cls):
@@ -3208,10 +3213,8 @@ def test_duplicate_packages_raise_when_concretizing_together():
     e.add("mpileaks~opt")
     e.add("mpich")

-    exc_cls, match = spack.error.SpackError, None
-    if spack.config.get("config:concretizer") == "clingo":
-        exc_cls = spack.error.UnsatisfiableSpecError
-        match = r"You could consider setting `concretizer:unify`"
+    exc_cls = spack.error.UnsatisfiableSpecError
+    match = r"You could consider setting `concretizer:unify`"

     with pytest.raises(exc_cls, match=match):
         e.concretize()
Some files were not shown because too many files have changed in this diff.