Compare commits

595 commits: develop-20...hs/fix/fas
| Author | SHA1 | Date |
|---|---|---|
| | 39cf85ae26 | |
| | e035bd176a | |
| | 925f4c3f1e | |
| | be2c0bb033 | |
| | b8c357556e | |
| | d20e3fbd84 | |
| | f20803ab3b | |
| | 318db244c8 | |
| | 4d979b0676 | |
| | a3dbbae861 | |
| | 4e72a09578 | |
| | 9ed941b7d6 | |
| | 161b67fd09 | |
| | 1a8d4e46e4 | |
| | 33e152accc | |
| | 1da7ddc2b3 | |
| | 0ec918570a | |
| | a9e6074996 | |
| | 30db764449 | |
| | f5b8b0ac5d | |
| | 913dcd97bc | |
| | 68570b7587 | |
| | 2da4366ba6 | |
| | 2713b0c216 | |
| | 16b01c5661 | |
| | ebd4ef934c | |
| | 97b5ec6e4f | |
| | 4c9bc8d879 | |
| | 825fd1ccf6 | |
| | 33109ce9b9 | |
| | fb5910d139 | |
| | fa6b8a4ceb | |
| | 97acf2614a | |
| | e99bf48d28 | |
| | b97015b791 | |
| | 1884520f7b | |
| | 7fbfb0f6dc | |
| | 11d276ab6f | |
| | da1d533877 | |
| | c6997e11a7 | |
| | 4322cf56b1 | |
| | 907a37145f | |
| | 4778d2d332 | |
| | eb256476d2 | |
| | ff26d2f833 | |
| | ed916ffe6c | |
| | 4fbdf2f2c0 | |
| | 60ba61f6b2 | |
| | 0a4563fd02 | |
| | 754408ca2b | |
| | 0d817878ea | |
| | bf11fb037b | |
| | 074b845cd3 | |
| | dd26732897 | |
| | 3665c5c01b | |
| | 73219e4b02 | |
| | 57a90c91a4 | |
| | 8f4a0718bf | |
| | 9049ffdc7a | |
| | d1f313342e | |
| | e62cf9c45b | |
| | ee2723dc46 | |
| | d09b185522 | |
| | a31c525778 | |
| | 2aa5a16433 | |
| | 0c164d2740 | |
| | 801390f6be | |
| | c601692bc7 | |
| | 2b9c6790f2 | |
| | 09ae2516d5 | |
| | eb9ff5d7a7 | |
| | dadb30f0e2 | |
| | d45f682573 | |
| | b7601f3042 | |
| | 6b5a479d1e | |
| | 1297dd7fbc | |
| | 75c169d870 | |
| | afe431cfb5 | |
| | 14bc900e9d | |
| | e42e541605 | |
| | 9310fcabd8 | |
| | 6822f99cc6 | |
| | 703cd6a313 | |
| | 5b59a53545 | |
| | b862eec6bc | |
| | dcc199ae63 | |
| | f8da72cffe | |
| | 8650ba3cea | |
| | 54aaa95a35 | |
| | 5a29c9d82b | |
| | c8873ea35c | |
| | c7659df4af | |
| | 0de6c17477 | |
| | 6924c530e2 | |
| | 38c8069ab4 | |
| | 5cc07522ab | |
| | 575a006ca3 | |
| | 23ac56edfb | |
| | 8c3068809f | |
| | 2214fc855d | |
| | d44bdc40c9 | |
| | e952f6be8e | |
| | b95936f752 | |
| | 8d0856d1cc | |
| | 10f7014add | |
| | c9ed91758d | |
| | 2c1d74db9b | |
| | 5b93466340 | |
| | 1ee344c75c | |
| | 754011643c | |
| | 2148292bdb | |
| | cf3576a9bb | |
| | a86f164835 | |
| | 2782ae6d7e | |
| | b1fd6dbb6d | |
| | 18936771ff | |
| | 9a94ea7dfe | |
| | a93bd6cee4 | |
| | 4c247e206c | |
| | fcdaccfeb6 | |
| | 2fc056e27c | |
| | 417c48b07a | |
| | f05033b0d2 | |
| | d63f06e4b7 | |
| | 8296aaf175 | |
| | 86ebcabd46 | |
| | 87329639f2 | |
| | 0acd6ae7b2 | |
| | 395c911689 | |
| | 2664303d7a | |
| | ff9568fa2f | |
| | 632c009569 | |
| | 55918c31d2 | |
| | b8461f3d2d | |
| | 133895e785 | |
| | 19e3ab83cf | |
| | e42a4a8bac | |
| | 1462c35761 | |
| | 0cf8cb70f4 | |
| | 7b2450c22a | |
| | 8f09f523cc | |
| | 24d3ed8c18 | |
| | 492c52089f | |
| | 5df7dc88fc | |
| | 4a75c3c87a | |
| | 35aa02771a | |
| | b38a29f4df | |
| | 9a25a58219 | |
| | c0c9743300 | |
| | a69af3c71f | |
| | cb92d70d6d | |
| | 76ed4578e7 | |
| | 504cc808d6 | |
| | 8076134c91 | |
| | b4b3320f71 | |
| | e35bc1f82d | |
| | 0de1ddcbe8 | |
| | e3aca49e25 | |
| | 94c29e1cfc | |
| | 0c00a297e1 | |
| | c6a1ec996c | |
| | 0437c5314e | |
| | ffde309a99 | |
| | a08b4ae538 | |
| | 404b1c6c19 | |
| | 275339ab4c | |
| | 877930c4ef | |
| | 89d0215d5b | |
| | f003d8c0c3 | |
| | 6ab92b119d | |
| | f809b56f81 | |
| | ec058556ad | |
| | ce78e8a1f8 | |
| | c3435b4e7d | |
| | 02d2c4a9ff | |
| | 9d03170cb2 | |
| | 8892c878ce | |
| | cbf4d3967a | |
| | 8bc0b2e086 | |
| | 354615d491 | |
| | 9ac261af58 | |
| | 34b2f28a5e | |
| | 8a10eff757 | |
| | 44d09f2b2b | |
| | 161b2d7cb0 | |
| | 4de5b664cd | |
| | 5d0c6c3350 | |
| | 8391c8eb87 | |
| | 3108849121 | |
| | 52471bab02 | |
| | b8e3246e89 | |
| | 60cb628283 | |
| | 5bca7187a5 | |
| | 65daf17b54 | |
| | d776dead56 | |
| | 741a4a5d4f | |
| | dbe7b6bc6b | |
| | ffc904aa6b | |
| | f889b2a95e | |
| | 7f609ba934 | |
| | ffd7830bfa | |
| | 20a6b22f78 | |
| | 1bff2f7034 | |
| | ca48233ef7 | |
| | c302049b5d | |
| | 360dbe41f7 | |
| | ea1aa0714b | |
| | 7af1a3d240 | |
| | 962115b386 | |
| | f81ca0cd89 | |
| | 25a5585f7d | |
| | e81ce18cad | |
| | d48d993ae7 | |
| | fbd5c3d589 | |
| | 11aa02b37a | |
| | b9ebf8cc9c | |
| | 1229d5a3cc | |
| | be5a096665 | |
| | 32ce278a51 | |
| | e83536de38 | |
| | ff058377c5 | |
| | e855bb011d | |
| | dbab4828ed | |
| | fac92dceca | |
| | 035b890b17 | |
| | 2a7e5cafa1 | |
| | 49845760b6 | |
| | ce6255c0bb | |
| | f0d54ba39d | |
| | 2ec4281c4f | |
| | e80d75cbe3 | |
| | 47e70c5c3a | |
| | fea2171672 | |
| | 12a475e648 | |
| | c348891c07 | |
| | 019e90ab36 | |
| | 19137b2653 | |
| | 2761e650fa | |
| | 84ea389017 | |
| | 17f07523f5 | |
| | bd2ddb8909 | |
| | f5db757e66 | |
| | 277f8596de | |
| | c8bebff7f5 | |
| | 61d2d21acc | |
| | 7b27aed4c8 | |
| | ad0b256407 | |
| | a2a3a83a26 | |
| | 7d86670826 | |
| | ae306b73c3 | |
| | b63cbe4e6e | |
| | ef220daaca | |
| | e86a3b68f7 | |
| | 7319408bc7 | |
| | b34159348f | |
| | f13d998d21 | |
| | 2912d4a661 | |
| | 8e2ec58859 | |
| | 01eb26578b | |
| | fe0a8a1735 | |
| | d523f12e99 | |
| | 1b0631b69e | |
| | 65bb3a12ea | |
| | 5ac2b8a178 | |
| | b063765c2e | |
| | 4511052d26 | |
| | 3804d128e7 | |
| | f09ce00fe1 | |
| | cdde7c3ccf | |
| | c52c0a482f | |
| | 8a75cdad9a | |
| | e0eea48ccf | |
| | 61cbfc1da0 | |
| | d8c8074762 | |
| | faeef6272d | |
| | f6ad1e23f8 | |
| | a0173a5a94 | |
| | 225be45687 | |
| | 3581821d3c | |
| | 79ad6f6b48 | |
| | 6320993409 | |
| | 1472dcace4 | |
| | 755c113c16 | |
| | 43bcb5056f | |
| | fd1c95a432 | |
| | 5b5be0582f | |
| | aed1a3f980 | |
| | 978be305a7 | |
| | 7ddb40a804 | |
| | 37664b36da | |
| | f33912d707 | |
| | e785d3716e | |
| | 328787b017 | |
| | 67a40c6cc4 | |
| | eccf97af33 | |
| | e63e8b5efa | |
| | bb25210b62 | |
| | f8ab94061f | |
| | ed15b73c3b | |
| | 1f6da280b7 | |
| | 1ad5739094 | |
| | 06f33dcdbb | |
| | 582254f891 | |
| | 31694fe9bd | |
| | a53a14346e | |
| | c102ff953b | |
| | 59a2a87937 | |
| | d86feeac54 | |
| | 43e26b330c | |
| | 9c8b5f58c0 | |
| | 50aa5a7b24 | |
| | ffab156366 | |
| | e147679d40 | |
| | ef9bb7ebe5 | |
| | fc443ea30e | |
| | b601bace24 | |
| | cbad3d464a | |
| | b56e792295 | |
| | 5b279c0732 | |
| | 149753a52e | |
| | b582eacbc1 | |
| | 037196c2bd | |
| | d9e8c5f13e | |
| | 275d1d88f4 | |
| | a07d42d35b | |
| | 19ad29a690 | |
| | 4187c57250 | |
| | 590be9bba1 | |
| | 3edd68d981 | |
| | 5ca0e94bdd | |
| | f6c9d98c8f | |
| | 9854c9e5f2 | |
| | e5a602c1bb | |
| | 37fe3b4984 | |
| | a00fddef4e | |
| | 260b36e272 | |
| | 117480dba9 | |
| | bc75f23927 | |
| | b0f1a0eb7c | |
| | 4d616e1168 | |
| | 4de8344c16 | |
| | 411ea019f1 | |
| | 296f99d800 | |
| | ca4df91e7d | |
| | 9b8c06a049 | |
| | 011ff48f82 | |
| | adcd05b365 | |
| | dc160e3a52 | |
| | ba953352a1 | |
| | d47e726b76 | |
| | 89ab47284f | |
| | 31bdcd7dc6 | |
| | f2bd11cbf4 | |
| | f69e8297a7 | |
| | c9377d9437 | |
| | 899004e29a | |
| | df6427d259 | |
| | 31cfcafeba | |
| | 230bc7010a | |
| | 957c0cc9da | |
| | 99e4d6b446 | |
| | 7acd0cd86c | |
| | d3378ffd25 | |
| | 2356ccc816 | |
| | 1d25275bd1 | |
| | 7678635d36 | |
| | b2e28a0b08 | |
| | 53385f12da | |
| | cfae194fbd | |
| | 88c193b83a | |
| | c006cb573a | |
| | d8d41e9b0e | |
| | c6bfe7c6bd | |
| | 4432f5a1fe | |
| | b9e0914ab2 | |
| | 49a8e84588 | |
| | d36452cf4e | |
| | 580cc3c91b | |
| | 9ba7af404a | |
| | 2da812cbad | |
| | 420266c5c4 | |
| | 049ade024a | |
| | 75c71f7291 | |
| | 0a7533a609 | |
| | 7ecdc175ff | |
| | 962262a1d3 | |
| | adaa0a4863 | |
| | 5f56eee8b0 | |
| | aa6caf9ee6 | |
| | 1eb2cb97ad | |
| | 178a8bbdc5 | |
| | e4c233710c | |
| | b661acfa9b | |
| | 7bddcd27d2 | |
| | 5d2c67ec83 | |
| | 62fd5d12c2 | |
| | 64a7525e3f | |
| | bfe434cbd5 | |
| | 39063baf18 | |
| | f4a4acd272 | |
| | 8d2a059279 | |
| | 34c89c0f7b | |
| | e1ea9e12a6 | |
| | 5611523baf | |
| | 4ff07c3918 | |
| | 49489a4815 | |
| | fb53d31d09 | |
| | 80b9807e10 | |
| | b573ec3920 | |
| | cbdc07248f | |
| | db6a2523d9 | |
| | c710a1597f | |
| | 8c70912b11 | |
| | 64f90c38be | |
| | d2f1e29927 | |
| | 57586df91a | |
| | c00f36b5e2 | |
| | 2a7dd29f95 | |
| | 58e2f7a54f | |
| | e3afe9a364 | |
| | b0314faa3d | |
| | 2099e9f5cd | |
| | 5947c13570 | |
| | 1259992159 | |
| | 0477875667 | |
| | 4d5844b460 | |
| | fc79c37e2d | |
| | 1d76ed7aa4 | |
| | 237f886e5d | |
| | 834ed2f117 | |
| | 73069045ae | |
| | e0efd2bea2 | |
| | b9873c5cea | |
| | 2f711bda5f | |
| | f8381c9a63 | |
| | c8f61c8662 | |
| | 507965cbc6 | |
| | 1f6ce56d3b | |
| | 3918f83ddc | |
| | d4dc13fffb | |
| | 5008519a56 | |
| | dad5ff8796 | |
| | a24220b53f | |
| | 2186ff720e | |
| | 65d61e12c9 | |
| | 05f3fef72c | |
| | 21c2eedb80 | |
| | 66a3c7bc42 | |
| | 8b3d3ac2de | |
| | b5610cdb8b | |
| | 6c6b262140 | |
| | 796e372bde | |
| | 78740942f9 | |
| | 02a991688f | |
| | a8029c8ec4 | |
| | adb8f37fc5 | |
| | 81b41d5948 | |
| | 0ff980ae87 | |
| | 74a93c04d8 | |
| | b72c7deacb | |
| | b061bbbb8f | |
| | bbfad7e979 | |
| | 3a9963b497 | |
| | 8ac00aa58f | |
| | 13f80ff142 | |
| | e8291cbd74 | |
| | 0dded55f39 | |
| | a4ca6452c0 | |
| | 36761715fd | |
| | 02b116bd56 | |
| | d4d7d5830d | |
| | 389b1824e9 | |
| | e65be13056 | |
| | 1580c1745c | |
| | cf54ef0fd3 | |
| | b8b02e0691 | |
| | 8d986b8a99 | |
| | 4b836cb795 | |
| | d5966e676d | |
| | e187508485 | |
| | 80982149d5 | |
| | a1f2e794c7 | |
| | dbe323c631 | |
| | 77ddafaaac | |
| | 17efd6153c | |
| | 93f356c1cc | |
| | 386d115333 | |
| | 6b512210d4 | |
| | ba215ca824 | |
| | 629a3e9396 | |
| | 08b07b9b27 | |
| | 3a38122764 | |
| | 25ab7cc16d | |
| | 41773383ec | |
| | 9855fbf7f1 | |
| | 5ef9d7e3ed | |
| | 5a4b7d3d44 | |
| | 9b40c1e89d | |
| | edff99aab3 | |
| | 22043617aa | |
| | 7df23c7471 | |
| | ef87a9a052 | |
| | af62a062cc | |
| | e6114f544d | |
| | 8d651625f7 | |
| | 9346306b79 | |
| | f3a3e85bb9 | |
| | caaaba464e | |
| | 8fae388f57 | |
| | a332e0c143 | |
| | bc662b8764 | |
| | 7a8955597d | |
| | bcf9c646cf | |
| | a76fffe8ff | |
| | 26c8714a24 | |
| | 0776ff05d2 | |
| | d3beef6584 | |
| | bdd06cb176 | |
| | f639c4f1e6 | |
| | f18a106759 | |
| | 5b01ddf832 | |
| | c1fc98eef8 | |
| | e9831985e4 | |
| | 30e9545d3e | |
| | ce0910a82c | |
| | afc01f9570 | |
| | fc3a484a8c | |
| | de0d5ba883 | |
| | f756ab156c | |
| | 540de118c1 | |
| | 675be13a7b | |
| | 3342866e0e | |
| | 39ff675898 | |
| | f807337273 | |
| | 8e4e3c9060 | |
| | 6d67992191 | |
| | 0f3fea511e | |
| | a0611650e2 | |
| | 5959be577f | |
| | 9b5e508d15 | |
| | 66a30aef98 | |
| | b117074df4 | |
| | 9f4be17451 | |
| | d70e9e131d | |
| | d7643d4f88 | |
| | 73b6aa9b92 | |
| | 6d51d94dab | |
| | 1a965e9ec2 | |
| | a9e9b901d1 | |
| | 95b46dca3d | |
| | 7f6ae2a51e | |
| | 489d5b0f21 | |
| | f884817009 | |
| | a30704fdad | |
| | 57eb21ac3d | |
| | f48c36fc2c | |
| | a09b9f0659 | |
| | 92d940b7f4 | |
| | d8c7cbe8f0 | |
| | 717d4800e1 | |
| | c77916146c | |
| | f5135018dd | |
| | adfb3a77ad | |
| | d5ccf8203d | |
| | 416943f7ed | |
| | 519684978b | |
| | c9de1cbcda | |
| | eedc41405b | |
| | e6f48ceaf5 | |
| | 2ba583e7eb | |
| | 741b6bc0e4 | |
| | ff98c15065 | |
| | 625d032e80 | |
| | 5227f5f387 | |
| | 170e322701 | |
| | cb673862d1 | |
| | 31d6e7a901 | |
| | 79db34574b | |
| | b3831d4e8c | |
| | 35f0feba00 | |
| | 9a04a94a26 | |
| | a87fc566ec | |
| | c8f6a19fc0 | |
| | 365892be4c | |
| | 70acce1aad | |
| | 48e2dd8038 | |
| | 2844f7425b | |
| | f75760d4f2 | |
| | b8e3f35a8b | |
| | f610c3e4d0 | |
| | a0b925dae3 | |
| | c99518709a | |
| | d67b5b300c | |
| | 9bcca28afd | |
| | b07d1e0194 | |
```diff
@@ -5,7 +5,7 @@ coverage:
   status:
     project:
       default:
-        threshold: 0.2%
+        threshold: 2.0%

 ignore:
 - lib/spack/spack/test/.*
```
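Read with standard Codecov semantics, ``threshold`` is how far total project coverage may drop before the status check is reported as failed, so this hunk relaxes the check from 0.2% to 2%. A minimal sketch of the resulting block, with an explanatory comment added:

```yaml
coverage:
  status:
    project:
      default:
        # Allow total project coverage to drop by up to 2 percentage
        # points before the Codecov status check fails.
        threshold: 2.0%
ignore:
  - lib/spack/spack/test/.*
```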
**.github/workflows/audit.yaml** (6 changes, vendored)
@@ -28,8 +28,8 @@ jobs:
|
||||
run:
|
||||
shell: ${{ matrix.system.shell }}
|
||||
steps:
|
||||
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
|
||||
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
|
||||
with:
|
||||
python-version: ${{inputs.python_version}}
|
||||
- name: Install Python packages
|
||||
@@ -66,7 +66,7 @@ jobs:
|
||||
./share/spack/qa/validate_last_exit.ps1
|
||||
spack -d audit externals
|
||||
./share/spack/qa/validate_last_exit.ps1
|
||||
- uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
|
||||
- uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
|
||||
if: ${{ inputs.with_coverage == 'true' && runner.os != 'Windows' }}
|
||||
with:
|
||||
name: coverage-audits-${{ matrix.system.os }}
|
||||
|
||||
**.github/workflows/bin/bootstrap-test.sh** (2 changes, vendored)
```diff
@@ -1,7 +1,7 @@
 #!/bin/bash
 set -e
 source share/spack/setup-env.sh
-$PYTHON bin/spack bootstrap disable github-actions-v0.4
+$PYTHON bin/spack bootstrap disable github-actions-v0.5
 $PYTHON bin/spack bootstrap disable spack-install
 $PYTHON bin/spack $SPACK_FLAGS solve zlib
 tree $BOOTSTRAP/store
```
**.github/workflows/bootstrap.yml** (51 changes, vendored)
```diff
@@ -37,14 +37,14 @@ jobs:
             make patch unzip which xz python3 python3-devel tree \
             cmake bison
       - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
         with:
           fetch-depth: 0
       - name: Bootstrap clingo
         run: |
           source share/spack/setup-env.sh
+          spack bootstrap disable github-actions-v0.6
           spack bootstrap disable github-actions-v0.5
-          spack bootstrap disable github-actions-v0.4
           spack external find cmake bison
           spack -d solve zlib
           tree ~/.spack/bootstrap/store/
@@ -60,17 +60,17 @@ jobs:
         run: |
           brew install cmake bison tree
       - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
         with:
           fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
         with:
           python-version: "3.12"
       - name: Bootstrap clingo
         run: |
           source share/spack/setup-env.sh
+          spack bootstrap disable github-actions-v0.6
           spack bootstrap disable github-actions-v0.5
-          spack bootstrap disable github-actions-v0.4
           spack external find --not-buildable cmake bison
           spack -d solve zlib
           tree $HOME/.spack/bootstrap/store/
@@ -83,22 +83,22 @@ jobs:
     steps:
       - name: Setup macOS
         if: ${{ matrix.runner != 'ubuntu-latest' }}
-        run: |
-          brew install tree gawk
-          sudo rm -rf $(command -v gpg gpg2)
-      - name: Setup Ubuntu
-        if: ${{ matrix.runner == 'ubuntu-latest' }}
-        run: sudo rm -rf $(command -v gpg gpg2 patchelf)
+        run: brew install tree gawk
+      - name: Remove system executables
+        run: |
+          while [ -n "$(command -v gpg gpg2 patchelf)" ]; do
+            sudo rm $(command -v gpg gpg2 patchelf)
+          done
       - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
         with:
           fetch-depth: 0
       - name: Bootstrap GnuPG
         run: |
           source share/spack/setup-env.sh
           spack solve zlib
+          spack bootstrap disable github-actions-v0.6
           spack bootstrap disable github-actions-v0.5
-          spack bootstrap disable github-actions-v0.4
           spack -d gpg list
           tree ~/.spack/bootstrap/store/
@@ -110,19 +110,17 @@ jobs:
     steps:
       - name: Setup macOS
         if: ${{ matrix.runner != 'ubuntu-latest' }}
-        run: |
-          brew install tree
-          # Remove GnuPG since we want to bootstrap it
-          sudo rm -rf /usr/local/bin/gpg
-      - name: Setup Ubuntu
-        if: ${{ matrix.runner == 'ubuntu-latest' }}
-        run: |
-          sudo rm -rf $(which gpg) $(which gpg2) $(which patchelf)
+        run: brew install tree
+      - name: Remove system executables
+        run: |
+          while [ -n "$(command -v gpg gpg2 patchelf)" ]; do
+            sudo rm $(command -v gpg gpg2 patchelf)
+          done
       - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
         with:
           fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
         with:
           python-version: |
             3.8
@@ -130,15 +128,16 @@ jobs:
             3.10
             3.11
             3.12
+            3.13
       - name: Set bootstrap sources
         run: |
           source share/spack/setup-env.sh
           spack bootstrap disable github-actions-v0.4
           spack bootstrap disable github-actions-v0.5
           spack bootstrap disable spack-install
       - name: Bootstrap clingo
         run: |
           set -e
-          for ver in '3.8' '3.9' '3.10' '3.11' '3.12' ; do
+          for ver in '3.8' '3.9' '3.10' '3.11' '3.12' '3.13'; do
             not_found=1
             ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
             if [[ -d "$ver_dir" ]] ; then
@@ -172,10 +171,10 @@ jobs:
     runs-on: "windows-latest"
     steps:
       - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
         with:
           fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
         with:
           python-version: "3.12"
       - name: Setup Windows
@@ -185,8 +184,8 @@ jobs:
       - name: Bootstrap clingo
         run: |
           ./share/spack/setup-env.ps1
+          spack bootstrap disable github-actions-v0.6
           spack bootstrap disable github-actions-v0.5
-          spack bootstrap disable github-actions-v0.4
           spack external find --not-buildable cmake bison
           spack -d solve zlib
           ./share/spack/qa/validate_last_exit.ps1
```
**.github/workflows/build-containers.yml** (8 changes, vendored)
```diff
@@ -55,7 +55,7 @@ jobs:
     if: github.repository == 'spack/spack'
     steps:
       - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683

       - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
         id: docker_meta
@@ -87,7 +87,7 @@ jobs:
           fi

       - name: Upload Dockerfile
-        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
         with:
           name: dockerfiles_${{ matrix.dockerfile[0] }}
           path: dockerfiles
@@ -113,7 +113,7 @@ jobs:
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Build & Deploy ${{ matrix.dockerfile[0] }}
-        uses: docker/build-push-action@32945a339266b759abcbdc89316275140b0fc960
+        uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75
         with:
           context: dockerfiles/${{ matrix.dockerfile[0] }}
           platforms: ${{ matrix.dockerfile[1] }}
@@ -126,7 +126,7 @@ jobs:
     needs: deploy-images
     steps:
       - name: Merge Artifacts
-        uses: actions/upload-artifact/merge@50769540e7f4bd5e21e526ee35c689e35e0d6874
+        uses: actions/upload-artifact/merge@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
         with:
           name: dockerfiles
           pattern: dockerfiles_*
```
**.github/workflows/ci.yaml** (28 changes, vendored)
```diff
@@ -24,7 +24,7 @@ jobs:
       core: ${{ steps.filter.outputs.core }}
       packages: ${{ steps.filter.outputs.packages }}
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
         if: ${{ github.event_name == 'push' }}
         with:
           fetch-depth: 0
@@ -83,10 +83,17 @@ jobs:

   all-prechecks:
     needs: [ prechecks ]
+    if: ${{ always() }}
     runs-on: ubuntu-latest
     steps:
       - name: Success
-        run: "true"
+        run: |
+          if [ "${{ needs.prechecks.result }}" == "failure" ] || [ "${{ needs.prechecks.result }}" == "canceled" ]; then
+            echo "Unit tests failed."
+            exit 1
+          else
+            exit 0
+          fi

   coverage:
     needs: [ unit-tests, prechecks ]
@@ -94,8 +101,19 @@ jobs:
     secrets: inherit

   all:
-    needs: [ coverage, bootstrap ]
+    needs: [ unit-tests, coverage, bootstrap ]
+    if: ${{ always() }}
     runs-on: ubuntu-latest
+    # See https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/accessing-contextual-information-about-workflow-runs#needs-context
     steps:
-      - name: Success
-        run: "true"
+      - name: Status summary
+        run: |
+          if [ "${{ needs.unit-tests.result }}" == "failure" ] || [ "${{ needs.unit-tests.result }}" == "canceled" ]; then
+            echo "Unit tests failed."
+            exit 1
+          elif [ "${{ needs.bootstrap.result }}" == "failure" ] || [ "${{ needs.bootstrap.result }}" == "canceled" ]; then
+            echo "Bootstrap tests failed."
+            exit 1
+          else
+            exit 0
+          fi
```
**.github/workflows/coverage.yml** (4 changes, vendored)
```diff
@@ -8,8 +8,8 @@ jobs:
   upload:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
         with:
           python-version: '3.11'
           cache: 'pip'
```
**.github/workflows/nightly-win-builds.yml** (4 changes, vendored)
```diff
@@ -14,10 +14,10 @@ jobs:
   build-paraview-deps:
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: 3.9
      - name: Install Python packages
```
```diff
@@ -1,7 +1,7 @@
-black==24.8.0
+black==24.10.0
 clingo==5.7.1
 flake8==7.1.1
 isort==5.13.2
 mypy==1.8.0
-types-six==1.16.21.20240513
+types-six==1.16.21.20241105
 vermin==1.6.0
```
**.github/workflows/unit_tests.yaml** (51 changes, vendored)
```diff
@@ -40,10 +40,10 @@ jobs:
         on_develop: false

     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install System packages
@@ -80,7 +80,7 @@ jobs:
          UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
        run: |
          share/spack/qa/run-unit-tests
-      - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
+      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
        with:
          name: coverage-${{ matrix.os }}-python${{ matrix.python-version }}
          path: coverage
@@ -89,10 +89,10 @@ jobs:
   shell:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: '3.11'
      - name: Install System packages
@@ -113,7 +113,7 @@ jobs:
          COVERAGE: true
        run: |
          share/spack/qa/run-shell-tests
-      - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
+      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
        with:
          name: coverage-shell
          path: coverage
@@ -130,7 +130,7 @@ jobs:
         dnf install -y \
             bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
             make patch tcl unzip which xz
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
      - name: Setup repo and non-root user
        run: |
          git --version
@@ -149,32 +149,33 @@
   clingo-cffi:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
-          python-version: '3.11'
+          python-version: '3.13'
      - name: Install System packages
        run: |
          sudo apt-get -y update
-          sudo apt-get -y install coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build kcov
+          sudo apt-get -y install coreutils gfortran graphviz gnupg2
      - name: Install Python packages
        run: |
-          pip install --upgrade pip setuptools pytest coverage[toml] pytest-cov clingo pytest-xdist
+          pip install --upgrade pip setuptools pytest coverage[toml] pytest-cov clingo
          pip install --upgrade flake8 "isort>=4.3.5" "mypy>=0.900" "click" "black"
      - name: Setup git configuration
        run: |
          # Need this for the git tests to succeed.
          git --version
          . .github/workflows/bin/setup_git.sh
      - name: Run unit tests (full suite with coverage)
        env:
          COVERAGE: true
          COVERAGE_FILE: coverage/.coverage-clingo-cffi
        run: |
-          share/spack/qa/run-unit-tests
-      - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
+          . share/spack/setup-env.sh
+          spack bootstrap disable spack-install
+          spack bootstrap disable github-actions-v0.5
+          spack bootstrap disable github-actions-v0.6
+          spack bootstrap status
+          spack solve zlib
+          spack unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml lib/spack/spack/test/concretize.py
+      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
        with:
          name: coverage-clingo-cffi
          path: coverage
@@ -187,10 +188,10 @@ jobs:
         os: [macos-13, macos-14]
         python-version: ["3.11"]
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install Python packages
@@ -212,7 +213,7 @@ jobs:
          $(which spack) solve zlib
          common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
          $(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
-      - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
+      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
        with:
          name: coverage-${{ matrix.os }}-python${{ matrix.python-version }}
          path: coverage
@@ -225,10 +226,10 @@ jobs:
       powershell Invoke-Expression -Command "./share/spack/qa/windows_test_setup.ps1"; {0}
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: 3.9
      - name: Install Python packages
@@ -243,7 +244,7 @@ jobs:
        run: |
          spack unit-test -x --verbose --cov --cov-config=pyproject.toml
          ./share/spack/qa/validate_last_exit.ps1
-      - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
+      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
        with:
          name: coverage-windows
          path: coverage
```
**.github/workflows/valid-style.yml** (22 changes, vendored)
```diff
@@ -18,8 +18,8 @@ jobs:
   validate:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: '3.11'
          cache: 'pip'
@@ -35,10 +35,10 @@ jobs:
   style:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: '3.11'
          cache: 'pip'
@@ -70,7 +70,7 @@ jobs:
         dnf install -y \
             bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
             make patch tcl unzip which xz
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
      - name: Setup repo and non-root user
        run: |
          git --version
@@ -85,7 +85,7 @@ jobs:
          source share/spack/setup-env.sh
          spack debug report
          spack -d bootstrap now --dev
-          spack style -t black
+          spack -d style -t black
          spack unit-test -V
   import-check:
     runs-on: ubuntu-latest
@@ -98,14 +98,14 @@ jobs:
       # PR: use the base of the PR as the old commit
       - name: Checkout PR base commit
         if: github.event_name == 'pull_request'
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
         with:
           ref: ${{ github.event.pull_request.base.sha }}
           path: old
       # not a PR: use the previous commit as the old commit
       - name: Checkout previous commit
         if: github.event_name != 'pull_request'
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
         with:
           fetch-depth: 2
           path: old
@@ -114,14 +114,14 @@ jobs:
       run: git -C old reset --hard HEAD^

       - name: Checkout new commit
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
         with:
           path: new
       - name: Install circular import checker
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
         with:
           repository: haampie/circular-import-fighter
-          ref: 555519c6fd5564fd2eb844e7b87e84f4d12602e2
+          ref: 9f60f51bc7134e0be73f27623f1b0357d1718427
           path: circular-import-fighter
       - name: Install dependencies
         working-directory: circular-import-fighter
```
```diff
@@ -14,3 +14,26 @@ sphinx:
 python:
   install:
     - requirements: lib/spack/docs/requirements.txt
+
+search:
+  ranking:
+    spack.html: -10
+    spack.*.html: -10
+    llnl.html: -10
+    llnl.*.html: -10
+    _modules/*: -10
+    command_index.html: -9
+    basic_usage.html: 5
+    configuration.html: 5
+    config_yaml.html: 5
+    packages_yaml.html: 5
+    build_settings.html: 5
+    environments.html: 5
+    containers.html: 5
+    mirrors.html: 5
+    module_file_support.html: 5
+    repositories.html: 5
+    binary_caches.html: 5
+    chain.html: 5
+    pipelines.html: 5
+    packaging_guide.html: 5
```
```diff
@@ -1,71 +1,11 @@
 @ECHO OFF
 setlocal EnableDelayedExpansion
 :: (c) 2021 Lawrence Livermore National Laboratory
 :: To use this file independently of Spack's installer, execute this script in its directory, or add the
 :: associated bin directory to your PATH. Invoke to launch Spack Shell.
 ::
 :: source_dir/spack/bin/spack_cmd.bat
 ::
-pushd %~dp0..
-set SPACK_ROOT=%CD%
-pushd %CD%\..
-set spackinstdir=%CD%
-popd
-
-
-:: Check if Python is on the PATH
-if not defined python_pf_ver (
-  (for /f "delims=" %%F in ('where python.exe') do (
-    set "python_pf_ver=%%F"
-    goto :found_python
-  ) ) 2> NUL
-)
-:found_python
-if not defined python_pf_ver (
-  :: If not, look for Python from the Spack installer
-  :get_builtin
-  (for /f "tokens=*" %%g in ('dir /b /a:d "!spackinstdir!\Python*"') do (
-    set "python_ver=%%g")) 2> NUL
-
-  if not defined python_ver (
-    echo Python was not found on your system.
-    echo Please install Python or add Python to your PATH.
-  ) else (
-    set "py_path=!spackinstdir!\!python_ver!"
-    set "py_exe=!py_path!\python.exe"
-  )
-  goto :exitpoint
-) else (
-  :: Python is already on the path
-  set "py_exe=!python_pf_ver!"
-  (for /F "tokens=* USEBACKQ" %%F in (
-    `"!py_exe!" --version`) do (set "output=%%F")) 2>NUL
-  if not "!output:Microsoft Store=!"=="!output!" goto :get_builtin
-  goto :exitpoint
-)
-:exitpoint
-
-set "PATH=%SPACK_ROOT%\bin\;%PATH%"
-if defined py_path (
-  set "PATH=%py_path%;%PATH%"
-)
-
-if defined py_exe (
-  "%py_exe%" "%SPACK_ROOT%\bin\haspywin.py"
-)
-
-set "EDITOR=notepad"
-
-DOSKEY spacktivate=spack env activate $*
-
-@echo **********************************************************************
-@echo ** Spack Package Manager
-@echo **********************************************************************
-
-IF "%1"=="" GOTO CONTINUE
-set
-GOTO:EOF
-
-:continue
-set PROMPT=[spack] %PROMPT%
-%comspec% /k
+call "%~dp0..\share\spack\setup-env.bat"
+pushd %SPACK_ROOT%
+%comspec% /K
```
```diff
@@ -9,15 +9,15 @@ bootstrap:
   # may not be able to bootstrap all the software that Spack needs,
   # depending on its type.
   sources:
-  - name: 'github-actions-v0.5'
+  - name: github-actions-v0.6
+    metadata: $spack/share/spack/bootstrap/github-actions-v0.6
+  - name: github-actions-v0.5
     metadata: $spack/share/spack/bootstrap/github-actions-v0.5
-  - name: 'github-actions-v0.4'
-    metadata: $spack/share/spack/bootstrap/github-actions-v0.4
-  - name: 'spack-install'
+  - name: spack-install
     metadata: $spack/share/spack/bootstrap/spack-install
   trusted:
     # By default we trust bootstrapping from sources and from binaries
     # produced on Github via the workflow
+    github-actions-v0.6: true
     github-actions-v0.5: true
-    github-actions-v0.4: true
     spack-install: true
```
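The same ``sources``/``trusted`` schema shown in this hunk is what a user-level ``bootstrap.yaml`` would use to register an additional source. A minimal sketch, where the name and metadata path are hypothetical:

```yaml
bootstrap:
  sources:
  # Hypothetical extra source: a directory containing the metadata
  # that describes where its bootstrap binaries live.
  - name: local-mirror
    metadata: /opt/spack/bootstrap-mirror
  trusted:
    # A source is only consulted once it is explicitly trusted.
    local-mirror: true
```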
```diff
@@ -42,8 +42,8 @@ concretizer:
   # "minimal": allows the duplication of 'build-tools' nodes only (e.g. py-setuptools, cmake etc.)
   # "full" (experimental): allows separation of the entire build-tool stack (e.g. the entire "cmake" subDAG)
     strategy: minimal
-  # Option to specify compatiblity between operating systems for reuse of compilers and packages
-  # Specified as a key: [list] where the key is the os that is being targeted, and the list contains the OS's
-  # it can reuse. Note this is a directional compatibility so mutual compatibility between two OS's
+  # Option to specify compatibility between operating systems for reuse of compilers and packages
+  # Specified as a key: [list] where the key is the os that is being targeted, and the list contains the OS's
+  # it can reuse. Note this is a directional compatibility so mutual compatibility between two OS's
   # requires two entries i.e. os_compatible: {sonoma: [monterey], monterey: [sonoma]}
   os_compatible: {}
```
```diff
@@ -40,9 +40,9 @@ packages:
     jpeg: [libjpeg-turbo, libjpeg]
     lapack: [openblas, amdlibflame]
     libc: [glibc, musl]
-    libgfortran: [ gcc-runtime ]
+    libgfortran: [gcc-runtime]
     libglx: [mesa+glx]
-    libifcore: [ intel-oneapi-runtime ]
+    libifcore: [intel-oneapi-runtime]
     libllvm: [llvm]
     lua-lang: [lua, lua-luajit-openresty, lua-luajit]
     luajit: [lua-luajit-openresty, lua-luajit]
```
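Provider lists like the ones in this hunk are also what site-level configuration typically overrides. A minimal sketch of a user preference, with the provider ordering purely illustrative:

```yaml
packages:
  all:
    providers:
      # Prefer mpich over openmpi wherever the virtual "mpi" is required.
      mpi: [mpich, openmpi]
```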
```diff
@@ -1359,6 +1359,10 @@ For example, for the ``stackstart`` variant:
    mpileaks stackstart==4  # variant will be propagated to dependencies
    mpileaks stackstart=4   # only mpileaks will have this variant value

+Spack also allows variants to be propagated from a package that does
+not have that variant.
+
+
 ^^^^^^^^^^^^^^
 Compiler Flags
 ^^^^^^^^^^^^^^
```
```diff
@@ -166,3 +166,74 @@ while `py-numpy` still needs an older version:

 Up to Spack v0.20 ``duplicates:strategy:none`` was the default (and only) behavior. From Spack v0.21 the
 default behavior is ``duplicates:strategy:minimal``.
+
+--------
+Splicing
+--------
+
+The ``splice`` key covers config attributes for splicing specs in the solver.
+
+"Splicing" is a method for replacing a dependency with another spec
+that provides the same package or virtual. There are two types of
+splices, referring to different behaviors for shared dependencies
+between the root spec and the new spec replacing a dependency:
+"transitive" and "intransitive". A "transitive" splice is one that
+resolves all conflicts by taking the dependency from the new node. An
+"intransitive" splice is one that resolves all conflicts by taking the
+dependency from the original root. From a theory perspective, hybrid
+splices are possible but are not modeled by Spack.
+
+All spliced specs retain a ``build_spec`` attribute that points to the
+original Spec before any splice occurred. The ``build_spec`` for a
+non-spliced spec is itself.
+
+The figure below shows examples of transitive and intransitive splices:
+
+.. figure:: images/splices.png
+   :align: center
+
+The concretizer can be configured to explicitly splice particular
+replacements for a target spec. Splicing will allow the user to make
+use of generically built public binary caches, while swapping in
+highly optimized local builds for performance critical components
+and/or components that interact closely with the specific hardware
+details of the system. The most prominent candidate for splicing is
+MPI providers. MPI packages have relatively well-understood ABI
+characteristics, and most High Performance Computing facilities deploy
+highly optimized MPI packages tailored to their particular
+hardware. The following config block configures Spack to replace
+whatever MPI provider each spec was concretized to use with the
+particular package of ``mpich`` with the hash that begins ``abcdef``.
+
+.. code-block:: yaml
+
+   concretizer:
+     splice:
+       explicit:
+       - target: mpi
+         replacement: mpich/abcdef
+         transitive: false
+
+.. warning::
+
+   When configuring an explicit splice, you as the user take on the
+   responsibility for ensuring ABI compatibility between the specs
+   matched by the target and the replacement you provide. If they are
+   not compatible, Spack will not warn you and your application will
+   fail to run.
+
+The ``target`` field of an explicit splice can be any abstract
+spec. The ``replacement`` field must be a spec that includes the hash
+of a concrete spec, and the replacement must either be the same
+package as the target, provide the virtual that is the target, or
+provide a virtual that the target provides. The ``transitive`` field
+is optional -- by default, splices will be transitive.
+
+.. note::
+
+   With explicit splices configured, it is possible for Spack to
+   concretize to a spec that does not satisfy the input. For example,
+   with the config above ``hdf5 ^mvapich2`` will concretize to use
+   ``mpich/abcdef`` instead of ``mvapich2`` as the MPI provider. Spack
+   will warn the user in this case, but will not fail the
+   concretization.
```
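To complement the intransitive example in the new documentation above, the same block can be sketched as a transitive splice; the ``openmpi/012345`` hash is hypothetical:

```yaml
concretizer:
  splice:
    explicit:
    - target: mpi
      replacement: openmpi/012345  # hypothetical concrete hash
      # transitive is the default: conflicts among shared dependencies
      # are resolved in favor of the replacement's dependencies
      transitive: true
```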
```diff
@@ -214,12 +214,14 @@ def setup(sphinx):
     # Spack classes that intersphinx is unable to resolve
     ("py:class", "spack.version.StandardVersion"),
     ("py:class", "spack.spec.DependencySpec"),
+    ("py:class", "spack.spec.ArchSpec"),
     ("py:class", "spack.spec.InstallStatus"),
     ("py:class", "spack.spec.SpecfileReaderBase"),
     ("py:class", "spack.install_test.Pb"),
+    ("py:class", "spack.filesystem_view.SimpleFilesystemView"),
     ("py:class", "spack.traverse.EdgeAndDepth"),
     ("py:class", "archspec.cpu.microarchitecture.Microarchitecture"),
+    ("py:class", "spack.compiler.CompilerCache"),
     # TypeVar that is not handled correctly
     ("py:class", "llnl.util.lang.T"),
 ]
```
```diff
@@ -281,7 +281,7 @@ When spack queries for configuration parameters, it searches in
 higher-precedence scopes first. So, settings in a higher-precedence file
 can override those with the same key in a lower-precedence one. For
 list-valued settings, Spack *prepends* higher-precedence settings to
-lower-precedence settings. Completely ignoring higher-level configuration
+lower-precedence settings. Completely ignoring lower-precedence configuration
 options is supported with the ``::`` notation for keys (see
 :ref:`config-overrides` below).
@@ -511,6 +511,7 @@ Spack understands over a dozen special variables. These are:
 * ``$target_family``. The target family for the current host, as
   detected by ArchSpec. E.g. ``x86_64`` or ``aarch64``.
 * ``$date``: the current date in the format YYYY-MM-DD
+* ``$spack_short_version``: the Spack version truncated to the first components.


 Note that, as with shell variables, you can write these as ``$varname``
```
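As a reminder of the ``::`` notation referenced in the first hunk above, a minimal sketch of a scope override (the compiler preference is purely illustrative):

```yaml
# "packages:" merges with settings from lower-precedence scopes;
# "packages::" discards them and takes effect verbatim.
packages::
  all:
    compiler: [gcc]
```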
```diff
@@ -184,7 +184,7 @@ Style Tests

 Spack uses `Flake8 <http://flake8.pycqa.org/en/latest/>`_ to test for
 `PEP 8 <https://www.python.org/dev/peps/pep-0008/>`_ conformance and
-`mypy <https://mypy.readthedocs.io/en/stable/>` for type checking. PEP 8 is
+`mypy <https://mypy.readthedocs.io/en/stable/>`_ for type checking. PEP 8 is
 a series of style guides for Python that provide suggestions for everything
 from variable naming to indentation. In order to limit the number of PRs that
 were mostly style changes, we decided to enforce PEP 8 conformance. Your PR
```
```diff
@@ -333,13 +333,9 @@ inserting them at different places in the spack code base. Whenever a hook
 type triggers by way of a function call, we find all the hooks of that type,
 and run them.

-Spack defines hooks by way of a module at ``lib/spack/spack/hooks`` where we can define
-types of hooks in the ``__init__.py``, and then python files in that folder
-can use hook functions. The files are automatically parsed, so if you write
-a new file for some integration (e.g., ``lib/spack/spack/hooks/myintegration.py``
-you can then write hook functions in that file that will be automatically detected,
-and run whenever your hook is called. This section will cover the basic kind
-of hooks, and how to write them.
+Spack defines hooks by way of a module in the ``lib/spack/spack/hooks`` directory.
+This module has to be registered in ``__init__.py`` so that Spack is aware of it.
+This section will cover the basic kind of hooks, and how to write them.

 ^^^^^^^^^^^^^^
 Types of Hooks
```
```diff
@@ -712,27 +708,27 @@ Release branches
 ^^^^^^^^^^^^^^^^

 There are currently two types of Spack releases: :ref:`major releases
-<major-releases>` (``0.17.0``, ``0.18.0``, etc.) and :ref:`point releases
-<point-releases>` (``0.17.1``, ``0.17.2``, ``0.17.3``, etc.). Here is a
+<major-releases>` (``0.21.0``, ``0.22.0``, etc.) and :ref:`patch releases
+<patch-releases>` (``0.22.1``, ``0.22.2``, ``0.22.3``, etc.). Here is a
 diagram of how Spack release branches work::

-   o    branch: develop  (latest version, v0.19.0.dev0)
+   o    branch: develop  (latest version, v0.23.0.dev0)
    |
    o
-   | o  branch: releases/v0.18, tag: v0.18.1
+   | o  branch: releases/v0.22, tag: v0.22.1
    o |
-   | o  tag: v0.18.0
+   | o  tag: v0.22.0
    o |
    | o
    |/
    o
    |
    o
-   | o  branch: releases/v0.17, tag: v0.17.2
+   | o  branch: releases/v0.21, tag: v0.21.2
    o |
-   | o  tag: v0.17.1
+   | o  tag: v0.21.1
    o |
-   | o  tag: v0.17.0
+   | o  tag: v0.21.0
    o |
    | o
    |/
```
```diff
@@ -743,8 +739,8 @@ requests target ``develop``. The ``develop`` branch will report that its
 version is that of the next **major** release with a ``.dev0`` suffix.

 Each Spack release series also has a corresponding branch, e.g.
-``releases/v0.18`` has ``0.18.x`` versions of Spack, and
-``releases/v0.17`` has ``0.17.x`` versions. A major release is the first
+``releases/v0.22`` has ``v0.22.x`` versions of Spack, and
+``releases/v0.21`` has ``v0.21.x`` versions. A major release is the first
 tagged version on a release branch. Minor releases are back-ported from
 develop onto release branches. This is typically done by cherry-picking
 bugfix commits off of ``develop``.
```
```diff
@@ -774,27 +770,40 @@ for more details.
 Scheduling work for releases
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-We schedule work for releases by creating `GitHub projects
-<https://github.com/spack/spack/projects>`_. At any time, there may be
-several open release projects. For example, below are two releases (from
-some past version of the page linked above):
+We schedule work for **major releases** through `milestones
+<https://github.com/spack/spack/milestones>`_ and `GitHub Projects
+<https://github.com/spack/spack/projects>`_, while **patch releases** use `labels
+<https://github.com/spack/spack/labels>`_.

-.. image:: images/projects.png
+There is only one milestone open at a time. Its name corresponds to the next major version, for
+example ``v0.23``. Important issues and pull requests should be assigned to this milestone by
+core developers, so that they are not forgotten at the time of release. The milestone is closed
+when the release is made, and a new milestone is created for the next major release.

-This image shows one release in progress for ``0.15.1`` and another for
-``0.16.0``. Each of these releases has a project board containing issues
-and pull requests. GitHub shows a status bar with completed work in
-green, work in progress in purple, and work not started yet in gray, so
-it's fairly easy to see progress.
+Bug reports in GitHub issues are automatically labelled ``bug`` and ``triage``. Spack developers
+assign one of the labels ``impact-low``, ``impact-medium`` or ``impact-high``. This will make the
+issue appear in the `Triaged bugs <https://github.com/orgs/spack/projects/6>`_ project board.
+Important issues should be assigned to the next milestone as well, so they appear at the top of
+the project board.

-Spack's project boards are not firm commitments so we move work between
-releases frequently. If we need to make a release and some tasks are not
-yet done, we will simply move them to the next minor or major release, rather
-than delaying the release to complete them.
+Spack's milestones are not firm commitments so we move work between releases frequently. If we
+need to make a release and some tasks are not yet done, we will simply move them to the next major
+release milestone, rather than delaying the release to complete them.

-For more on using GitHub project boards, see `GitHub's documentation
-<https://docs.github.com/en/github/managing-your-work-on-github/about-project-boards>`_.
+^^^^^^^^^^^^^^^^^^^^^
+Backporting bug fixes
+^^^^^^^^^^^^^^^^^^^^^
+
+When a bug is fixed in the ``develop`` branch, it is often necessary to backport the fix to one
+(or more) of the ``release/vX.Y`` branches. Only the release manager is responsible for doing
+backports, but Spack maintainers are responsible for labelling pull requests (and issues if no bug
+fix is available yet) with ``vX.Y.Z`` labels. The label should correspond to the next patch version
+that the bug fix should be backported to.
+
+Backports are done publicly by the release manager using a pull request named ``Backports vX.Y.Z``.
+This pull request is opened from the ``backports/vX.Y.Z`` branch, targets the ``releases/vX.Y``
+branch and contains a (growing) list of cherry-picked commits from the ``develop`` branch.
+Typically there are one or two backport pull requests open at any given time.

 .. _major-releases:
```
```diff
@@ -802,25 +811,21 @@ For more on using GitHub project boards, see `GitHub's documentation
 Making major releases
 ^^^^^^^^^^^^^^^^^^^^^

-Assuming a project board has already been created and all required work
-completed, the steps to make the major release are:
+Assuming all required work from the milestone is completed, the steps to make the major release
+are:

-#. Create two new project boards:
+#. `Create a new milestone <https://github.com/spack/spack/milestones>`_ for the next major
+   release.

-   * One for the next major release
-   * One for the next point release
+#. `Create a new label <https://github.com/spack/spack/labels>`_ for the next patch release.

-#. Move any optional tasks that are not done to one of the new project boards.
-
-   In general, small bugfixes should go to the next point release. Major
-   features, refactors, and changes that could affect concretization should
-   go in the next major release.
+#. Move any optional tasks that are not done to the next milestone.

 #. Create a branch for the release, based on ``develop``:

    .. code-block:: console

-      $ git checkout -b releases/v0.15 develop
+      $ git checkout -b releases/v0.23 develop

    For a version ``vX.Y.Z``, the branch's name should be
    ``releases/vX.Y``. That is, you should create a ``releases/vX.Y``
@@ -856,8 +861,8 @@ completed, the steps to make the major release are:

    Create a pull request targeting the ``develop`` branch, bumping the major
    version in ``lib/spack/spack/__init__.py`` with a ``dev0`` release segment.
-   For instance when you have just released ``v0.15.0``, set the version
-   to ``(0, 16, 0, 'dev0')`` on ``develop``.
+   For instance when you have just released ``v0.23.0``, set the version
+   to ``(0, 24, 0, 'dev0')`` on ``develop``.

 #. Follow the steps in :ref:`publishing-releases`.
```
@@ -866,82 +871,52 @@ completed, the steps to make the major release are:
|
||||
#. Follow the steps in :ref:`announcing-releases`.
|
||||
|
||||
|
||||
.. _point-releases:
|
||||
.. _patch-releases:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
Making point releases
|
||||
Making patch releases
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Assuming a project board has already been created and all required work
|
||||
completed, the steps to make the point release are:
|
||||
To make the patch release process both efficient and transparent, we use a *backports pull request*
|
||||
which contains cherry-picked commits from the ``develop`` branch. The majority of the work is to
|
||||
cherry-pick the bug fixes, which ideally should be done as soon as they land on ``develop``:
|
||||
this ensures cherry-picking happens in order, and makes conflicts easier to resolve since the
|
||||
changes are fresh in the mind of the developer.
|
||||
|
||||
#. Create a new project board for the next point release.
|
||||
The backports pull request is always titled ``Backports vX.Y.Z`` and is labelled ``backports``. It
|
||||
is opened from a branch named ``backports/vX.Y.Z`` and targets the ``releases/vX.Y`` branch.
|
||||
|
||||
#. Move any optional tasks that are not done to the next project board.
|
||||
Whenever a pull request labelled ``vX.Y.Z`` is merged, cherry-pick the associated squashed commit
|
||||
on ``develop`` to the ``backports/vX.Y.Z`` branch. For pull requests that were rebased (or not
|
||||
squashed), cherry-pick each associated commit individually. Never force push to the
|
||||
``backports/vX.Y.Z`` branch.

#. Check out the release branch (it should already exist).

.. warning::

   For the ``X.Y.Z`` release, the release branch is called ``releases/vX.Y``.
   For ``v0.15.1``, you would check out ``releases/v0.15``:

   Sometimes you may **still** get merge conflicts even if you have
   cherry-picked all the commits in order. This generally means there
   is some other intervening pull request that the one you're trying
   to pick depends on. In these cases, you'll need to make a judgment
   call regarding those pull requests. Consider the number of affected
   files and/or the resulting differences.

.. code-block:: console

   1. If the changes are small, you might just cherry-pick it.

   $ git checkout releases/v0.15

   2. If the changes are large, then you may decide that this fix is not
      worth including in a patch release, in which case you should remove
      the label from the pull request. Remember that large, manual backports
      are seldom the right choice for a patch release.

#. If a pull request to the release branch named ``Backports vX.Y.Z`` is not already
   in the project, create it. This pull request ought to be created as early as
   possible when working on a release project, so that we can build the release
   commits incrementally, and identify potential conflicts at an early stage.

When all commits are cherry-picked in the ``backports/vX.Y.Z`` branch, make the patch
release as follows:

#. Cherry-pick each pull request in the ``Done`` column of the release
   project board onto the ``Backports vX.Y.Z`` pull request.

#. `Create a new label <https://github.com/spack/spack/labels>`_ ``vX.Y.{Z+1}`` for the next patch
   release.

   This is **usually** fairly simple since we squash the commits from the
   vast majority of pull requests. That means there is only one commit
   per pull request to cherry-pick. For example, `this pull request
   <https://github.com/spack/spack/pull/15777>`_ has three commits, but
   they were squashed into a single commit on merge. You can see the
   commit that was created here:

#. Replace the label ``vX.Y.Z`` with ``vX.Y.{Z+1}`` for all PRs and issues that are not done.

   .. image:: images/pr-commit.png

   You can easily cherry pick it like this (assuming you already have the
   release branch checked out):

   .. code-block:: console

      $ git cherry-pick 7e46da7

   For pull requests that were rebased (or not squashed), you'll need to
   cherry-pick each associated commit individually.

.. warning::

   It is important to cherry-pick commits in the order they happened,
   otherwise you can get conflicts while cherry-picking. When
   cherry-picking look at the merge date,
   **not** the number of the pull request or the date it was opened.

   Sometimes you may **still** get merge conflicts even if you have
   cherry-picked all the commits in order. This generally means there
   is some other intervening pull request that the one you're trying
   to pick depends on. In these cases, you'll need to make a judgment
   call regarding those pull requests. Consider the number of affected
   files and/or the resulting differences.

   1. If the dependency changes are small, you might just cherry-pick it,
      too. If you do this, add the task to the release board.

   2. If the changes are large, then you may decide that this fix is not
      worth including in a point release, in which case you should remove
      the task from the release project.

   3. You can always decide to manually back-port the fix to the release
      branch if neither of the above options makes sense, but this can
      require a lot of work. It's seldom the right choice.

#. When all the commits from the project board are cherry-picked into
   the ``Backports vX.Y.Z`` pull request, you can push a commit to:

#. Manually push a single commit with commit message ``Set version to vX.Y.Z`` to the
   ``backports/vX.Y.Z`` branch, that both bumps the Spack version number and updates the changelog:

   1. Bump the version in ``lib/spack/spack/__init__.py``.
   2. Update ``CHANGELOG.md`` with a list of the changes.

@@ -950,20 +925,22 @@ completed, the steps to make the point release are:

   release branch. See `the changelog from 0.14.1
   <https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
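
   A sketch of that final commit (the version number and file edits shown are
   illustrative):

   .. code-block:: console

      $ git checkout backports/v0.23.1
      $ $EDITOR lib/spack/spack/__init__.py   # e.g. spack_version_info = (0, 23, 1)
      $ $EDITOR CHANGELOG.md                  # summarize the cherry-picked fixes
      $ git commit -am "Set version to v0.23.1"
      $ git push origin backports/v0.23.1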

#. Merge the ``Backports vX.Y.Z`` PR with the **Rebase and merge** strategy. This
   is needed to keep track in the release branch of all the commits that were
   cherry-picked.

#. Make sure CI passes on the release branch, including:
#. Make sure CI passes on the **backports pull request**, including:

   * Regular unit tests
   * Build tests
   * The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_

   If CI does not pass, you'll need to figure out why, and make changes
   to the release branch until it does. You can make more commits, modify
   or remove cherry-picked commits, or cherry-pick **more** from
   ``develop`` to make this happen.

#. Merge the ``Backports vX.Y.Z`` PR with the **Rebase and merge** strategy. This
   is needed to keep track in the release branch of all the commits that were
   cherry-picked.

#. Make sure CI passes on the last commit of the **release branch**.

#. In the rare case you need to include additional commits in the patch release after the backports
   PR is merged, it is best to delete the last commit ``Set version to vX.Y.Z`` from the release
   branch with a single force push, open a new backports PR named ``Backports vX.Y.Z (2)``, and
   repeat the process. Avoid repeated force pushes to the release branch.

#. Follow the steps in :ref:`publishing-releases`.

@@ -1038,25 +1015,31 @@ Updating `releases/latest`

If the new release is the **highest** Spack release yet, you should
also tag it as ``releases/latest``. For example, suppose the highest
release is currently ``0.15.3``:
release is currently ``0.22.3``:

* If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
  it with ``releases/latest``, as these are higher than ``0.15.3``.
* If you are releasing ``0.22.4`` or ``0.23.0``, then you should tag
  it with ``releases/latest``, as these are higher than ``0.22.3``.

* If you are making a new release of an **older** major version of
  Spack, e.g. ``0.14.4``, then you should not tag it as
  Spack, e.g. ``0.21.4``, then you should not tag it as
  ``releases/latest`` (as there are newer major versions).

To tag ``releases/latest``, do this:
To do so, first fetch the latest tag created on GitHub, since you may not have it locally:

.. code-block:: console

   $ git checkout releases/vX.Y    # vX.Y is the new release's branch
   $ git tag --force releases/latest
   $ git push --force --tags
   $ git fetch --force git@github.com:spack/spack vX.Y.Z

The ``--force`` argument to ``git tag`` makes ``git`` overwrite the existing
``releases/latest`` tag with the new one.
Then tag ``vX.Y.Z`` as ``releases/latest`` and push the individual tag to GitHub.

.. code-block:: console

   $ git tag --force releases/latest vX.Y.Z
   $ git push --force git@github.com:spack/spack releases/latest

The ``--force`` argument to ``git tag`` makes ``git`` overwrite the existing ``releases/latest``
tag with the new one. Do **not** use the ``--tags`` flag when pushing, since this will push *all*
local tags.

.. _announcing-releases:

@@ -425,9 +425,13 @@ Developing Packages in a Spack Environment

The ``spack develop`` command allows one to develop Spack packages in
an environment. It requires a spec containing a concrete version, and
will configure Spack to install the package from local source. By
default, it will also clone the package to a subdirectory in the
environment. This package will have a special variant ``dev_path``
will configure Spack to install the package from local source.
If a version is not provided on the command line, Spack
will automatically pick the highest version the package has defined.
This means any infinity versions (``develop``, ``main``, ``stable``) will be
preferred in this selection process.
By default, ``spack develop`` will also clone the package to a subdirectory in the
environment for the local source. This package will have a special variant ``dev_path``
set, and Spack will ensure the package and its dependents are rebuilt
any time the environment is installed if the package's local source
code has been modified. Spack's native implementation to check for modifications

@@ -669,6 +673,9 @@ them to the environment.

Environments can include files or URLs. File paths can be relative or
absolute. URLs include the path to the text for individual files or
can be the path to a directory containing configuration files.
Spack supports ``file``, ``http``, ``https`` and ``ftp`` protocols (or
schemes). Spack-specific, environment and user path variables may be
used in these paths. See :ref:`config-file-variables` for more information.
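
As a short sketch of what such includes can look like in a ``spack.yaml``
(the paths and URL are placeholders):

.. code-block:: yaml

   spack:
     include:
     - ./relative/path/to/config.yaml
     - /absolute/path/to/configs/
     - https://example.com/extra/packages.yaml
     specs: []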

^^^^^^^^^^^^^^^^^^^^^^^^
Configuration precedence

(Binary image diffs not shown: two existing images of 44 KiB and 68 KiB, and a
new file ``lib/spack/docs/images/splices.png`` of 358 KiB.)
@@ -12,10 +12,6 @@

Spack
===================

.. epigraph::

   `These are docs for the Spack package manager. For sphere packing, see` `pyspack <https://pyspack.readthedocs.io>`_.

Spack is a package management tool designed to support multiple
versions and configurations of software on a wide variety of platforms
and environments. It was designed for large supercomputing centers,

@@ -457,11 +457,11 @@ For instance, the following config options,

   tcl:
     all:
       suffixes:
         ^python@3.12: 'python-3.12'
         ^python@3: 'python{^python.version}'
         ^openblas: 'openblas'

will add a ``python-3.12`` version string to any packages compiled with
Python matching the spec, ``python@3.12``. This is useful to know which
will add a ``python-3.12.1`` version string to any packages compiled with
Python matching the spec, ``python@3``. This is useful to know which
version of Python a set of Python extensions is associated with. Likewise, the
``openblas`` string is attached to any program that has openblas in the spec,
most likely via the ``+blas`` variant specification.

@@ -2503,15 +2503,14 @@ with. For example, suppose that in the ``libdwarf`` package you write:

   depends_on("libelf@0.8")

Now ``libdwarf`` will require ``libelf`` at *exactly* version ``0.8``.
You can also specify a requirement for a particular variant or for
specific compiler flags:
Now ``libdwarf`` will require ``libelf`` in the range ``0.8``, which
includes patch versions ``0.8.1``, ``0.8.2``, etc. Apart from version
restrictions, you can also specify variants if this package requires
optional features of the dependency.

.. code-block:: python

   depends_on("libelf@0.8+debug")
   depends_on("libelf debug=True")
   depends_on("libelf cppflags='-fPIC'")
   depends_on("libelf@0.8 +parser +pic")

Both users *and* package authors can use the same spec syntax to refer
to different package configurations. Users use the spec syntax on the

@@ -2519,46 +2518,82 @@ command line to find installed packages or to install packages with

particular constraints, and package authors can use specs to describe
relationships between packages.
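
For instance (an illustrative sketch, not part of the original text), the very
same constraint can be typed on the command line or written in a recipe:

.. code-block:: console

   $ spack install libdwarf ^libelf@0.8+debug
   $ spack find libelf@0.8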

^^^^^^^^^^^^^^
Version ranges
^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specifying backward and forward compatibility
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Although some packages require a specific version for their dependencies,
most can be built with a range of versions. For example, if you are
writing a package for a legacy Python module that only works with Python
2.4 through 2.6, this would look like:
Packages are often compatible with a range of versions of their
dependencies. This is typically referred to as backward and forward
compatibility. Spack allows you to specify this in the ``depends_on``
directive using version ranges.

**Backwards compatibility** means that the package requires at least a
certain version of its dependency:

.. code-block:: python

   depends_on("python@2.4:2.6")
   depends_on("python@3.10:")

Version ranges in Spack are *inclusive*, so ``2.4:2.6`` means any version
greater than or equal to ``2.4`` and up to and including any ``2.6.x``. If
you want to specify that a package works with any version of Python 3 (or
higher), this would look like:
In this case, the package requires Python 3.10 or newer.

Commonly, packages drop support for older versions of a dependency as
they release new versions. In Spack you can conveniently add every
backward compatibility rule as a separate line:

.. code-block:: python

   depends_on("python@3:")
   # backward compatibility with Python
   depends_on("python@3.8:")
   depends_on("python@3.9:", when="@1.2:")
   depends_on("python@3.10:", when="@1.4:")

Here we leave out the upper bound. If you want to say that a package
requires Python 2, you can similarly leave out the lower bound:
This means that in general we need Python 3.8 or newer; from version
1.2 onwards we need Python 3.9 or newer; from version 1.4 onwards we
need Python 3.10 or newer. Notice that it's fine to have overlapping
ranges in the ``when`` clauses.

**Forward compatibility** means that the package requires at most a
certain version of its dependency. Forward compatibility rules are
necessary when there are breaking changes in the dependency that the
package cannot handle. In Spack we often add forward compatibility
bounds only at the time a new, breaking version of a dependency is
released. As with backward compatibility, it is typical to see a list
of forward compatibility bounds in a package file as separate lines:

.. code-block:: python

   depends_on("python@:2")
   # forward compatibility with Python
   depends_on("python@:3.12", when="@:1.10")
   depends_on("python@:3.13", when="@:1.12")

Notice that we didn't use ``@:3``. Version ranges are *inclusive*, so
``@:3`` means "up to and including any 3.x version".
Notice how the ``:`` now appears before the version number both in the
dependency and in the ``when`` clause. This tells Spack that in general
we need Python 3.13 or older up to version ``1.12.x``, and up to version
``1.10.x`` we need Python 3.12 or older. Said differently, support for
Python 3.13 was added in version 1.11, while support for Python 3.14 was
added in version 1.13.

You can also simply write
Notice that a version range ``@:3.12`` includes *any* patch version
number ``3.12.x``, which is often useful when specifying forward compatibility
bounds.

So far we have seen open-ended version ranges, which is by far the most
common use case. It is also possible to specify both a lower and an upper bound
on the version of a dependency, like this:

.. code-block:: python

   depends_on("python@2.7")
   depends_on("python@3.10:3.12")

to tell Spack that the package needs Python 2.7.x. This is equivalent to
``@2.7:2.7``.
There is a short syntax to specify that a package is compatible with say any
``3.x`` version:

.. code-block:: python

   depends_on("python@3")

The above is equivalent to ``depends_on("python@3:3")``, which means at least
Python version 3 and at most any version ``3.x.y``.

In very rare cases, you may need to specify an exact version, for example
if you need to distinguish between ``3.2`` and ``3.2.1``:
@@ -3199,7 +3234,7 @@ as a ``@property`` in the package's class:

   @property
   def libs(self):
       # The library provided by Foo is libMyFoo.so
       return find_libraries("libMyFoo", root=self.home, recursive=True)
       return find_libraries("libMyFoo", root=self.home)

A package may also provide a custom implementation of each attribute
for the virtual packages it provides by implementing the

@@ -3253,22 +3288,22 @@ follows:

   # Just the foo headers
   @property
   def headers(self):
       return find_headers("foo", root=self.home.include, recursive=False)
       return find_headers("foo", root=self.home)

   # Just the foo libraries
   @property
   def libs(self):
       return find_libraries("libFoo", root=self.home, recursive=True)
       return find_libraries("libFoo", root=self.home)

   # The header provided by the bar virtual package
   @property
   def bar_headers(self):
       return find_headers("bar/bar.h", root=self.home.include, recursive=False)
       return find_headers("bar", root=self.home)

   # The library provided by the bar virtual package
   @property
   def bar_libs(self):
       return find_libraries("libFooBar", root=self.home, recursive=True)
       return find_libraries("libFooBar", root=self.home)

   # The baz virtual package home
   @property

@@ -3278,12 +3313,12 @@ follows:

   # The header provided by the baz virtual package
   @property
   def baz_headers(self):
       return find_headers("baz/baz", root=self.baz_home.include, recursive=False)
       return find_headers("baz", root=self.baz_home)

   # The library provided by the baz virtual package
   @property
   def baz_libs(self):
       return find_libraries("libFooBaz", root=self.baz_home, recursive=True)
       return find_libraries("libFooBaz", root=self.baz_home)

Now consider another package, ``foo-app``, depending on all three:
@@ -59,7 +59,7 @@ Functional Example

------------------

The simplest fully functional standalone example of a working pipeline can be
examined live at this example `project <https://gitlab.com/scott.wittenburg/spack-pipeline-demo>`_
examined live at this example `project <https://gitlab.com/spack/pipeline-quickstart>`_
on gitlab.com.

Here's the ``.gitlab-ci.yml`` file from that example that builds and runs the

@@ -67,39 +67,46 @@ pipeline:

.. code-block:: yaml

   stages: [generate, build]
   stages: [ "generate", "build" ]

   variables:
     SPACK_REPO: https://github.com/scottwittenburg/spack.git
     SPACK_REF: pipelines-reproducible-builds
     SPACK_REPOSITORY: "https://github.com/spack/spack.git"
     SPACK_REF: "develop-2024-10-06"
     SPACK_USER_CONFIG_PATH: ${CI_PROJECT_DIR}
     SPACK_BACKTRACE: 1

   generate-pipeline:
     stage: generate
     tags:
     - docker
     - saas-linux-small-amd64
     stage: generate
     image:
       name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
       entrypoint: [""]
     before_script:
     - git clone ${SPACK_REPO}
     - pushd spack && git checkout ${SPACK_REF} && popd
     - . "./spack/share/spack/setup-env.sh"
       name: ghcr.io/spack/ubuntu20.04-runner-x86_64:2023-01-01
     script:
     - git clone ${SPACK_REPOSITORY}
     - cd spack && git checkout ${SPACK_REF} && cd ../
     - . "./spack/share/spack/setup-env.sh"
     - spack --version
     - spack env activate --without-view .
     - spack -d ci generate
     - spack -d -v --color=always
       ci generate
       --check-index-only
       --artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir"
       --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
       --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/cloud-ci-pipeline.yml"
     artifacts:
       paths:
       - "${CI_PROJECT_DIR}/jobs_scratch_dir"

   build-jobs:
   build-pipeline:
     stage: build
     trigger:
       include:
       - artifact: "jobs_scratch_dir/pipeline.yml"
       - artifact: jobs_scratch_dir/cloud-ci-pipeline.yml
         job: generate-pipeline
       strategy: depend
     needs:
     - artifacts: True
       job: generate-pipeline

The key thing to note above is that there are two jobs: The first job to run,
``generate-pipeline``, runs the ``spack ci generate`` command to generate a
@@ -114,82 +121,93 @@ And here's the spack environment built by the pipeline represented as a

   spack:
     view: false
     concretizer:
       unify: false
       unify: true
       reuse: false

     definitions:
     - pkgs:
       - zlib
       - bzip2
     - arch:
       - '%gcc@7.5.0 arch=linux-ubuntu18.04-x86_64'
       - bzip2 ~debug
     - compiler:
       - '%gcc'

     specs:
     - matrix:
       - - $pkgs
       - - $arch

     mirrors: { "mirror": "s3://spack-public/mirror" }
       - - $compiler

     ci:
       enable-artifacts-buildcache: True
       rebuild-index: False
       target: gitlab

       pipeline-gen:
       - any-job:
           before_script:
           - git clone ${SPACK_REPO}
           - pushd spack && git checkout ${SPACK_CHECKOUT_VERSION} && popd
           - . "./spack/share/spack/setup-env.sh"
       - build-job:
           tags: [docker]
           tags:
           - saas-linux-small-amd64
           image:
             name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
             entrypoint: [""]
             name: ghcr.io/spack/ubuntu20.04-runner-x86_64:2023-01-01
           before_script:
           - git clone ${SPACK_REPOSITORY}
           - cd spack && git checkout ${SPACK_REF} && cd ../
           - . "./spack/share/spack/setup-env.sh"
           - spack --version
           - export SPACK_USER_CONFIG_PATH=${CI_PROJECT_DIR}
           - spack config blame mirrors
The elements of this file important to spack ci pipelines are described in more
detail below, but there are a couple of things to note about the above working
example:

.. note::
   There is no ``script`` attribute specified here. The reason for this is
   Spack CI will automatically generate reasonable default scripts. More
   detail on what is in these scripts can be found below.

   The use of ``reuse: false`` in spack environments used for pipelines is
   almost always what you want, as without it your pipelines will not rebuild
   packages even if package hashes have changed. This is due to the concretizer
   strongly preferring known hashes when ``reuse: true``.

   Also notice the ``before_script`` section. It is required when using any of the
   default scripts to source the ``setup-env.sh`` script in order to inform
   the default scripts where to find the ``spack`` executable.

The ``ci`` section in the above environment file contains the bare minimum
configuration required for ``spack ci generate`` to create a working pipeline.
The ``target: gitlab`` tells spack that the desired pipeline output is for
gitlab. However, this isn't strictly required, as currently gitlab is the
only possible output format for pipelines. The ``pipeline-gen`` section
contains the key information needed to specify attributes for the generated
jobs. Notice that it contains a list which has only a single element in
this case. In real pipelines it will almost certainly have more elements,
and in those cases, order is important: spack starts at the bottom of the
list and works upwards when applying attributes.

Normally ``enable-artifacts-buildcache`` is not recommended in production as it
results in large binary artifacts getting transferred back and forth between
gitlab and the runners. But in this example on gitlab.com where there is no
shared, persistent file system, and where no secrets are stored for giving
permission to write to an S3 bucket, ``enable-artifacts-buildcache`` is the only
way to propagate binaries from jobs to their dependents.
But in this simple case, we use only the special key ``any-job`` to
indicate that spack should apply the specified attributes (``tags``, ``image``,
and ``before_script``) to any job it generates. This includes jobs for
building/pushing all packages, a ``rebuild-index`` job at the end of the
pipeline, as well as any ``noop`` jobs that might be needed by gitlab when
no rebuilds are required.

Also, it is usually a good idea to let the pipeline generate a final "rebuild the
buildcache index" job, so that subsequent pipeline generation can quickly determine
which specs are up to date and which need to be rebuilt (it's a good idea for other
reasons as well, but those are out of scope for this discussion). In this case we
have disabled it (using ``rebuild-index: False``) because the index would only be
generated in the artifacts mirror anyway, and consequently would not be available
during subsequent pipeline runs.
Something to note is that in this simple case, we rely on spack to
generate a reasonable script for the package build jobs (it just creates
a script that invokes ``spack ci rebuild``).

.. note::
   With the addition of reproducible builds (#22887) a previously working
   pipeline will require some changes:

Another thing to note is the use of the ``SPACK_USER_CONFIG_PATH`` environment
variable in any generated jobs. The purpose of this is to make spack
aware of one final file in the example, the one that contains the mirror
configuration. This file, ``mirrors.yaml``, looks like this:

* In the build-jobs, the environment location changed.
  This will typically show as a ``KeyError`` in the failing job. Be sure to
  point to ``${SPACK_CONCRETE_ENV_DIR}``.

.. code-block:: yaml

* When using ``include`` in your environment, be sure to make the included
  files available in the build jobs. This means adding those files to the
  artifact directory. Those files will also be missing in the reproducibility
  artifact.

   mirrors:
     buildcache-destination:
       url: oci://registry.gitlab.com/spack/pipeline-quickstart
       binary: true
       access_pair:
         id_variable: CI_REGISTRY_USER
         secret_variable: CI_REGISTRY_PASSWORD

* Because the location of the environment changed, including files with
  relative path may have to be adapted to work both in the project context
  (generation job) and in the concrete env dir context (build job).

Note the name of the mirror is ``buildcache-destination``, which is required
as of Spack 0.23 (see below for more information). The mirror url simply
points to the container registry associated with the project, while
``id_variable`` and ``secret_variable`` refer to environment variables
containing the access credentials for the mirror.

When spack builds packages for this example project, they will be pushed to
the project container registry, where they will be available for subsequent
jobs to install as dependencies, or for other pipelines to use to build runnable
container images.

-----------------------------------
Spack commands supporting pipelines

@@ -417,15 +435,6 @@ configuration with a ``script`` attribute. Specifying a signing job without a script

does not create a signing job and the job configuration attributes will be ignored.
Signing jobs are always assigned the runner tags ``aws``, ``protected``, and ``notary``.

^^^^^^^^^^^^^^^^^
Cleanup (cleanup)
^^^^^^^^^^^^^^^^^

When using ``temporary-storage-url-prefix`` the cleanup job will destroy the mirror
created for the associated Gitlab pipeline. Cleanup jobs do not allow modifying the
script, but do expect that the spack command is in the path and require a
``before_script`` to be specified that sources the ``setup-env.sh`` script.
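
As a hedged sketch of how these pieces might fit together in the ``ci``
configuration (the bucket URL is a placeholder, and the ``cleanup-job`` key
and its attributes are shown here only as an assumed illustration):

.. code-block:: yaml

   ci:
     temporary-storage-url-prefix: s3://my-bucket/tmp-storage
     pipeline-gen:
     - cleanup-job:
         before_script:
         - . "./spack/share/spack/setup-env.sh"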

.. _noop_jobs:

^^^^^^^^^^^^

@@ -592,6 +601,77 @@ the attributes will be merged starting from the bottom match going up to the top

In the case that no match is found in a submapping section, no additional attributes will be applied.

^^^^^^^^^^^^^^^^^^^^^^^^
Dynamic Mapping Sections
^^^^^^^^^^^^^^^^^^^^^^^^

For large scale CI where cost optimization is required, dynamic mapping allows for the use of real-time
mapping schemes served by a web service. This type of mapping does not support the ``-remove`` type
behavior, but it does follow the rest of the merge rules for configurations.

The dynamic mapping service needs to implement a single REST API interface for getting
requests: ``GET <URL>[:PORT][/PATH]?spec=<pkg_name@pkg_version +variant1+variant2%compiler@compiler_version>``.

An example request:

.. code-block::

   https://my-dyn-mapping.spack.io/allocation?spec=zlib-ng@2.1.6 +compat+opt+shared+pic+new_strategies arch=linux-ubuntu20.04-x86_64_v3%gcc@12.0.0

Here is an example response; it updates the kubernetes request variables, overrides the max retries for gitlab,
and prepends a note about the modifications made by the my-dyn-mapping.spack.io service.

.. code-block::

   200 OK

   {
       "variables": {
           "KUBERNETES_CPU_REQUEST": "500m",
           "KUBERNETES_MEMORY_REQUEST": "2G"
       },
       "retry": { "max": "1" },
       "script+:": [
           "echo \"Job modified by my-dyn-mapping.spack.io\""
       ]
   }

The ``ci.yaml`` configuration section takes the URL endpoint as well as a number of options to configure how responses are handled.

It is possible to specify a list of allowed and ignored configuration attributes under ``allow`` and ``ignore``
respectively. It is also possible to configure required attributes under the ``require`` section.

The client timeout and SSL verification can be configured with the ``timeout`` and ``verify_ssl`` options.
By default, ``timeout`` is set to the option in ``config:timeout`` and ``verify_ssl`` is set to the option in ``config:verify_ssl``.

Passing header parameters to the request can be achieved through the ``header`` section. The values of the variables passed to the
header may be environment variables that are expanded at runtime, such as a private token configured on the runner.

Here is an example configuration pointing to ``my-dyn-mapping.spack.io/allocation``:

.. code-block:: yaml

   ci:
   - dynamic-mapping:
       endpoint: my-dyn-mapping.spack.io/allocation
       timeout: 10
       verify_ssl: True
       header:
         PRIVATE_TOKEN: ${MY_PRIVATE_TOKEN}
         MY_CONFIG: "fuzz_allocation:false"
       allow:
       - variables
       ignore:
       - script
       require: []
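
The service itself is up to you. As a minimal sketch of an endpoint honoring
the request/response contract above (the allocation policy and port are made
up for illustration), using only the Python standard library:

.. code-block:: python

   import json
   from http.server import BaseHTTPRequestHandler, HTTPServer
   from urllib.parse import parse_qs, urlparse

   class AllocationHandler(BaseHTTPRequestHandler):
       def do_GET(self):
           # The spec to be built arrives as the "spec" query parameter.
           spec = parse_qs(urlparse(self.path).query).get("spec", [""])[0]
           # Hypothetical policy: give known heavyweight builds more memory.
           memory = "8G" if "llvm" in spec or "gcc" in spec else "2G"
           body = json.dumps({"variables": {"KUBERNETES_MEMORY_REQUEST": memory}}).encode()
           self.send_response(200)
           self.send_header("Content-Type", "application/json")
           self.send_header("Content-Length", str(len(body)))
           self.end_headers()
           self.wfile.write(body)

   if __name__ == "__main__":
       HTTPServer(("", 8080), AllocationHandler).serve_forever()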

^^^^^^^^^^^^^
Bootstrapping
^^^^^^^^^^^^^

@@ -670,15 +750,6 @@ environment/stack file, and in that case no bootstrapping will be done (only the

specs will be staged for building) and the runners will be expected to already
have all needed compilers installed and configured for spack to use.

^^^^^^^^^^^^^^^^^^^
Pipeline Buildcache
^^^^^^^^^^^^^^^^^^^

The ``enable-artifacts-buildcache`` key
takes a boolean and determines whether the pipeline uses artifacts to store and
pass along the buildcaches from one stage to the next (the default if you don't
provide this option is ``False``).

^^^^^^^^^^^^^^^^
Broken Specs URL
^^^^^^^^^^^^^^^^
@@ -1,13 +1,13 @@

sphinx==7.4.7
sphinx==8.1.3
sphinxcontrib-programoutput==0.17
sphinx_design==0.6.1
sphinx-rtd-theme==2.0.0
python-levenshtein==0.25.1
docutils==0.20.1
sphinx-rtd-theme==3.0.1
python-levenshtein==0.26.1
docutils==0.21.2
pygments==2.18.0
urllib3==2.2.3
pytest==8.3.3
isort==5.13.2
black==24.8.0
black==24.10.0
flake8==7.1.1
mypy==1.11.1

lib/spack/env/cc (vendored; 238 lines changed)
@@ -101,10 +101,9 @@ setsep() {

    esac
}

# prepend LISTNAME ELEMENT [SEP]
# prepend LISTNAME ELEMENT
#
# Prepend ELEMENT to the list stored in the variable LISTNAME,
# assuming the list is separated by SEP.
# Prepend ELEMENT to the list stored in the variable LISTNAME.
# Handles empty lists and single-element lists.
prepend() {
    varname="$1"

@@ -238,6 +237,36 @@ esac

}
"

# path_list functions. Path_lists have 3 parts: spack_store_<list>, <list> and system_<list>,
# which are used to prioritize paths when assembling the final command line.

# init_path_lists LISTNAME
# Set <LISTNAME>, spack_store_<LISTNAME>, and system_<LISTNAME> to "".
init_path_lists() {
    eval "spack_store_$1=\"\""
    eval "$1=\"\""
    eval "system_$1=\"\""
}

# assign_path_lists LISTNAME1 LISTNAME2
# Copy contents of LISTNAME2 into LISTNAME1, for each path_list prefix.
assign_path_lists() {
    eval "spack_store_$1=\"\${spack_store_$2}\""
    eval "$1=\"\${$2}\""
    eval "system_$1=\"\${system_$2}\""
}

# append_path_lists LISTNAME ELT
# Append the provided ELT to the appropriate list, based on the result of path_order().
append_path_lists() {
    path_order "$2"
    case $? in
        0) eval "append spack_store_$1 \"\$2\"" ;;
        1) eval "append $1 \"\$2\"" ;;
        2) eval "append system_$1 \"\$2\"" ;;
    esac
}
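
# (Annotation added for illustration; not part of the original script.)
# For example, categorizing the flag -L/opt/spack/store/lib would call
#     append_path_lists return_lib_dirs_list "/opt/spack/store/lib"
# and, depending on the exit status of path_order(), the path lands in
# spack_store_return_lib_dirs_list, return_lib_dirs_list, or
# system_return_lib_dirs_list.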

# Check if optional parameters are defined
# If we aren't asking for debug flags, don't add them
if [ -z "${SPACK_ADD_DEBUG_FLAGS:-}" ]; then

@@ -470,12 +499,7 @@ input_command="$*"

parse_Wl() {
    while [ $# -ne 0 ]; do
        if [ "$wl_expect_rpath" = yes ]; then
            path_order "$1"
            case $? in
                0) append return_spack_store_rpath_dirs_list "$1" ;;
                1) append return_rpath_dirs_list "$1" ;;
                2) append return_system_rpath_dirs_list "$1" ;;
            esac
            append_path_lists return_rpath_dirs_list "$1"
            wl_expect_rpath=no
        else
            case "$1" in

@@ -484,24 +508,14 @@ parse_Wl() {

                    if [ -z "$arg" ]; then
                        shift; continue
                    fi
                    path_order "$arg"
                    case $? in
                        0) append return_spack_store_rpath_dirs_list "$arg" ;;
                        1) append return_rpath_dirs_list "$arg" ;;
                        2) append return_system_rpath_dirs_list "$arg" ;;
                    esac
                    append_path_lists return_rpath_dirs_list "$arg"
                    ;;
                --rpath=*)
                    arg="${1#--rpath=}"
                    if [ -z "$arg" ]; then
                        shift; continue
                    fi
                    path_order "$arg"
                    case $? in
                        0) append return_spack_store_rpath_dirs_list "$arg" ;;
                        1) append return_rpath_dirs_list "$arg" ;;
                        2) append return_system_rpath_dirs_list "$arg" ;;
                    esac
                    append_path_lists return_rpath_dirs_list "$arg"
                    ;;
                -rpath|--rpath)
                    wl_expect_rpath=yes

@@ -509,8 +523,7 @@ parse_Wl() {

                "$dtags_to_strip")
                    ;;
                -Wl)
                    # Nested -Wl,-Wl means we're in NAG compiler territory, we don't support
                    # it.
                    # Nested -Wl,-Wl means we're in NAG compiler territory. We don't support it.
                    return 1
                    ;;
                *)

@@ -529,21 +542,10 @@ categorize_arguments() {

    return_other_args_list=""
    return_isystem_was_used=""

    return_isystem_spack_store_include_dirs_list=""
    return_isystem_system_include_dirs_list=""
    return_isystem_include_dirs_list=""

    return_spack_store_include_dirs_list=""
    return_system_include_dirs_list=""
    return_include_dirs_list=""

    return_spack_store_lib_dirs_list=""
    return_system_lib_dirs_list=""
    return_lib_dirs_list=""

    return_spack_store_rpath_dirs_list=""
    return_system_rpath_dirs_list=""
    return_rpath_dirs_list=""
    init_path_lists return_isystem_include_dirs_list
    init_path_lists return_include_dirs_list
    init_path_lists return_lib_dirs_list
    init_path_lists return_rpath_dirs_list

    # Global state for keeping track of -Wl,-rpath -Wl,/path
    wl_expect_rpath=no
@@ -609,32 +611,17 @@ categorize_arguments() {

                arg="${1#-isystem}"
                return_isystem_was_used=true
                if [ -z "$arg" ]; then shift; arg="$1"; fi
                path_order "$arg"
                case $? in
                    0) append return_isystem_spack_store_include_dirs_list "$arg" ;;
                    1) append return_isystem_include_dirs_list "$arg" ;;
                    2) append return_isystem_system_include_dirs_list "$arg" ;;
                esac
                append_path_lists return_isystem_include_dirs_list "$arg"
                ;;
            -I*)
                arg="${1#-I}"
                if [ -z "$arg" ]; then shift; arg="$1"; fi
                path_order "$arg"
                case $? in
                    0) append return_spack_store_include_dirs_list "$arg" ;;
                    1) append return_include_dirs_list "$arg" ;;
                    2) append return_system_include_dirs_list "$arg" ;;
                esac
                append_path_lists return_include_dirs_list "$arg"
                ;;
            -L*)
                arg="${1#-L}"
                if [ -z "$arg" ]; then shift; arg="$1"; fi
                path_order "$arg"
                case $? in
                    0) append return_spack_store_lib_dirs_list "$arg" ;;
                    1) append return_lib_dirs_list "$arg" ;;
                    2) append return_system_lib_dirs_list "$arg" ;;
                esac
                append_path_lists return_lib_dirs_list "$arg"
                ;;
            -l*)
                # -loopopt=0 is generated erroneously in autoconf <= 2.69,

@@ -667,32 +654,17 @@ categorize_arguments() {

                break
            elif [ "$xlinker_expect_rpath" = yes ]; then
                # Register the path of -Xlinker -rpath <other args> -Xlinker <path>
                path_order "$1"
                case $? in
                    0) append return_spack_store_rpath_dirs_list "$1" ;;
                    1) append return_rpath_dirs_list "$1" ;;
                    2) append return_system_rpath_dirs_list "$1" ;;
                esac
                append_path_lists return_rpath_dirs_list "$1"
                xlinker_expect_rpath=no
            else
                case "$1" in
                    -rpath=*)
                        arg="${1#-rpath=}"
                        path_order "$arg"
                        case $? in
                            0) append return_spack_store_rpath_dirs_list "$arg" ;;
                            1) append return_rpath_dirs_list "$arg" ;;
                            2) append return_system_rpath_dirs_list "$arg" ;;
                        esac
                        append_path_lists return_rpath_dirs_list "$arg"
                        ;;
                    --rpath=*)
                        arg="${1#--rpath=}"
                        path_order "$arg"
                        case $? in
                            0) append return_spack_store_rpath_dirs_list "$arg" ;;
                            1) append return_rpath_dirs_list "$arg" ;;
                            2) append return_system_rpath_dirs_list "$arg" ;;
                        esac
                        append_path_lists return_rpath_dirs_list "$arg"
                        ;;
                    -rpath|--rpath)
                        xlinker_expect_rpath=yes

@@ -709,7 +681,36 @@ categorize_arguments() {

            "$dtags_to_strip")
                ;;
            *)
                append return_other_args_list "$1"
                # if mode is not ld, we can just add to other args
                if [ "$mode" != "ld" ]; then
                    append return_other_args_list "$1"
                    shift
                    continue
                fi

                # if we're in linker mode, we need to parse raw RPATH args
                case "$1" in
                    -rpath=*)
                        arg="${1#-rpath=}"
                        append_path_lists return_rpath_dirs_list "$arg"
                        ;;
                    --rpath=*)
                        arg="${1#--rpath=}"
                        append_path_lists return_rpath_dirs_list "$arg"
                        ;;
                    -rpath|--rpath)
                        if [ $# -eq 1 ]; then
                            # -rpath without value: let the linker raise an error.
                            append return_other_args_list "$1"
                            break
                        fi
                        shift
                        append_path_lists return_rpath_dirs_list "$1"
                        ;;
                    *)
                        append return_other_args_list "$1"
                        ;;
                esac
                ;;
        esac
        shift
@@ -731,21 +732,10 @@ categorize_arguments() {

categorize_arguments "$@"

spack_store_include_dirs_list="$return_spack_store_include_dirs_list"
system_include_dirs_list="$return_system_include_dirs_list"
include_dirs_list="$return_include_dirs_list"

spack_store_lib_dirs_list="$return_spack_store_lib_dirs_list"
system_lib_dirs_list="$return_system_lib_dirs_list"
lib_dirs_list="$return_lib_dirs_list"

spack_store_rpath_dirs_list="$return_spack_store_rpath_dirs_list"
system_rpath_dirs_list="$return_system_rpath_dirs_list"
rpath_dirs_list="$return_rpath_dirs_list"

isystem_spack_store_include_dirs_list="$return_isystem_spack_store_include_dirs_list"
isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
isystem_include_dirs_list="$return_isystem_include_dirs_list"
assign_path_lists isystem_include_dirs_list return_isystem_include_dirs_list
assign_path_lists include_dirs_list return_include_dirs_list
assign_path_lists lib_dirs_list return_lib_dirs_list
assign_path_lists rpath_dirs_list return_rpath_dirs_list

isystem_was_used="$return_isystem_was_used"
other_args_list="$return_other_args_list"

@@ -821,21 +811,10 @@ IFS="$lsep"

categorize_arguments $spack_flags_list
unset IFS

spack_flags_isystem_spack_store_include_dirs_list="$return_isystem_spack_store_include_dirs_list"
spack_flags_isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
spack_flags_isystem_include_dirs_list="$return_isystem_include_dirs_list"

spack_flags_spack_store_include_dirs_list="$return_spack_store_include_dirs_list"
spack_flags_system_include_dirs_list="$return_system_include_dirs_list"
spack_flags_include_dirs_list="$return_include_dirs_list"

spack_flags_spack_store_lib_dirs_list="$return_spack_store_lib_dirs_list"
spack_flags_system_lib_dirs_list="$return_system_lib_dirs_list"
spack_flags_lib_dirs_list="$return_lib_dirs_list"

spack_flags_spack_store_rpath_dirs_list="$return_spack_store_rpath_dirs_list"
spack_flags_system_rpath_dirs_list="$return_system_rpath_dirs_list"
spack_flags_rpath_dirs_list="$return_rpath_dirs_list"
assign_path_lists spack_flags_isystem_include_dirs_list return_isystem_include_dirs_list
assign_path_lists spack_flags_include_dirs_list return_include_dirs_list
assign_path_lists spack_flags_lib_dirs_list return_lib_dirs_list
assign_path_lists spack_flags_rpath_dirs_list return_rpath_dirs_list

spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_other_args_list="$return_other_args_list"
@@ -894,7 +873,7 @@ esac

case "$mode" in
    cpp|cc|as|ccld)
        if [ "$spack_flags_isystem_was_used" = "true" ] || [ "$isystem_was_used" = "true" ]; then
            extend isystem_spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS
            extend spack_store_isystem_include_dirs_list SPACK_STORE_INCLUDE_DIRS
            extend isystem_include_dirs_list SPACK_INCLUDE_DIRS
        else
            extend spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS

@@ -910,64 +889,63 @@ args_list="$flags_list"

# Include search paths partitioned by (in store, non-system, system)
# NOTE: adding ${lsep} to the prefix here turns every added element into two
extend args_list spack_flags_spack_store_include_dirs_list -I
extend args_list spack_store_spack_flags_include_dirs_list -I
extend args_list spack_store_include_dirs_list -I

extend args_list spack_flags_include_dirs_list -I
extend args_list include_dirs_list -I

extend args_list spack_flags_isystem_spack_store_include_dirs_list "-isystem${lsep}"
extend args_list isystem_spack_store_include_dirs_list "-isystem${lsep}"
extend args_list spack_store_spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list spack_store_isystem_include_dirs_list "-isystem${lsep}"

extend args_list spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list isystem_include_dirs_list "-isystem${lsep}"

extend args_list spack_flags_system_include_dirs_list -I
extend args_list system_spack_flags_include_dirs_list -I
extend args_list system_include_dirs_list -I

extend args_list spack_flags_isystem_system_include_dirs_list "-isystem${lsep}"
extend args_list isystem_system_include_dirs_list "-isystem${lsep}"
extend args_list system_spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list system_isystem_include_dirs_list "-isystem${lsep}"

# Library search paths partitioned by (in store, non-system, system)
extend args_list spack_flags_spack_store_lib_dirs_list "-L"
extend args_list spack_store_spack_flags_lib_dirs_list "-L"
extend args_list spack_store_lib_dirs_list "-L"

extend args_list spack_flags_lib_dirs_list "-L"
extend args_list lib_dirs_list "-L"

extend args_list spack_flags_system_lib_dirs_list "-L"
extend args_list system_spack_flags_lib_dirs_list "-L"
extend args_list system_lib_dirs_list "-L"

# RPATH arguments
rpath_prefix=""
case "$mode" in
    ccld)
        if [ -n "$dtags_to_add" ] ; then
            append args_list "$linker_arg$dtags_to_add"
        fi
        extend args_list spack_flags_spack_store_rpath_dirs_list "$rpath"
        extend args_list spack_store_rpath_dirs_list "$rpath"

        extend args_list spack_flags_rpath_dirs_list "$rpath"
        extend args_list rpath_dirs_list "$rpath"

        extend args_list spack_flags_system_rpath_dirs_list "$rpath"
        extend args_list system_rpath_dirs_list "$rpath"
        rpath_prefix="$rpath"
        ;;
    ld)
        if [ -n "$dtags_to_add" ] ; then
            append args_list "$dtags_to_add"
        fi
        extend args_list spack_flags_spack_store_rpath_dirs_list "-rpath${lsep}"
        extend args_list spack_store_rpath_dirs_list "-rpath${lsep}"

        extend args_list spack_flags_rpath_dirs_list "-rpath${lsep}"
        extend args_list rpath_dirs_list "-rpath${lsep}"

        extend args_list spack_flags_system_rpath_dirs_list "-rpath${lsep}"
        extend args_list system_rpath_dirs_list "-rpath${lsep}"
        rpath_prefix="-rpath${lsep}"
        ;;
esac

# if mode is ccld or ld, extend RPATH lists with the prefix determined above
if [ -n "$rpath_prefix" ]; then
    extend args_list spack_store_spack_flags_rpath_dirs_list "$rpath_prefix"
    extend args_list spack_store_rpath_dirs_list "$rpath_prefix"

    extend args_list spack_flags_rpath_dirs_list "$rpath_prefix"
    extend args_list rpath_dirs_list "$rpath_prefix"

    extend args_list system_spack_flags_rpath_dirs_list "$rpath_prefix"
    extend args_list system_rpath_dirs_list "$rpath_prefix"
fi

# Other arguments from the input command
extend args_list other_args_list
extend args_list spack_flags_other_args_list
lib/spack/external/__init__.py (vendored; 2 lines changed)

@@ -18,7 +18,7 @@

* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.5-dev (commit bceb39528ac49dd0c876b2e9bf3e7482e9c2be4a)
* Version: 0.2.5 (commit 38ce485258ffc4fc6dd6688f8dc90cb269478c47)

astunparse
----------------
@@ -81,8 +81,13 @@ def __init__(self, name, parents, vendor, features, compilers, generation=0, cpu_part=""):

        self.generation = generation
        # Only relevant for AArch64
        self.cpu_part = cpu_part
        # Cache the ancestor computation

        # Cache the "ancestor" computation
        self._ancestors = None
        # Cache the "generic" computation
        self._generic = None
        # Cache the "family" computation
        self._family = None

    @property
    def ancestors(self):

@@ -174,18 +179,22 @@ def __contains__(self, feature):

    @property
    def family(self):
        """Returns the architecture family a given target belongs to"""
        roots = [x for x in [self] + self.ancestors if not x.ancestors]
        msg = "a target is expected to belong to just one architecture family"
        msg += f"[found {', '.join(str(x) for x in roots)}]"
        assert len(roots) == 1, msg
        if self._family is None:
            roots = [x for x in [self] + self.ancestors if not x.ancestors]
            msg = "a target is expected to belong to just one architecture family"
            msg += f"[found {', '.join(str(x) for x in roots)}]"
            assert len(roots) == 1, msg
            self._family = roots.pop()

        return roots.pop()
        return self._family

    @property
    def generic(self):
        """Returns the best generic architecture that is compatible with self"""
        generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
        return max(generics, key=lambda x: len(x.ancestors))
        if self._generic is None:
            generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
            self._generic = max(generics, key=lambda x: len(x.ancestors))
        return self._generic

    def to_dict(self):
        """Returns a dictionary representation of this object."""
@@ -1482,7 +1482,6 @@

      "cldemote",
      "movdir64b",
      "movdiri",
      "pdcm",
      "serialize",
      "waitpkg"
    ],

@@ -2237,6 +2236,84 @@

      ]
    }
  },
  "zen5": {
    "from": ["zen4"],
    "vendor": "AuthenticAMD",
    "features": [
      "abm",
      "aes",
      "avx",
      "avx2",
      "avx512_bf16",
      "avx512_bitalg",
      "avx512bw",
      "avx512cd",
      "avx512dq",
      "avx512f",
      "avx512ifma",
      "avx512vbmi",
      "avx512_vbmi2",
      "avx512vl",
      "avx512_vnni",
      "avx512_vp2intersect",
      "avx512_vpopcntdq",
      "avx_vnni",
      "bmi1",
      "bmi2",
      "clflushopt",
      "clwb",
      "clzero",
      "cppc",
      "cx16",
      "f16c",
      "flush_l1d",
      "fma",
      "fsgsbase",
      "gfni",
      "ibrs_enhanced",
      "mmx",
      "movbe",
      "movdir64b",
      "movdiri",
      "pclmulqdq",
      "popcnt",
      "rdseed",
      "sse",
      "sse2",
      "sse4_1",
      "sse4_2",
      "sse4a",
      "ssse3",
      "tsc_adjust",
      "vaes",
      "vpclmulqdq",
      "xsavec",
      "xsaveopt"
    ],
    "compilers": {
      "gcc": [
        {
          "versions": "14.1:",
          "name": "znver5",
          "flags": "-march={name} -mtune={name}"
        }
      ],
      "aocc": [
        {
          "versions": "5.0:",
          "name": "znver5",
          "flags": "-march={name} -mtune={name}"
        }
      ],
      "clang": [
        {
          "versions": "19.1:",
          "name": "znver5",
          "flags": "-march={name} -mtune={name}"
        }
      ]
    }
  },
  "ppc64": {
    "from": [],
    "vendor": "generic",
@@ -41,6 +41,20 @@ def comma_and(sequence: List[str]) -> str:

    return comma_list(sequence, "and")


def ordinal(number: int) -> str:
    """Return the ordinal representation (1st, 2nd, 3rd, etc.) for the provided number.

    Args:
        number: int to convert to ordinal number

    Returns: number's corresponding ordinal
    """
    idx = (number % 10) << 1
    tens = number % 100 // 10
    suffix = "th" if tens == 1 or idx > 6 else "thstndrd"[idx : idx + 2]
    return f"{number}{suffix}"


def quote(sequence: List[str], q: str = "'") -> List[str]:
    """Quotes each item in the input list with the quote character passed as second argument."""
    return [f"{q}{e}{q}" for e in sequence]
@@ -20,11 +20,23 @@

import tempfile
from contextlib import contextmanager
from itertools import accumulate
from typing import Callable, Iterable, List, Match, Optional, Tuple, Union
from typing import (
    Callable,
    Deque,
    Dict,
    Iterable,
    List,
    Match,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
)

import llnl.util.symlink
from llnl.util import tty
from llnl.util.lang import dedupe, memoized
from llnl.util.lang import dedupe, fnmatch_translate_multiple, memoized
from llnl.util.symlink import islink, readlink, resolve_link_target_relative_to_the_link, symlink

from ..path import path_to_os_path, system_path_filter

@@ -47,6 +59,7 @@

    "copy_mode",
    "filter_file",
    "find",
    "find_first",
    "find_headers",
    "find_all_headers",
    "find_libraries",

@@ -84,6 +97,8 @@

    "visit_directory_tree",
]

Path = Union[str, pathlib.Path]

if sys.version_info < (3, 7, 4):
    # monkeypatch shutil.copystat to fix PermissionError when copying read-only
    # files on Lustre when using Python < 3.7.4
@@ -1672,105 +1687,204 @@ def find_first(root: str, files: Union[Iterable[str], str], bfs_depth: int = 2)
    return FindFirstFile(root, *files, bfs_depth=bfs_depth).find()


def find(root, files, recursive=True):
    """Search for ``files`` starting from the ``root`` directory.

    Like GNU/BSD find but written entirely in Python.

    Examples:

    .. code-block:: console

       $ find /usr -name python

    is equivalent to:

    >>> find('/usr', 'python')

    .. code-block:: console

       $ find /usr/local/bin -maxdepth 1 -name python

    is equivalent to:

    >>> find('/usr/local/bin', 'python', recursive=False)
def find(
    root: Union[Path, Sequence[Path]],
    files: Union[str, Sequence[str]],
    recursive: bool = True,
    *,
    max_depth: Optional[int] = None,
) -> List[str]:
    """Finds all files matching the patterns from ``files`` starting from ``root``. This function
    returns a deterministic result for the same input and directory structure when run multiple
    times. Symlinked directories are followed, and unique directories are searched only once. Each
    matching file is returned only once at lowest depth in case multiple paths exist due to
    symlinked directories.

    Accepts any glob characters accepted by fnmatch:

    ==========  ====================================
    Pattern     Meaning
    ==========  ====================================
    ``*``       matches everything
    ``*``       matches one or more characters
    ``?``       matches any single character
    ``[seq]``   matches any character in ``seq``
    ``[!seq]``  matches any character not in ``seq``
    ==========  ====================================

    Parameters:
        root (str): The root directory to start searching from
        files (str or collections.abc.Sequence): Library name(s) to search for
        recursive (bool): if False search only root folder,
            if True descends top-down from the root. Defaults to True.
    Examples:

    Returns:
        list: The files that have been found
    >>> find("/usr", "*.txt", recursive=True, max_depth=2)

    finds all files with the extension ``.txt`` in the directory ``/usr`` and subdirectories up to
    depth 2.

    >>> find(["/usr", "/var"], ["*.txt", "*.log"], recursive=True)

    finds all files with the extension ``.txt`` or ``.log`` in the directories ``/usr`` and
    ``/var`` at any depth.

    >>> find("/usr", "GL/*.h", recursive=True)

    finds all header files in a directory GL at any depth in the directory ``/usr``.

    Parameters:
        root: One or more root directories to start searching from
        files: One or more filename patterns to search for
        recursive: if False search only root, if True descends from roots. Defaults to True.
        max_depth: if set, don't search below this depth. Cannot be set if recursive is False

    Returns a list of absolute, matching file paths.
    """
    if isinstance(root, (str, pathlib.Path)):
        root = [root]
    elif not isinstance(root, collections.abc.Sequence):
        raise TypeError(f"'root' arg must be a path or a sequence of paths, not '{type(root)}'")

    if isinstance(files, str):
        files = [files]
    elif not isinstance(files, collections.abc.Sequence):
        raise TypeError(f"'files' arg must be str or a sequence of str, not '{type(files)}'")

    if recursive:
        tty.debug(f"Find (recursive): {root} {str(files)}")
        result = _find_recursive(root, files)
    else:
        tty.debug(f"Find (not recursive): {root} {str(files)}")
        result = _find_non_recursive(root, files)
    # If recursive is false, max_depth can only be None or 0
    if max_depth and not recursive:
        raise ValueError(f"max_depth ({max_depth}) cannot be set if recursive is False")

    tty.debug(f"Find complete: {root} {str(files)}")
    tty.debug(f"Find (max depth = {max_depth}): {root} {files}")
    if not recursive:
        max_depth = 0
    elif max_depth is None:
        max_depth = sys.maxsize
    result = _find_max_depth(root, files, max_depth)
    tty.debug(f"Find complete: {root} {files}")
    return result

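A minimal sketch of how the reworked ``find`` API is meant to be called; the paths and
patterns below are illustrative, not taken from the diff:

    import llnl.util.filesystem as fs

    # Single root, single pattern, full recursion (the pre-existing behavior):
    headers = fs.find("/usr", "*.h")

    # New in this change: multiple roots, multiple patterns, and a depth bound.
    # max_depth=2 stops the breadth-first search two directory levels below each root.
    logs = fs.find(["/var/log", "/tmp"], ["*.log", "*.txt"], max_depth=2)

    # max_depth is keyword-only and incompatible with recursive=False:
    # fs.find("/usr", "*.h", recursive=False, max_depth=1)  # raises ValueError
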
@system_path_filter
def _find_recursive(root, search_files):
    # The variable here is **on purpose** a defaultdict. The idea is that
    # we want to poke the filesystem as little as possible, but still maintain
    # stability in the order of the answer. Thus we are recording each library
    # found in a key, and reconstructing the stable order later.
    found_files = collections.defaultdict(list)

    # Make the path absolute to have os.walk also return an absolute path
    root = os.path.abspath(root)
    for path, _, list_files in os.walk(root):
        for search_file in search_files:
            matches = glob.glob(os.path.join(path, search_file))
            matches = [os.path.join(path, x) for x in matches]
            found_files[search_file].extend(matches)

    answer = []
    for search_file in search_files:
        answer.extend(found_files[search_file])

    return answer
def _log_file_access_issue(e: OSError, path: str) -> None:
    errno_name = errno.errorcode.get(e.errno, "UNKNOWN")
    tty.debug(f"find must skip {path}: {errno_name} {e}")


@system_path_filter
def _find_non_recursive(root, search_files):
    # The variable here is **on purpose** a defaultdict as os.listdir
    # can return files in any order (does not preserve stability)
    found_files = collections.defaultdict(list)
def _file_id(s: os.stat_result) -> Tuple[int, int]:
    # Note: on windows, st_ino is the file index and st_dev is the volume serial number. See
    # https://github.com/python/cpython/blob/3.9/Python/fileutils.c
    return (s.st_ino, s.st_dev)

    # Make the path absolute to have absolute path returned
    root = os.path.abspath(root)

    for search_file in search_files:
        matches = glob.glob(os.path.join(root, search_file))
        matches = [os.path.join(root, x) for x in matches]
        found_files[search_file].extend(matches)
def _dedupe_files(paths: List[str]) -> List[str]:
    """Deduplicate files by inode and device, dropping files that cannot be accessed."""
    unique_files: List[str] = []
    # tuple of (inode, device) for each file without following symlinks
    visited: Set[Tuple[int, int]] = set()
    for path in paths:
        try:
            stat_info = os.lstat(path)
        except OSError as e:
            _log_file_access_issue(e, path)
            continue
        file_id = _file_id(stat_info)
        if file_id not in visited:
            unique_files.append(path)
            visited.add(file_id)
    return unique_files

    answer = []
    for search_file in search_files:
        answer.extend(found_files[search_file])

    return answer
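The (inode, device) pair is what makes this dedup robust against hardlinks and symlinked
directory aliases. A standalone sketch of the same idea (file names are illustrative):

    import os
    from typing import List, Set, Tuple

    def dedupe_by_file_identity(paths: List[str]) -> List[str]:
        # Two paths that point at the same underlying file share (st_ino, st_dev),
        # even if one of them was reached through a hardlink or a symlinked parent.
        seen: Set[Tuple[int, int]] = set()
        unique = []
        for p in paths:
            try:
                st = os.lstat(p)  # lstat: do not follow a final symlink component
            except OSError:
                continue
            key = (st.st_ino, st.st_dev)
            if key not in seen:
                seen.add(key)
                unique.append(p)
        return unique
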
def _find_max_depth(
|
||||
roots: Sequence[Path], globs: Sequence[str], max_depth: int = sys.maxsize
|
||||
) -> List[str]:
|
||||
"""See ``find`` for the public API."""
|
||||
# We optimize for the common case of simple filename only patterns: a single, combined regex
|
||||
# is used. For complex patterns that include path components, we use a slower glob call from
|
||||
# every directory we visit within max_depth.
|
||||
filename_only_patterns = {
|
||||
f"pattern_{i}": os.path.normcase(x) for i, x in enumerate(globs) if "/" not in x
|
||||
}
|
||||
complex_patterns = {f"pattern_{i}": x for i, x in enumerate(globs) if "/" in x}
|
||||
regex = re.compile(fnmatch_translate_multiple(filename_only_patterns))
|
||||
# Ordered dictionary that keeps track of what pattern found which files
|
||||
matched_paths: Dict[str, List[str]] = {f"pattern_{i}": [] for i, _ in enumerate(globs)}
|
||||
# Ensure returned paths are always absolute
|
||||
roots = [os.path.abspath(r) for r in roots]
|
||||
# Breadth-first search queue. Each element is a tuple of (depth, dir)
|
||||
dir_queue: Deque[Tuple[int, str]] = collections.deque()
|
||||
# Set of visited directories. Each element is a tuple of (inode, device)
|
||||
visited_dirs: Set[Tuple[int, int]] = set()
|
||||
|
||||
for root in roots:
|
||||
try:
|
||||
stat_root = os.stat(root)
|
||||
except OSError as e:
|
||||
_log_file_access_issue(e, root)
|
||||
continue
|
||||
dir_id = _file_id(stat_root)
|
||||
if dir_id not in visited_dirs:
|
||||
dir_queue.appendleft((0, root))
|
||||
visited_dirs.add(dir_id)
|
||||
|
||||
while dir_queue:
|
||||
depth, curr_dir = dir_queue.pop()
|
||||
try:
|
||||
dir_iter = os.scandir(curr_dir)
|
||||
except OSError as e:
|
||||
_log_file_access_issue(e, curr_dir)
|
||||
continue
|
||||
|
||||
# Use glob.glob for complex patterns.
|
||||
for pattern_name, pattern in complex_patterns.items():
|
||||
matched_paths[pattern_name].extend(
|
||||
path for path in glob.glob(os.path.join(curr_dir, pattern))
|
||||
)
|
||||
|
||||
# List of subdirectories by path and (inode, device) tuple
|
||||
subdirs: List[Tuple[str, Tuple[int, int]]] = []
|
||||
|
||||
with dir_iter:
|
||||
for dir_entry in dir_iter:
|
||||
|
||||
# Match filename only patterns
|
||||
if filename_only_patterns:
|
||||
m = regex.match(os.path.normcase(dir_entry.name))
|
||||
if m:
|
||||
for pattern_name in filename_only_patterns:
|
||||
if m.group(pattern_name):
|
||||
matched_paths[pattern_name].append(dir_entry.path)
|
||||
break
|
||||
|
||||
# Collect subdirectories
|
||||
if depth >= max_depth:
|
||||
continue
|
||||
|
||||
try:
|
||||
if not dir_entry.is_dir(follow_symlinks=True):
|
||||
continue
|
||||
if sys.platform == "win32":
|
||||
# Note: st_ino/st_dev on DirEntry.stat are not set on Windows, so we have
|
||||
# to call os.stat
|
||||
stat_info = os.stat(dir_entry.path, follow_symlinks=True)
|
||||
else:
|
||||
stat_info = dir_entry.stat(follow_symlinks=True)
|
||||
except OSError as e:
|
||||
# Possible permission issue, or a symlink that cannot be resolved (ELOOP).
|
||||
_log_file_access_issue(e, dir_entry.path)
|
||||
continue
|
||||
|
||||
subdirs.append((dir_entry.path, _file_id(stat_info)))
|
||||
|
||||
# Enqueue subdirectories in a deterministic order
|
||||
if subdirs:
|
||||
subdirs.sort(key=lambda s: os.path.basename(s[0]))
|
||||
for subdir, subdir_id in subdirs:
|
||||
if subdir_id not in visited_dirs:
|
||||
dir_queue.appendleft((depth + 1, subdir))
|
||||
visited_dirs.add(subdir_id)
|
||||
|
||||
# Sort the matched paths for deterministic output
|
||||
for paths in matched_paths.values():
|
||||
paths.sort()
|
||||
all_matching_paths = [path for paths in matched_paths.values() for path in paths]
|
||||
|
||||
# We only dedupe files if we have any complex patterns, since only they can match the same file
|
||||
# multiple times
|
||||
return _dedupe_files(all_matching_paths) if complex_patterns else all_matching_paths
|
||||
|
||||
|
||||
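For intuition, a compact sketch of the traversal skeleton ``_find_max_depth`` is built on: a
deque-driven, depth-bounded breadth-first walk. The real function additionally dedupes
directories by (inode, device) and follows symlinks; this toy version skips both and is only
meant to show the queue discipline:

    import collections
    import os
    from typing import Deque, Tuple

    def bfs_dirs(root: str, max_depth: int):
        """Yield (depth, directory) pairs breadth-first, at most max_depth levels below root."""
        queue: Deque[Tuple[int, str]] = collections.deque([(0, os.path.abspath(root))])
        while queue:
            depth, current = queue.pop()
            yield depth, current
            if depth >= max_depth:
                continue
            with os.scandir(current) as entries:
                # sort for a deterministic visit order, as the real code does
                for entry in sorted(entries, key=lambda e: e.name):
                    if entry.is_dir(follow_symlinks=False):
                        queue.appendleft((depth + 1, entry.path))
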
# Utilities for libraries and headers

@@ -1991,7 +2105,13 @@ def add_macro(self, macro):
        self._macro_definitions.append(macro)


def find_headers(headers, root, recursive=False):
def find_headers(
    headers: Union[List[str], str],
    root: str,
    recursive: Union[bool, str] = "heuristic",
    *,
    max_depth: Optional[int] = None,
) -> HeaderList:
    """Returns an iterable object containing a list of full paths to
    headers if found.

@@ -2007,10 +2127,11 @@ def find_headers(headers, root, recursive=False):
    =======  ====================================

    Parameters:
        headers (str or list): Header name(s) to search for
        root (str): The root directory to start searching from
        recursive (bool): if False search only root folder,
            if True descends top-down from the root. Defaults to False.
        headers: Header name(s) to search for
        root: The root directory to start searching from
        recursive: if False search only root folder, if True recurse from the root. Defaults to
            "heuristic", which uses a non-exhaustive, faster search.
        max_depth: if set, don't search below this depth. Cannot be set if recursive is False.

    Returns:
        HeaderList: The headers that have been found
@@ -2018,10 +2139,13 @@ def find_headers(headers, root, recursive=False):
    if isinstance(headers, str):
        headers = [headers]
    elif not isinstance(headers, collections.abc.Sequence):
        message = "{0} expects a string or sequence of strings as the "
        message += "first argument [got {1} instead]"
        message = message.format(find_headers.__name__, type(headers))
        raise TypeError(message)
        raise TypeError(
            f"{find_headers.__name__} expects a string or sequence of strings as the "
            f"first argument [got {type(headers)} instead]"
        )

    if recursive is False and max_depth is not None:
        raise ValueError(f"max_depth ({max_depth}) cannot be set if recursive is False")

    # Construct the right suffix for the headers
    suffixes = [
@@ -2041,18 +2165,32 @@ def find_headers(headers, root, recursive=False):
    ]

    # List of headers we are searching with suffixes
    headers = ["{0}.{1}".format(header, suffix) for header in headers for suffix in suffixes]
    headers = [f"{header}.{suffix}" for header in headers for suffix in suffixes]

    return HeaderList(find(root, headers, recursive))
    if isinstance(recursive, bool):
        return HeaderList(find(root, headers, recursive=recursive, max_depth=max_depth))

    # The heuristic here is simpler than the one for libraries: restrict search to <root>/include
    # (if root isn't an include directory itself) and limit search depth so that headers are found
    # not deeper than <root>/include/<subdir>/<subdir>/*.

    if max_depth is None:
        max_depth = 3

    if os.path.basename(root).lower() != "include":
        root = os.path.join(root, "include")
        max_depth -= 1

    return HeaderList(find(root, headers, recursive=True, max_depth=max_depth))
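The depth bookkeeping is easy to misread, so a worked example may help (the paths are
invented). With the default budget of 3, searching a non-include root first re-roots the
search and spends one level of the budget:

    # recursive="heuristic", max_depth=None, root = /opt/foo (not an include dir):
    #   re-rooted at /opt/foo/include, max_depth = 3 - 1 = 2
    #   depth 0 -> /opt/foo/include/*.h
    #   depth 1 -> /opt/foo/include/GL/*.h
    #   depth 2 -> /opt/foo/include/GL/internal/*.h
    #
    # If root is already an include directory, the full budget of 3 applies there directly.
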
@system_path_filter
def find_all_headers(root):
    """Convenience function that returns the list of all headers found
    in the directory passed as argument.
def find_all_headers(root: str) -> HeaderList:
    """Convenience function that returns the list of all headers found in the directory passed as
    argument.

    Args:
        root (str): directory where to look recursively for header files
        root: directory where to look recursively for header files

    Returns:
        List of all headers found in ``root`` and subdirectories.
@@ -2209,8 +2347,16 @@ def find_system_libraries(libraries, shared=True):
    return libraries_found


def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
    """Returns an iterable of full paths to libraries found in a root dir.
def find_libraries(
    libraries: Union[List[str], str],
    root: str,
    shared: bool = True,
    recursive: Union[bool, str] = "heuristic",
    runtime: bool = True,
    *,
    max_depth: Optional[int] = None,
) -> LibraryList:
    """Find libraries in the specified root directory.

    Accepts any glob characters accepted by fnmatch:

@@ -2224,27 +2370,29 @@ def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
    =======  ====================================

    Parameters:
        libraries (str or list): Library name(s) to search for
        root (str): The root directory to start searching from
        shared (bool): if True searches for shared libraries,
            otherwise for static. Defaults to True.
        recursive (bool): if False search only root folder,
            if True descends top-down from the root. Defaults to False.
        runtime (bool): Windows only option, no-op elsewhere. If true,
            search for runtime shared libs (.DLL), otherwise, search
            for .Lib files. If shared is false, this has no meaning.
            Defaults to True.
        libraries: library name(s) to search for
        root: the root directory to start searching from
        shared: if True searches for shared libraries, otherwise for static. Defaults to True.
        recursive: if False search only root folder, if True recurse from the root. Defaults to
            "heuristic", which uses a non-exhaustive, faster search.
        runtime: Windows only option, no-op elsewhere. If True (default), search for runtime shared
            libs (.DLL), otherwise, search for .Lib files. If shared is False, this has no meaning.
        max_depth: if set, don't search below this depth. Cannot be set if recursive is False.

    Returns:
        LibraryList: The libraries that have been found
    """

    if isinstance(libraries, str):
        libraries = [libraries]
    elif not isinstance(libraries, collections.abc.Sequence):
        message = "{0} expects a string or sequence of strings as the "
        message += "first argument [got {1} instead]"
        message = message.format(find_libraries.__name__, type(libraries))
        raise TypeError(message)
        raise TypeError(
            f"{find_libraries.__name__} expects a string or sequence of strings as the "
            f"first argument [got {type(libraries)} instead]"
        )

    if recursive is False and max_depth is not None:
        raise ValueError(f"max_depth ({max_depth}) cannot be set if recursive is False")

    if sys.platform == "win32":
        static_ext = "lib"
@@ -2267,31 +2415,52 @@ def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
        suffixes = [static_ext]

    # List of libraries we are searching with suffixes
    libraries = ["{0}.{1}".format(lib, suffix) for lib in libraries for suffix in suffixes]
    libraries = [f"{lib}.{suffix}" for lib in libraries for suffix in suffixes]

    if not recursive:
        # If not recursive, look for the libraries directly in root
        return LibraryList(find(root, libraries, False))
    if isinstance(recursive, bool):
        return LibraryList(find(root, libraries, recursive=recursive, max_depth=max_depth))

    # Heuristic search: a form of non-exhaustive iterative deepening, in order to return early if
    # libraries are found in their usual locations. This is the default behavior for recursive
    # searches.

    if max_depth is None:
        # this default covers search in <root>/lib/pythonX.Y/site-packages/<package>/*.
        max_depth = 4

    # To speedup the search for external packages configured e.g. in /usr,
    # perform first non-recursive search in root/lib then in root/lib64 and
    # finally search all of root recursively. The search stops when the first
    # match is found.
    common_lib_dirs = ["lib", "lib64"]
    if sys.platform == "win32":
        common_lib_dirs.extend(["bin", "Lib"])

    for subdir in common_lib_dirs:
        dirname = join_path(root, subdir)
        if not os.path.isdir(dirname):
            continue
        found_libs = find(dirname, libraries, False)
        if found_libs:
            break
        common_lib_dirs = ("lib", "lib64", "bin", "Lib")
    else:
        found_libs = find(root, libraries, True)
        common_lib_dirs = ("lib", "lib64")

    return LibraryList(found_libs)
    if os.path.basename(root).lower() not in common_lib_dirs:
        # search root and its direct library subdirectories non-recursively
        non_recursive = [root, *(os.path.join(root, libdir) for libdir in common_lib_dirs)]
        # avoid the expensive recursive search of the root directory
        fallback_recursive = [os.path.join(root, libdir) for libdir in common_lib_dirs]
        # reduce max_depth by 1 as we already joined the common library directories
        max_depth -= 1
    else:
        # the call site already has a common library dir as root
        non_recursive = [root]
        fallback_recursive = [root]

    found_libs = find(non_recursive, libraries, recursive=False)

    if found_libs:
        return LibraryList(found_libs)

    # Do one more (manual) step of iterative deepening, to early exit on typical
    # <root>/lib/<triplet>/ sub-directories before exhaustive, max_depth search. Slightly better
    # would be to add lib/<triplet> itself to common_lib_dirs, but we are lacking information to
    # determine the triplet.
    if max_depth is None or max_depth > 1:
        found_libs = find(fallback_recursive, libraries, max_depth=1)
        if found_libs:
            return LibraryList(found_libs)

    # Finally fall back to exhaustive, recursive search
    return LibraryList(find(fallback_recursive, libraries, recursive=True, max_depth=max_depth))
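As a quick illustration of the staged search (directories and the suffix are hypothetical),
the heuristic issues at most three ``find`` calls and returns at the first hit:

    # find_libraries("z", "/opt/zlib") with recursive="heuristic" roughly expands to:
    #
    # 1. find(["/opt/zlib", "/opt/zlib/lib", "/opt/zlib/lib64"], ["libz.so"], recursive=False)
    # 2. find(["/opt/zlib/lib", "/opt/zlib/lib64"], ["libz.so"], max_depth=1)  # lib/<triplet>/
    # 3. find(["/opt/zlib/lib", "/opt/zlib/lib64"], ["libz.so"], recursive=True, max_depth=3)
    #
    # Each stage returns early if it finds anything, so the exhaustive recursive
    # search only runs for unusual layouts.
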
def find_all_shared_libraries(root, recursive=False, runtime=True):

@@ -5,14 +5,17 @@

import collections.abc
import contextlib
import fnmatch
import functools
import itertools
import os
import re
import sys
import traceback
import typing
import warnings
from datetime import datetime, timedelta
from typing import Callable, Iterable, List, Tuple, TypeVar
from typing import Callable, Dict, Iterable, List, Tuple, TypeVar

# Ignore emacs backups when listing modules
ignore_modules = r"^\.#|~$"
@@ -858,6 +861,19 @@ def elide_list(line_list: List[str], max_num: int = 10) -> List[str]:
    return line_list


if sys.version_info >= (3, 9):
    PatternStr = re.Pattern[str]
else:
    PatternStr = typing.Pattern[str]


def fnmatch_translate_multiple(named_patterns: Dict[str, str]) -> str:
    """Similar to ``fnmatch.translate``, but takes an ordered dictionary where keys are pattern
    names, and values are filename patterns. The output is a regex that matches any of the
    patterns in order, and named capture groups are used to identify which pattern matched."""
    return "|".join(f"(?P<{n}>{fnmatch.translate(p)})" for n, p in named_patterns.items())
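A usage sketch of the new helper (the pattern dict below is illustrative): it produces a
single alternation with one named group per pattern, so a caller can tell which pattern
fired from a single match:

    import re
    from llnl.util.lang import fnmatch_translate_multiple

    regex = re.compile(fnmatch_translate_multiple({"sources": "*.c", "headers": "*.h"}))

    m = regex.match("zlib.h")
    assert m is not None
    assert m.group("headers") == "zlib.h" and m.group("sources") is None
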
@contextlib.contextmanager
def nullcontext(*args, **kwargs):
    """Empty context manager.
@@ -870,15 +886,6 @@ class UnhashableArguments(TypeError):
    """Raise when an @memoized function receives unhashable arg or kwarg values."""


def enum(**kwargs):
    """Return an enum-like class.

    Args:
        **kwargs: explicit dictionary of enums
    """
    return type("Enum", (object,), kwargs)


T = TypeVar("T")


@@ -914,6 +921,21 @@ def ensure_last(lst, *elements):
    lst.append(lst.pop(lst.index(elt)))


class Const:
    """Class level constant, raises when trying to set the attribute"""

    __slots__ = ["value"]

    def __init__(self, value):
        self.value = value

    def __get__(self, instance, owner):
        return self.value

    def __set__(self, instance, value):
        raise TypeError(f"Const value does not support assignment [value={self.value}]")
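A short usage sketch of the new ``Const`` descriptor (the class and value are illustrative).
Because ``Const`` defines ``__set__``, it is a data descriptor and intercepts instance
assignment as well:

    class Defaults:
        max_jobs = Const(16)

    d = Defaults()
    assert d.max_jobs == 16
    d.max_jobs = 32  # raises TypeError: Const value does not support assignment [value=16]
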
class TypedMutableSequence(collections.abc.MutableSequence):
    """Base class that behaves like a list, just with a different type.

@@ -1018,3 +1040,42 @@ def __init__(self, callback):

    def __get__(self, instance, owner):
        return self.callback(owner)


class DeprecatedProperty:
    """Data descriptor to error or warn when a deprecated property is accessed.

    Derived classes must define a factory method to return an adaptor for the deprecated
    property, if the descriptor is not set to error.
    """

    __slots__ = ["name"]

    #: 0 - Nothing
    #: 1 - Warning
    #: 2 - Error
    error_lvl = 0

    def __init__(self, name: str) -> None:
        self.name = name

    def __get__(self, instance, owner):
        if instance is None:
            return self

        if self.error_lvl == 1:
            warnings.warn(
                f"accessing the '{self.name}' property of '{instance}', which is deprecated"
            )
        elif self.error_lvl == 2:
            raise AttributeError(f"cannot access the '{self.name}' attribute of '{instance}'")

        return self.factory(instance, owner)

    def __set__(self, instance, value):
        raise TypeError(
            f"the deprecated property '{self.name}' of '{instance}' does not support assignment"
        )

    def factory(self, instance, owner):
        raise NotImplementedError("must be implemented by derived classes")
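A minimal subclass sketch showing the intended pattern; the names and the adaptor returned
are invented, not from the diff:

    class DeprecatedStdFlags(DeprecatedProperty):
        def factory(self, instance, owner):
            # Adaptor returned while the deprecation is only a warning.
            return ["-O2"]

    class Package:
        std_flags = DeprecatedStdFlags("std_flags")

    DeprecatedProperty.error_lvl = 1  # warn on access instead of raising
    pkg = Package()
    flags = pkg.std_flags  # emits a warning, then returns the adaptor from factory()
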
@@ -263,7 +263,9 @@ def match_to_ansi(match):
                f"Incomplete color format: '{match.group(0)}' in '{match.string}'"
            )

        ansi_code = _escape(f"{styles[style]};{colors.get(color_code, '')}", color, enclose, zsh)
        color_number = colors.get(color_code, "")
        semi = ";" if color_number else ""
        ansi_code = _escape(f"{styles[style]}{semi}{color_number}", color, enclose, zsh)
        if text:
            return f"{ansi_code}{text}{_escape(0, color, enclose, zsh)}"
        else:
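The point of the ``semi`` change: when no color number is present, the old f-string left a
trailing semicolon in the escape sequence (for example ``\x1b[1;m`` instead of ``\x1b[1m``).
A hedged illustration with made-up style and color tables:

    styles = {"*": "1"}   # bold; illustrative
    colors = {"g": "32"}  # green; illustrative

    def code(style, color_code):
        color_number = colors.get(color_code, "")
        semi = ";" if color_number else ""
        return f"{styles[style]}{semi}{color_number}"

    assert code("*", "g") == "1;32"  # styled and colored
    assert code("*", None) == "1"    # the old code produced "1;" here
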
@@ -10,7 +10,6 @@
import errno
import io
import multiprocessing
import multiprocessing.connection
import os
import re
import select
@@ -19,9 +18,10 @@
import threading
import traceback
from contextlib import contextmanager
from multiprocessing.connection import Connection
from threading import Thread
from types import ModuleType
from typing import Optional
from typing import Callable, Optional

import llnl.util.tty as tty

@@ -345,49 +345,6 @@ def close(self):
        self.file.close()


class MultiProcessFd:
    """Return an object which stores a file descriptor and can be passed as an
    argument to a function run with ``multiprocessing.Process``, such that
    the file descriptor is available in the subprocess."""

    def __init__(self, fd):
        self._connection = None
        self._fd = None
        if sys.version_info >= (3, 8):
            self._connection = multiprocessing.connection.Connection(fd)
        else:
            self._fd = fd

    @property
    def fd(self):
        if self._connection:
            return self._connection._handle
        else:
            return self._fd

    def close(self):
        if self._connection:
            self._connection.close()
        else:
            os.close(self._fd)


def close_connection_and_file(multiprocess_fd, file):
    # MultiprocessFd is intended to transmit a FD
    # to a child process, this FD is then opened to a Python File object
    # (using fdopen). In >= 3.8, MultiprocessFd encapsulates a
    # multiprocessing.connection.Connection; Connection closes the FD
    # when it is deleted, and prints a warning about duplicate closure if
    # it is not explicitly closed. In < 3.8, MultiprocessFd encapsulates a
    # simple FD; closing the FD here appears to conflict with
    # closure of the File object (in < 3.8 that is). Therefore this needs
    # to choose whether to close the File or the Connection.
    if sys.version_info >= (3, 8):
        multiprocess_fd.close()
    else:
        file.close()
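With Python < 3.8 support gone, the ``MultiProcessFd`` wrapper is dropped in favor of using
``multiprocessing.connection.Connection`` directly. A hedged sketch of the ownership pattern
the new code uses throughout: wrap a duplicated OS fd in a ``Connection`` (which then owns
it), and open any file object over it with ``closefd=False`` so only the ``Connection``
closes the descriptor:

    import os
    import sys
    from multiprocessing.connection import Connection

    conn = Connection(os.dup(sys.stdin.fileno()))          # Connection owns the dup'd fd
    stdin_file = os.fdopen(conn.fileno(), closefd=False)   # borrow the fd, never close it here

    stdin_file.close()  # closes only the Python file object
    conn.close()        # the single, unambiguous owner closes the fd
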
@contextmanager
def replace_environment(env):
    """Replace the current environment (`os.environ`) with `env`.
@@ -545,22 +502,20 @@ def __enter__(self):
            # forcing debug output.
            self._saved_debug = tty._debug

            # OS-level pipe for redirecting output to logger
            read_fd, write_fd = os.pipe()
            # Pipe for redirecting output to logger
            read_fd, self.write_fd = multiprocessing.Pipe(duplex=False)

            read_multiprocess_fd = MultiProcessFd(read_fd)

            # Multiprocessing pipe for communication back from the daemon
            # Pipe for communication back from the daemon
            # Currently only used to save echo value between uses
            self.parent_pipe, child_pipe = multiprocessing.Pipe()
            self.parent_pipe, child_pipe = multiprocessing.Pipe(duplex=False)

            # Sets a daemon that writes to file what it reads from a pipe
            try:
                # need to pass this b/c multiprocessing closes stdin in child.
                input_multiprocess_fd = None
                input_fd = None
                try:
                    if sys.stdin.isatty():
                        input_multiprocess_fd = MultiProcessFd(os.dup(sys.stdin.fileno()))
                        input_fd = Connection(os.dup(sys.stdin.fileno()))
                except BaseException:
                    # just don't forward input if this fails
                    pass
@@ -569,9 +524,9 @@ def __enter__(self):
                self.process = multiprocessing.Process(
                    target=_writer_daemon,
                    args=(
                        input_multiprocess_fd,
                        read_multiprocess_fd,
                        write_fd,
                        input_fd,
                        read_fd,
                        self.write_fd,
                        self.echo,
                        self.log_file,
                        child_pipe,
@@ -582,9 +537,9 @@ def __enter__(self):
                self.process.start()

            finally:
                if input_multiprocess_fd:
                    input_multiprocess_fd.close()
                read_multiprocess_fd.close()
                if input_fd:
                    input_fd.close()
                read_fd.close()

            # Flush immediately before redirecting so that anything buffered
            # goes to the original stream
@@ -602,9 +557,9 @@ def __enter__(self):
            self._saved_stderr = os.dup(sys.stderr.fileno())

            # redirect to the pipe we created above
            os.dup2(write_fd, sys.stdout.fileno())
            os.dup2(write_fd, sys.stderr.fileno())
            os.close(write_fd)
            os.dup2(self.write_fd.fileno(), sys.stdout.fileno())
            os.dup2(self.write_fd.fileno(), sys.stderr.fileno())
            self.write_fd.close()

        else:
            # Handle I/O the Python way. This won't redirect lower-level
@@ -617,7 +572,7 @@ def __enter__(self):
            self._saved_stderr = sys.stderr

            # create a file object for the pipe; redirect to it.
            pipe_fd_out = os.fdopen(write_fd, "w")
            pipe_fd_out = os.fdopen(self.write_fd.fileno(), "w", closefd=False)
            sys.stdout = pipe_fd_out
            sys.stderr = pipe_fd_out

@@ -653,6 +608,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
        else:
            sys.stdout = self._saved_stdout
            sys.stderr = self._saved_stderr
            self.write_fd.close()

        # print log contents in parent if needed.
        if self.log_file.write_in_parent:
@@ -866,14 +822,14 @@ def force_echo(self):


def _writer_daemon(
    stdin_multiprocess_fd,
    read_multiprocess_fd,
    write_fd,
    echo,
    log_file_wrapper,
    control_pipe,
    filter_fn,
):
    stdin_fd: Optional[Connection],
    read_fd: Connection,
    write_fd: Connection,
    echo: bool,
    log_file_wrapper: FileWrapper,
    control_fd: Connection,
    filter_fn: Optional[Callable[[str], str]],
) -> None:
    """Daemon used by ``log_output`` to write to a log file and to ``stdout``.

    The daemon receives output from the parent process and writes it both
@@ -910,43 +866,37 @@ def _writer_daemon(
    ``StringIO`` in the parent. This is mainly for testing.

    Arguments:
        stdin_multiprocess_fd (int): input from the terminal
        read_multiprocess_fd (int): pipe for reading from parent's redirected
            stdout
        echo (bool): initial echo setting -- controlled by user and
            preserved across multiple writer daemons
        log_file_wrapper (FileWrapper): file to log all output
        control_pipe (Pipe): multiprocessing pipe on which to send control
            information to the parent
        filter_fn (callable, optional): function to filter each line of output
        stdin_fd: optional input from the terminal
        read_fd: pipe for reading from parent's redirected stdout
        echo: initial echo setting -- controlled by user and preserved across multiple writer
            daemons
        log_file_wrapper: file to log all output
        control_pipe: multiprocessing pipe on which to send control information to the parent
        filter_fn: optional function to filter each line of output

    """
    # If this process was forked, then it will inherit file descriptors from
    # the parent process. This process depends on closing all instances of
    # write_fd to terminate the reading loop, so we close the file descriptor
    # here. Forking is the process spawning method everywhere except Mac OS
    # for Python >= 3.8 and on Windows
    if sys.version_info < (3, 8) or sys.platform != "darwin":
        os.close(write_fd)
    # This process depends on closing all instances of write_pipe to terminate the reading loop
    write_fd.close()

    # 1. Use line buffering (3rd param = 1) since Python 3 has a bug
    #    that prevents unbuffered text I/O.
    # 2. Python 3.x before 3.7 does not open with UTF-8 encoding by default
    in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1, encoding="utf-8")
    # 3. closefd=False because Connection has "ownership"
    read_file = os.fdopen(read_fd.fileno(), "r", 1, encoding="utf-8", closefd=False)

    if stdin_multiprocess_fd:
        stdin = os.fdopen(stdin_multiprocess_fd.fd)
    if stdin_fd:
        stdin_file = os.fdopen(stdin_fd.fileno(), closefd=False)
    else:
        stdin = None
        stdin_file = None

    # list of streams to select from
    istreams = [in_pipe, stdin] if stdin else [in_pipe]
    istreams = [read_file, stdin_file] if stdin_file else [read_file]
    force_echo = False  # parent can force echo for certain output

    log_file = log_file_wrapper.unwrap()

    try:
        with keyboard_input(stdin) as kb:
        with keyboard_input(stdin_file) as kb:
            while True:
                # fix the terminal settings if we recently came to
                # the foreground
@@ -959,12 +909,12 @@ def _writer_daemon(
                # Allow user to toggle echo with 'v' key.
                # Currently ignores other chars.
                # only read stdin if we're in the foreground
                if stdin in rlist and not _is_background_tty(stdin):
                if stdin_file and stdin_file in rlist and not _is_background_tty(stdin_file):
                    # it's possible to be backgrounded between the above
                    # check and the read, so we ignore SIGTTIN here.
                    with ignore_signal(signal.SIGTTIN):
                        try:
                            if stdin.read(1) == "v":
                            if stdin_file.read(1) == "v":
                                echo = not echo
                        except IOError as e:
                            # If SIGTTIN is ignored, the system gives EIO
@@ -973,13 +923,13 @@ def _writer_daemon(
                            if e.errno != errno.EIO:
                                raise

                if in_pipe in rlist:
                if read_file in rlist:
                    line_count = 0
                    try:
                        while line_count < 100:
                            # Handle output from the calling process.
                            try:
                                line = _retry(in_pipe.readline)()
                                line = _retry(read_file.readline)()
                            except UnicodeDecodeError:
                                # installs like --test=root gpgme produce non-UTF8 logs
                                line = "<line lost: output was not encoded as UTF-8>\n"
@@ -1008,7 +958,7 @@ def _writer_daemon(
                            if xoff in controls:
                                force_echo = False

                            if not _input_available(in_pipe):
                            if not _input_available(read_file):
                                break
                    finally:
                        if line_count > 0:
@@ -1023,14 +973,14 @@ def _writer_daemon(
    finally:
        # send written data back to parent if we used a StringIO
        if isinstance(log_file, io.StringIO):
            control_pipe.send(log_file.getvalue())
            control_fd.send(log_file.getvalue())
        log_file_wrapper.close()
        close_connection_and_file(read_multiprocess_fd, in_pipe)
        if stdin_multiprocess_fd:
            close_connection_and_file(stdin_multiprocess_fd, stdin)
        read_fd.close()
        if stdin_fd:
            stdin_fd.close()

        # send echo value back to the parent so it can be preserved.
        control_pipe.send(echo)
        control_fd.send(echo)


def _retry(function):

@@ -69,4 +69,15 @@ def get_version() -> str:
    return spack_version


__all__ = ["spack_version_info", "spack_version", "get_version", "get_spack_commit"]
def get_short_version() -> str:
    """Short Spack version."""
    return f"{spack_version_info[0]}.{spack_version_info[1]}"


__all__ = [
    "spack_version_info",
    "spack_version",
    "get_version",
    "get_spack_commit",
    "get_short_version",
]
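For example, assuming ``spack_version_info = (0, 23, 0)`` (an illustrative value), the new
helper reduces the tuple to the major.minor string:

    spack_version_info = (0, 23, 0)  # illustrative
    assert f"{spack_version_info[0]}.{spack_version_info[1]}" == "0.23"
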
@@ -722,9 +722,8 @@ def _ensure_env_methods_are_ported_to_builders(pkgs, error_cls):
        )
        builder_cls_names = [spack.builder.BUILDER_CLS[x].__name__ for x in build_system_names]

        module = pkg_cls.module
        has_builders_in_package_py = any(
            getattr(module, name, False) for name in builder_cls_names
            spack.builder.get_builder_class(pkg_cls, name) for name in builder_cls_names
        )
        if not has_builders_in_package_py:
            continue
@@ -806,7 +805,7 @@ def _uses_deprecated_globals(pkgs, error_cls):

        file = spack.repo.PATH.filename_for_package_name(pkg_name)
        tree = ast.parse(open(file).read())
        visitor = DeprecatedMagicGlobals(("std_cmake_args",))
        visitor = DeprecatedMagicGlobals(("std_cmake_args", "std_meson_args", "std_pip_args"))
        visitor.visit(tree)
        if visitor.references_to_globals:
            errors.append(

@@ -35,6 +35,7 @@
import spack.caches
import spack.config as config
import spack.database as spack_db
import spack.deptypes as dt
import spack.error
import spack.hash_types as ht
import spack.hooks
@@ -251,7 +252,7 @@ def _associate_built_specs_with_mirror(self, cache_key, mirror_url):

            spec_list = [
                s
                for s in db.query_local(installed=any, in_buildcache=any)
                for s in db.query_local(installed=any)
                if s.external or db.query_local_by_spec_hash(s.dag_hash()).in_buildcache
            ]
@@ -712,15 +713,32 @@ def get_buildfile_manifest(spec):
    return data


def hashes_to_prefixes(spec):
    """Return a dictionary of hashes to prefixes for a spec and its deps, excluding externals"""
    return {
        s.dag_hash(): str(s.prefix)
def deps_to_relocate(spec):
    """Return the transitive link and direct run dependencies of the spec.

    This is a special traversal for dependencies we need to consider when relocating a package.

    Package binaries, scripts, and other files may refer to the prefixes of dependencies, so
    we need to rewrite those locations when dependencies are in a different place at install time
    than they were at build time.

    This traversal covers transitive link dependencies and direct run dependencies because:

    1. Spack adds RPATHs for transitive link dependencies so that packages can find needed
       dependency libraries.
    2. Packages may call any of their *direct* run dependencies (and may bake their paths into
       binaries or scripts), so we also need to search for run dependency prefixes when relocating.

    This returns a deduplicated list of transitive link dependencies and direct run dependencies.
    """
    deps = [
        s
        for s in itertools.chain(
            spec.traverse(root=True, deptype="link"), spec.dependencies(deptype="run")
        )
        if not s.external
    }
    ]
    return llnl.util.lang.dedupe(deps, key=lambda s: s.dag_hash())
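A hedged sketch of the same traversal shape, to show why the dedupe keys on the dag hash: a
node can appear both in the link closure and among direct run dependencies, and keying on
``dag_hash()`` keeps only the first occurrence while preserving traversal order:

    import itertools
    from llnl.util.lang import dedupe

    def toy_deps_to_relocate(spec):
        candidates = itertools.chain(
            spec.traverse(root=True, deptype="link"),  # transitive link deps, incl. the root
            spec.dependencies(deptype="run"),          # direct run deps only
        )
        return list(dedupe((s for s in candidates if not s.external), key=lambda s: s.dag_hash()))
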
def get_buildinfo_dict(spec):
@@ -736,7 +754,7 @@ def get_buildinfo_dict(spec):
        "relocate_binaries": manifest["binary_to_relocate"],
        "relocate_links": manifest["link_to_relocate"],
        "hardlinks_deduped": manifest["hardlinks_deduped"],
        "hash_to_prefix": hashes_to_prefixes(spec),
        "hash_to_prefix": {d.dag_hash(): str(d.prefix) for d in deps_to_relocate(spec)},
    }


@@ -1631,7 +1649,6 @@ def _oci_push(
    Dict[str, spack.oci.oci.Blob],
    List[Tuple[Spec, BaseException]],
]:

    # Spec dag hash -> blob
    checksums: Dict[str, spack.oci.oci.Blob] = {}
@@ -2201,11 +2218,36 @@ def relocate_package(spec):
    # First match specific prefix paths. Possibly the *local* install prefix
    # of some dependency is in an upstream, so we cannot assume the original
    # spack store root can be mapped uniformly to the new spack store root.
    for dag_hash, new_dep_prefix in hashes_to_prefixes(spec).items():
        if dag_hash in hash_to_old_prefix:
            old_dep_prefix = hash_to_old_prefix[dag_hash]
            prefix_to_prefix_bin[old_dep_prefix] = new_dep_prefix
            prefix_to_prefix_text[old_dep_prefix] = new_dep_prefix
    #
    # If the spec is spliced, we need to handle the simultaneous mapping
    # from the old install_tree to the new install_tree and from the build_spec
    # to the spliced spec.
    # Because foo.build_spec is foo for any non-spliced spec, we can simplify
    # by checking for spliced-in nodes by checking for nodes not in the build_spec
    # without any explicit check for whether the spec is spliced.
    # An analog in this algorithm is any spec that shares a name or provides the same virtuals
    # in the context of the relevant root spec. This ensures that the analog for a spec s
    # is the spec that s replaced when we spliced.
    relocation_specs = deps_to_relocate(spec)
    build_spec_ids = set(id(s) for s in spec.build_spec.traverse(deptype=dt.ALL & ~dt.BUILD))
    for s in relocation_specs:
        analog = s
        if id(s) not in build_spec_ids:
            analogs = [
                d
                for d in spec.build_spec.traverse(deptype=dt.ALL & ~dt.BUILD)
                if s._splice_match(d, self_root=spec, other_root=spec.build_spec)
            ]
            if analogs:
                # Prefer same-name analogs and prefer higher versions
                # This matches the preferences in Spec.splice, so we will find same node
                analog = max(analogs, key=lambda a: (a.name == s.name, a.version))

        lookup_dag_hash = analog.dag_hash()
        if lookup_dag_hash in hash_to_old_prefix:
            old_dep_prefix = hash_to_old_prefix[lookup_dag_hash]
            prefix_to_prefix_bin[old_dep_prefix] = str(s.prefix)
            prefix_to_prefix_text[old_dep_prefix] = str(s.prefix)

    # Only then add the generic fallback of install prefix -> install prefix.
    prefix_to_prefix_text[old_prefix] = new_prefix
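The analog preference is encoded entirely in that ``max`` key: Python compares the tuples
lexicographically, so a same-name analog always beats a different-name one, and among equals
the higher version wins. A toy check with invented (name, version) stand-ins:

    candidates = [("zlib-ng", 2), ("zlib", 1), ("zlib", 3)]
    target_name = "zlib"
    best = max(candidates, key=lambda a: (a[0] == target_name, a[1]))
    assert best == ("zlib", 3)  # same name preferred first, then highest version
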
@@ -2520,7 +2562,13 @@ def _ensure_common_prefix(tar: tarfile.TarFile) -> str:
    return pkg_prefix


def install_root_node(spec, unsigned=False, force=False, sha256=None):
def install_root_node(
    spec: spack.spec.Spec,
    unsigned=False,
    force: bool = False,
    sha256: Optional[str] = None,
    allow_missing: bool = False,
) -> None:
    """Install the root node of a concrete spec from a buildcache.

    Checking the sha256 sum of a node before installation is usually needed only
@@ -2529,11 +2577,10 @@ def install_root_node(spec, unsigned=False, force=False, sha256=None):

    Args:
        spec: spec to be installed (note that only the root node will be installed)
        unsigned (bool): if True allows installing unsigned binaries
        force (bool): force installation if the spec is already present in the
            local store
        sha256 (str): optional sha256 of the binary package, to be checked
            before installation
        unsigned: if True allows installing unsigned binaries
        force: force installation if the spec is already present in the local store
        sha256: optional sha256 of the binary package, to be checked before installation
        allow_missing: when true, allows installing a node with missing dependencies
    """
    # Early termination
    if spec.external or spec.virtual:
@@ -2543,10 +2590,10 @@ def install_root_node(spec, unsigned=False, force=False, sha256=None):
        warnings.warn("Package for spec {0} already installed.".format(spec.format()))
        return

    download_result = download_tarball(spec, unsigned)
    download_result = download_tarball(spec.build_spec, unsigned)
    if not download_result:
        msg = 'download of binary cache file for spec "{0}" failed'
        raise RuntimeError(msg.format(spec.format()))
        raise RuntimeError(msg.format(spec.build_spec.format()))

    if sha256:
        checker = spack.util.crypto.Checker(sha256)
@@ -2565,8 +2612,13 @@ def install_root_node(spec, unsigned=False, force=False, sha256=None):
    with spack.util.path.filter_padding():
        tty.msg('Installing "{0}" from a buildcache'.format(spec.format()))
        extract_tarball(spec, download_result, force)
        spec.package.windows_establish_runtime_linkage()
        if spec.spliced:  # overwrite old metadata with new
            spack.store.STORE.layout.write_spec(
                spec, spack.store.STORE.layout.spec_file_path(spec)
            )
        spack.hooks.post_install(spec, False)
        spack.store.STORE.db.add(spec)
        spack.store.STORE.db.add(spec, allow_missing=allow_missing)


def install_single_spec(spec, unsigned=False, force=False):

@@ -4,6 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Common basic functions used through the spack.bootstrap package"""
import fnmatch
import glob
import importlib
import os.path
import re
@@ -60,10 +61,19 @@ def _try_import_from_store(
        python, *_ = candidate_spec.dependencies("python-venv")
    else:
        python, *_ = candidate_spec.dependencies("python")
    module_paths = [
        os.path.join(candidate_spec.prefix, python.package.purelib),
        os.path.join(candidate_spec.prefix, python.package.platlib),
    ]

    # if python is installed, ask it for the layout
    if python.installed:
        module_paths = [
            os.path.join(candidate_spec.prefix, python.package.purelib),
            os.path.join(candidate_spec.prefix, python.package.platlib),
        ]
    # otherwise search for the site-packages directory
    # (clingo from binaries with truncated python-venv runtime)
    else:
        module_paths = glob.glob(
            os.path.join(candidate_spec.prefix, "lib", "python*", "site-packages")
        )
    path_before = list(sys.path)

    # NOTE: try module_paths first and last, last allows an existing version in path

@@ -37,6 +37,7 @@
import spack.binary_distribution
import spack.config
import spack.detection
import spack.mirror
import spack.platforms
import spack.spec
import spack.store
@@ -44,7 +45,6 @@
import spack.util.executable
import spack.util.path
import spack.util.spack_yaml
import spack.util.url
import spack.version
from spack.installer import PackageInstaller

@@ -91,12 +91,7 @@ def __init__(self, conf: ConfigDictionary) -> None:
        self.metadata_dir = spack.util.path.canonicalize_path(conf["metadata"])

        # Promote (relative) paths to file urls
        url = conf["info"]["url"]
        if spack.util.url.is_path_instead_of_url(url):
            if not os.path.isabs(url):
                url = os.path.join(self.metadata_dir, url)
            url = spack.util.url.path_to_file_url(url)
        self.url = url
        self.url = spack.mirror.Mirror(conf["info"]["url"]).fetch_url

    @property
    def mirror_scope(self) -> spack.config.InternalConfigScope:
@@ -175,7 +170,15 @@ def _install_by_hash(
    query = spack.binary_distribution.BinaryCacheQuery(all_architectures=True)
    for match in spack.store.find([f"/{pkg_hash}"], multiple=False, query_fn=query):
        spack.binary_distribution.install_root_node(
            match, unsigned=True, force=True, sha256=pkg_sha256
            # allow_missing is true since when bootstrapping clingo we truncate runtime
            # deps such as gcc-runtime, since we link libstdc++ statically, and the other
            # further runtime deps are loaded by the Python interpreter. This just silences
            # warnings about missing dependencies.
            match,
            unsigned=True,
            force=True,
            sha256=pkg_sha256,
            allow_missing=True,
        )

def _install_and_test(
@@ -599,7 +602,10 @@ def bootstrapping_sources(scope: Optional[str] = None):
        current = copy.copy(entry)
        metadata_dir = spack.util.path.canonicalize_path(entry["metadata"])
        metadata_yaml = os.path.join(metadata_dir, METADATA_YAML_FILENAME)
        with open(metadata_yaml, encoding="utf-8") as stream:
            current.update(spack.util.spack_yaml.load(stream))
        list_of_sources.append(current)
        try:
            with open(metadata_yaml, encoding="utf-8") as stream:
                current.update(spack.util.spack_yaml.load(stream))
            list_of_sources.append(current)
        except OSError:
            pass
    return list_of_sources

@@ -44,6 +44,7 @@
from collections import defaultdict
from enum import Flag, auto
from itertools import chain
from multiprocessing.connection import Connection
from typing import Callable, Dict, List, Optional, Set, Tuple

import archspec.cpu
@@ -54,7 +55,6 @@
from llnl.util.lang import dedupe, stable_partition
from llnl.util.symlink import symlink
from llnl.util.tty.color import cescape, colorize
from llnl.util.tty.log import MultiProcessFd

import spack.build_systems._checks
import spack.build_systems.cmake
@@ -91,7 +91,7 @@
)
from spack.util.executable import Executable
from spack.util.log_parse import make_log_context, parse_log_events
from spack.util.module_cmd import load_module, path_from_modules
from spack.util.module_cmd import load_module

#
# This can be set by the user to globally disable parallel builds.
@@ -617,13 +617,11 @@ def set_package_py_globals(pkg, context: Context = Context.BUILD):
    """
    module = ModuleChangePropagator(pkg)

    if context == Context.BUILD:
        module.std_cmake_args = spack.build_systems.cmake.CMakeBuilder.std_args(pkg)
        module.std_meson_args = spack.build_systems.meson.MesonBuilder.std_args(pkg)
        module.std_pip_args = spack.build_systems.python.PythonPipBuilder.std_args(pkg)

    jobs = spack.config.determine_number_of_jobs(parallel=pkg.parallel)
    module.make_jobs = jobs
    if context == Context.BUILD:
        module.std_meson_args = spack.build_systems.meson.MesonBuilder.std_args(pkg)
        module.std_pip_args = spack.build_systems.python.PythonPipBuilder.std_args(pkg)

    # TODO: make these build deps that can be installed if not found.
    module.make = MakeExecutable("make", jobs)
@@ -792,21 +790,6 @@ def get_rpath_deps(pkg: spack.package_base.PackageBase) -> List[spack.spec.Spec]
    return _get_rpath_deps_from_spec(pkg.spec, pkg.transitive_rpaths)


def get_rpaths(pkg):
    """Get a list of all the rpaths for a package."""
    rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
    deps = get_rpath_deps(pkg)
    rpaths.extend(d.prefix.lib for d in deps if os.path.isdir(d.prefix.lib))
    rpaths.extend(d.prefix.lib64 for d in deps if os.path.isdir(d.prefix.lib64))
    # Second module is our compiler mod name. We use that to get rpaths from
    # module show output.
    if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
        mod_rpath = path_from_modules([pkg.compiler.modules[1]])
        if mod_rpath:
            rpaths.append(mod_rpath)
    return list(dedupe(filter_system_paths(rpaths)))


def load_external_modules(pkg):
    """Traverse a package's spec DAG and load any external modules.

@@ -1063,6 +1046,12 @@ def set_all_package_py_globals(self):
            # This includes runtime dependencies, also runtime deps of direct build deps.
            set_package_py_globals(pkg, context=Context.RUN)

        # Looping over the set of packages a second time
        # ensures all globals are loaded into the module space prior to
        # any package setup. This guarantees package setup methods have
        # access to expected module level definitions such as "spack_cc"
        for dspec, flag in chain(self.external, self.nonexternal):
            pkg = dspec.package
            for spec in dspec.dependents():
                # Note: some specs have dependents that are unreachable from the root, so avoid
                # setting globals for those.
@@ -1072,6 +1061,15 @@ def set_all_package_py_globals(self):
                pkg.setup_dependent_package(dependent_module, spec)
                dependent_module.propagate_changes_to_mro()

        if self.context == Context.BUILD:
            pkg = self.specs[0].package
            module = ModuleChangePropagator(pkg)
            # std_cmake_args is not sufficiently static to be defined
            # in set_package_py_globals and is deprecated, so it's handled
            # here as a special case
            module.std_cmake_args = spack.build_systems.cmake.CMakeBuilder.std_args(pkg)
            module.propagate_changes_to_mro()

    def get_env_modifications(self) -> EnvironmentModifications:
        """Returns the environment variable modifications for the given input specs and context.
        Environment modifications include:
@@ -1141,40 +1139,14 @@ def _make_runnable(self, dep: spack.spec.Spec, env: EnvironmentModifications):
            env.prepend_path("PATH", bin_dir)


def get_cmake_prefix_path(pkg):
    # Note that unlike modifications_from_dependencies, this does not include
    # any edits to CMAKE_PREFIX_PATH defined in custom
    # setup_dependent_build_environment implementations of dependency packages
    build_deps = set(pkg.spec.dependencies(deptype=("build", "test")))
    link_deps = set(pkg.spec.traverse(root=False, deptype=("link")))
    build_link_deps = build_deps | link_deps
    spack_built = []
    externals = []
    # modifications_from_dependencies updates CMAKE_PREFIX_PATH by first
    # prepending all externals and then all non-externals
    for dspec in pkg.spec.traverse(root=False, order="post"):
        if dspec in build_link_deps:
            if dspec.external:
                externals.insert(0, dspec)
            else:
                spack_built.insert(0, dspec)

    ordered_build_link_deps = spack_built + externals
    cmake_prefix_path_entries = []
    for spec in ordered_build_link_deps:
        cmake_prefix_path_entries.extend(spec.package.cmake_prefix_paths)

    return filter_system_paths(cmake_prefix_path_entries)


def _setup_pkg_and_run(
    serialized_pkg: "spack.subprocess_context.PackageInstallContext",
    function: Callable,
    kwargs: Dict,
    write_pipe: multiprocessing.connection.Connection,
    input_multiprocess_fd: Optional[MultiProcessFd],
    jsfd1: Optional[MultiProcessFd],
    jsfd2: Optional[MultiProcessFd],
    write_pipe: Connection,
    input_pipe: Optional[Connection],
    jsfd1: Optional[Connection],
    jsfd2: Optional[Connection],
):
    """Main entry point in the child process for Spack builds.

@@ -1216,13 +1188,12 @@ def _setup_pkg_and_run(
|
||||
context: str = kwargs.get("context", "build")
|
||||
|
||||
try:
|
||||
# We are in the child process. Python sets sys.stdin to
|
||||
# open(os.devnull) to prevent our process and its parent from
|
||||
# simultaneously reading from the original stdin. But, we assume
|
||||
# that the parent process is not going to read from it till we
|
||||
# are done with the child, so we undo Python's precaution.
|
||||
if input_multiprocess_fd is not None:
|
||||
sys.stdin = os.fdopen(input_multiprocess_fd.fd)
|
||||
# We are in the child process. Python sets sys.stdin to open(os.devnull) to prevent our
|
||||
# process and its parent from simultaneously reading from the original stdin. But, we
|
||||
# assume that the parent process is not going to read from it till we are done with the
|
||||
# child, so we undo Python's precaution. closefd=False since Connection has ownership.
|
||||
if input_pipe is not None:
|
||||
sys.stdin = os.fdopen(input_pipe.fileno(), closefd=False)
|
||||
|
||||
pkg = serialized_pkg.restore()
|
||||
|
||||
@@ -1245,7 +1216,7 @@ def _setup_pkg_and_run(
|
||||
# objects can't be sent to the parent.
|
||||
exc_type = type(e)
|
||||
tb = e.__traceback__
|
||||
tb_string = traceback.format_exception(exc_type, e, tb)
|
||||
tb_string = "".join(traceback.format_exception(exc_type, e, tb))
|
||||
|
||||
# build up some context from the offending package so we can
|
||||
# show that, too.
|
||||
@@ -1291,8 +1262,8 @@ def _setup_pkg_and_run(
|
||||
|
||||
finally:
|
||||
write_pipe.close()
|
||||
if input_multiprocess_fd is not None:
|
||||
input_multiprocess_fd.close()
|
||||
if input_pipe is not None:
|
||||
input_pipe.close()
|
||||
|
||||
|
||||
def start_build_process(pkg, function, kwargs):

@@ -1319,23 +1290,9 @@ def child_fun():
    If something goes wrong, the child process catches the error and
    passes it to the parent wrapped in a ChildError. The parent is
    expected to handle (or re-raise) the ChildError.
-
-    This uses `multiprocessing.Process` to create the child process. The
-    mechanism used to create the process differs on different operating
-    systems and for different versions of Python. In some cases "fork"
-    is used (i.e. the "fork" system call) and in some cases it starts an
-    entirely new Python interpreter process (in the docs this is referred
-    to as the "spawn" start method). Breaking it down by OS:
-
-    - Linux always uses fork.
-    - Mac OS uses fork before Python 3.8 and "spawn" for 3.8 and after.
-    - Windows always uses the "spawn" start method.
-
-    For more information on `multiprocessing` child process creation
-    mechanisms, see https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
    """
    read_pipe, write_pipe = multiprocessing.Pipe(duplex=False)
-    input_multiprocess_fd = None
+    input_fd = None
    jobserver_fd1 = None
    jobserver_fd2 = None
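The docstring block removed above described multiprocessing's start methods. For reference, a standalone sketch showing how to query and pin the start method (output depends on the platform):

    import multiprocessing

    if __name__ == "__main__":
        # "fork" on Linux; "spawn" on Windows and on macOS with Python 3.8+
        print(multiprocessing.get_start_method())
        ctx = multiprocessing.get_context("spawn")  # explicitly request one
        p = ctx.Process(target=print, args=("hello from the child",))
        p.start()
        p.join()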
@@ -1344,14 +1301,13 @@ def child_fun():
    try:
        # Forward sys.stdin when appropriate, to allow toggling verbosity
        if sys.platform != "win32" and sys.stdin.isatty() and hasattr(sys.stdin, "fileno"):
-            input_fd = os.dup(sys.stdin.fileno())
-            input_multiprocess_fd = MultiProcessFd(input_fd)
+            input_fd = Connection(os.dup(sys.stdin.fileno()))
        mflags = os.environ.get("MAKEFLAGS", False)
        if mflags:
            m = re.search(r"--jobserver-[^=]*=(\d),(\d)", mflags)
            if m:
-                jobserver_fd1 = MultiProcessFd(int(m.group(1)))
-                jobserver_fd2 = MultiProcessFd(int(m.group(2)))
+                jobserver_fd1 = Connection(int(m.group(1)))
+                jobserver_fd2 = Connection(int(m.group(2)))

        p = multiprocessing.Process(
            target=_setup_pkg_and_run,
@@ -1360,7 +1316,7 @@ def child_fun():
                function,
                kwargs,
                write_pipe,
-                input_multiprocess_fd,
+                input_fd,
                jobserver_fd1,
                jobserver_fd2,
            ),
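The MAKEFLAGS regex above extracts the file descriptor pair that GNU make's jobserver advertises to sub-makes. A standalone sketch of the same parse, with a made-up MAKEFLAGS value:

    import re

    makeflags = "-j8 --jobserver-auth=3,4"  # hypothetical value exported by GNU make
    m = re.search(r"--jobserver-[^=]*=(\d),(\d)", makeflags)
    if m:
        read_fd, write_fd = int(m.group(1)), int(m.group(2))
        print(read_fd, write_fd)  # 3 4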
@@ -1380,8 +1336,8 @@ def child_fun():
    finally:
        # Close the input stream in the parent process
-        if input_multiprocess_fd is not None:
-            input_multiprocess_fd.close()
+        if input_fd is not None:
+            input_fd.close()

    def exitcode_msg(p):
        typ = "exit" if p.exitcode >= 0 else "signal"
@@ -10,7 +10,6 @@
import llnl.util.filesystem as fs
import llnl.util.tty as tty

-import spack.build_environment
import spack.builder

from .cmake import CMakeBuilder, CMakePackage
@@ -297,18 +296,6 @@ def initconfig_hardware_entries(self):
    def std_initconfig_entries(self):
        cmake_prefix_path_env = os.environ["CMAKE_PREFIX_PATH"]
        cmake_prefix_path = cmake_prefix_path_env.replace(os.pathsep, ";")
-        cmake_rpaths_env = spack.build_environment.get_rpaths(self.pkg)
-        cmake_rpaths_path = ";".join(cmake_rpaths_env)
-        complete_rpath_list = cmake_rpaths_path
-        if "SPACK_COMPILER_EXTRA_RPATHS" in os.environ:
-            spack_extra_rpaths_env = os.environ["SPACK_COMPILER_EXTRA_RPATHS"]
-            spack_extra_rpaths_path = spack_extra_rpaths_env.replace(os.pathsep, ";")
-            complete_rpath_list = "{0};{1}".format(complete_rpath_list, spack_extra_rpaths_path)
-
-        if "SPACK_COMPILER_IMPLICIT_RPATHS" in os.environ:
-            spack_implicit_rpaths_env = os.environ["SPACK_COMPILER_IMPLICIT_RPATHS"]
-            spack_implicit_rpaths_path = spack_implicit_rpaths_env.replace(os.pathsep, ";")
-            complete_rpath_list = "{0};{1}".format(complete_rpath_list, spack_implicit_rpaths_path)

        return [
            "#------------------{0}".format("-" * 60),
@@ -318,8 +305,6 @@ def std_initconfig_entries(self):
            "#------------------{0}\n".format("-" * 60),
            cmake_cache_string("CMAKE_PREFIX_PATH", cmake_prefix_path),
            cmake_cache_string("CMAKE_INSTALL_RPATH_USE_LINK_PATH", "ON"),
-            cmake_cache_string("CMAKE_BUILD_RPATH", complete_rpath_list),
-            cmake_cache_string("CMAKE_INSTALL_RPATH", complete_rpath_list),
            self.define_cmake_cache_from_variant("CMAKE_BUILD_TYPE", "build_type"),
        ]
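std_initconfig_entries now keeps only the separator translation: CMake expects a semicolon-separated list while the environment variable uses the platform's os.pathsep. A quick standalone illustration (Linux-style value; on Windows os.pathsep is already ';'):

    import os

    cmake_prefix_path_env = "/opt/deps/a:/opt/deps/b"  # hypothetical CMAKE_PREFIX_PATH value
    print(cmake_prefix_path_env.replace(os.pathsep, ";"))  # /opt/deps/a;/opt/deps/b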
@@ -8,17 +8,19 @@
import platform
import re
import sys
-from typing import List, Optional, Tuple
+from itertools import chain
+from typing import List, Optional, Set, Tuple

import llnl.util.filesystem as fs
+from llnl.util.lang import stable_partition

-import spack.build_environment
import spack.builder
+import spack.deptypes as dt
import spack.error
import spack.package_base
from spack.directives import build_system, conflicts, depends_on, variant
from spack.multimethod import when
+from spack.util.environment import filter_system_paths

from ._checks import BaseBuilder, execute_build_time_tests

@@ -152,6 +154,24 @@ def _values(x):
        conflicts(f"generator={x}")


+def get_cmake_prefix_path(pkg: spack.package_base.PackageBase) -> List[str]:
+    """Obtain the CMAKE_PREFIX_PATH entries for a package, based on the cmake_prefix_path package
+    attribute of direct build/test and transitive link dependencies."""
+    # Add direct build/test deps
+    selected: Set[str] = {s.dag_hash() for s in pkg.spec.dependencies(deptype=dt.BUILD | dt.TEST)}
+    # Add transitive link deps
+    selected.update(s.dag_hash() for s in pkg.spec.traverse(root=False, deptype=dt.LINK))
+    # Separate out externals so they do not shadow Spack prefixes
+    externals, spack_built = stable_partition(
+        (s for s in pkg.spec.traverse(root=False, order="topo") if s.dag_hash() in selected),
+        lambda x: x.external,
+    )
+
+    return filter_system_paths(
+        path for spec in chain(spack_built, externals) for path in spec.package.cmake_prefix_paths
+    )
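The new get_cmake_prefix_path orders prefixes topologically and then places externals after Spack-built prefixes so they cannot shadow them. A standalone sketch of the partition idiom; stable_partition is reimplemented here so the example runs without Spack:

    from itertools import chain

    def stable_partition(iterable, predicate):
        # Minimal stand-in for llnl.util.lang.stable_partition: order-preserving split.
        true_part, false_part = [], []
        for item in iterable:
            (true_part if predicate(item) else false_part).append(item)
        return true_part, false_part

    prefixes = [("zlib", False), ("openssl", True), ("cmake", False)]  # (name, external?) invented
    externals, spack_built = stable_partition(prefixes, lambda p: p[1])
    print([name for name, _ in chain(spack_built, externals)])  # ['zlib', 'cmake', 'openssl']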
class CMakePackage(spack.package_base.PackageBase):
    """Specialized class for packages built using CMake

@@ -358,6 +378,16 @@ def std_args(pkg, generator=None):
            "-G",
            generator,
            define("CMAKE_INSTALL_PREFIX", pathlib.Path(pkg.prefix).as_posix()),
+            define("CMAKE_INSTALL_RPATH_USE_LINK_PATH", True),
+            # only include the install prefix lib dirs; rpaths for deps are added by USE_LINK_PATH
+            define(
+                "CMAKE_INSTALL_RPATH",
+                [
+                    pathlib.Path(pkg.prefix, "lib").as_posix(),
+                    pathlib.Path(pkg.prefix, "lib64").as_posix(),
+                ],
+            ),
+            define("CMAKE_PREFIX_PATH", get_cmake_prefix_path(pkg)),
            define("CMAKE_BUILD_TYPE", build_type),
        ]

@@ -372,15 +402,6 @@ def std_args(pkg, generator=None):
        _conditional_cmake_defaults(pkg, args)
        _maybe_set_python_hints(pkg, args)

-        # Set up CMake rpath
-        args.extend(
-            [
-                define("CMAKE_INSTALL_RPATH_USE_LINK_PATH", True),
-                define("CMAKE_INSTALL_RPATH", spack.build_environment.get_rpaths(pkg)),
-                define("CMAKE_PREFIX_PATH", spack.build_environment.get_cmake_prefix_path(pkg)),
-            ]
-        )

        return args

    @staticmethod

@@ -541,6 +562,13 @@ def cmake_args(self):

    def cmake(self, pkg, spec, prefix):
        """Runs ``cmake`` in the build directory"""
+
+        # skip cmake phase if it is an incremental develop build
+        if spec.is_develop and os.path.isfile(
+            os.path.join(self.build_directory, "CMakeCache.txt")
+        ):
+            return
+
        options = self.std_cmake_args
        options += self.cmake_args()
        options.append(os.path.abspath(self.root_cmakelists_dir))
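With CMAKE_INSTALL_RPATH_USE_LINK_PATH enabled, CMake itself appends the directories of linked libraries to the install rpath, so the explicit list can shrink to the package's own lib dirs. A hedged sketch of the cache arguments this produces (the prefix is invented, and the -D formatting assumes Spack's define() conventions for booleans and lists):

    import pathlib

    prefix = "/opt/spack/zlib-1.3"  # hypothetical install prefix
    rpath_dirs = ";".join(pathlib.Path(prefix, d).as_posix() for d in ("lib", "lib64"))
    args = [
        "-DCMAKE_INSTALL_RPATH_USE_LINK_PATH:BOOL=ON",
        f"-DCMAKE_INSTALL_RPATH:STRING={rpath_dirs}",
    ]
    print(args)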
@@ -110,8 +110,8 @@ def compute_capabilities(arch_list: Iterable[str]) -> List[str]:

    depends_on("cuda@5.0:10.2", when="cuda_arch=30")
    depends_on("cuda@5.0:10.2", when="cuda_arch=32")
-    depends_on("cuda@5.0:", when="cuda_arch=35")
-    depends_on("cuda@6.5:", when="cuda_arch=37")
+    depends_on("cuda@5.0:11.8", when="cuda_arch=35")
+    depends_on("cuda@6.5:11.8", when="cuda_arch=37")

    depends_on("cuda@6.0:", when="cuda_arch=50")
    depends_on("cuda@6.5:", when="cuda_arch=52")
@@ -131,6 +131,7 @@ def compute_capabilities(arch_list: Iterable[str]) -> List[str]:
    depends_on("cuda@11.8:", when="cuda_arch=89")

    depends_on("cuda@12.0:", when="cuda_arch=90")
+    depends_on("cuda@12.0:", when="cuda_arch=90a")

    # From the NVIDIA install guide we know of conflicts for particular
    # platforms (linux, darwin), architectures (x86, powerpc) and compilers
@@ -149,7 +150,6 @@ def compute_capabilities(arch_list: Iterable[str]) -> List[str]:
    # minimum supported versions
    conflicts("%gcc@:4", when="+cuda ^cuda@11.0:")
    conflicts("%gcc@:5", when="+cuda ^cuda@11.4:")
-    conflicts("%gcc@:7.2", when="+cuda ^cuda@12.4:")
    conflicts("%clang@:6", when="+cuda ^cuda@12.2:")

    # maximum supported version
@@ -12,6 +12,7 @@
import spack.error
import spack.multimethod
+import spack.repo

#: Builder classes, as registered by the "builder" decorator
BUILDER_CLS = {}
@@ -74,6 +75,14 @@ def __call__(self, spec, prefix):
        return self.phase_fn(self.builder.pkg, spec, prefix)


+def get_builder_class(pkg, name: str) -> Optional[type]:
+    """Return the builder class if a package module defines it."""
+    cls = getattr(pkg.module, name, None)
+    if cls and cls.__module__.startswith(spack.repo.ROOT_PYTHON_NAMESPACE):
+        return cls
+    return None
+
+
def _create(pkg):
    """Return a new builder object for the package object being passed as argument.

@@ -99,9 +108,10 @@ class hierarchy (look at AspellDictPackage for an example of that)
    package_buildsystem = buildsystem_name(pkg)
    default_builder_cls = BUILDER_CLS[package_buildsystem]
    builder_cls_name = default_builder_cls.__name__
-    builder_cls = getattr(pkg.module, builder_cls_name, None)
-    if builder_cls:
-        return builder_cls(pkg)
+    builder_class = get_builder_class(pkg, builder_cls_name)
+
+    if builder_class:
+        return builder_class(pkg)

    # Specialized version of a given buildsystem can subclass some
    # base classes and specialize certain phases or methods or attributes.
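get_builder_class only honors a builder class defined in the package's own repo module; the __module__ prefix check is what enforces that. A standalone sketch of the guard, with illustrative names rather than Spack's real namespace:

    ROOT_PYTHON_NAMESPACE = "spack.pkg"  # hypothetical stand-in for spack.repo.ROOT_PYTHON_NAMESPACE

    class FakeModule:
        # Pretend package module that defines a CMakeBuilder override.
        class CMakeBuilder:
            pass

        CMakeBuilder.__module__ = "spack.pkg.builtin.example"

    def get_builder_class(module, name):
        cls = getattr(module, name, None)
        if cls and cls.__module__.startswith(ROOT_PYTHON_NAMESPACE):
            return cls
        return None

    print(get_builder_class(FakeModule, "CMakeBuilder"))      # the override class
    print(get_builder_class(FakeModule, "AutotoolsBuilder"))  # None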
@@ -5,7 +5,6 @@

"""Caches used by Spack to store data"""
import os
-from typing import Union

import llnl.util.lang
from llnl.util.filesystem import mkdirp
@@ -32,12 +31,8 @@ def _misc_cache():
    return spack.util.file_cache.FileCache(path)


-FileCacheType = Union[spack.util.file_cache.FileCache, llnl.util.lang.Singleton]
-
#: Spack's cache for small data
-MISC_CACHE: Union[spack.util.file_cache.FileCache, llnl.util.lang.Singleton] = (
-    llnl.util.lang.Singleton(_misc_cache)
-)
+MISC_CACHE: spack.util.file_cache.FileCache = llnl.util.lang.Singleton(_misc_cache)  # type: ignore


def fetch_cache_location():
@@ -74,6 +69,4 @@ def store(self, fetcher, relative_dest):


#: Spack's local cache for downloaded source archives
-FETCH_CACHE: Union[spack.fetch_strategy.FsCache, llnl.util.lang.Singleton] = (
-    llnl.util.lang.Singleton(_fetch_cache)
-)
+FETCH_CACHE: spack.fetch_strategy.FsCache = llnl.util.lang.Singleton(_fetch_cache)  # type: ignore
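Both caches now annotate the Singleton as the concrete type (hence the type: ignore), relying on the wrapper to build the real object on first use. A minimal standalone sketch of that lazy pattern:

    class Singleton:
        # Minimal stand-in for llnl.util.lang.Singleton: build on first use, then cache.
        def __init__(self, factory):
            self._factory = factory
            self._instance = None

        @property
        def instance(self):
            if self._instance is None:
                self._instance = self._factory()
            return self._instance

        def __getattr__(self, name):
            return getattr(self.instance, name)

    cache = Singleton(dict)   # nothing built yet
    cache.setdefault("k", 1)  # first attribute access triggers construction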
@@ -10,6 +10,7 @@
import os
import re
import shutil
+import ssl
import stat
import subprocess
import sys
@@ -19,21 +20,21 @@
from collections import defaultdict, namedtuple
from typing import Dict, List, Optional, Set, Tuple
from urllib.error import HTTPError, URLError
-from urllib.parse import urlencode
-from urllib.request import HTTPHandler, Request, build_opener
+from urllib.parse import quote, urlencode, urlparse
+from urllib.request import HTTPHandler, HTTPSHandler, Request, build_opener

import ruamel.yaml

import llnl.util.filesystem as fs
import llnl.util.tty as tty
-from llnl.util.lang import memoized
+from llnl.util.lang import Singleton, memoized
from llnl.util.tty.color import cescape, colorize

import spack
import spack.binary_distribution as bindist
import spack.concretize
import spack.config as cfg
import spack.environment as ev
import spack.error
import spack.main
import spack.mirror
import spack.paths
@@ -50,6 +51,31 @@
from spack.reporters.cdash import SPACK_CDASH_TIMEOUT
from spack.reporters.cdash import build_stamp as cdash_build_stamp


+def _urlopen():
+    error_handler = web_util.SpackHTTPDefaultErrorHandler()
+
+    # One opener with HTTPS ssl enabled
+    with_ssl = build_opener(
+        HTTPHandler(), HTTPSHandler(context=web_util.ssl_create_default_context()), error_handler
+    )
+
+    # One opener with HTTPS ssl disabled
+    without_ssl = build_opener(
+        HTTPHandler(), HTTPSHandler(context=ssl._create_unverified_context()), error_handler
+    )
+
+    # And dynamically dispatch based on the config:verify_ssl.
+    def dispatch_open(fullurl, data=None, timeout=None, verify_ssl=True):
+        opener = with_ssl if verify_ssl else without_ssl
+        timeout = timeout or spack.config.get("config:connect_timeout", 1)
+        return opener.open(fullurl, data, timeout)
+
+    return dispatch_open
+
+
+_dyn_mapping_urlopener = Singleton(_urlopen)
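_urlopen builds both openers once and returns a closure that chooses between them per call; wrapping it in Singleton defers even that setup until the first dynamic-mapping request. A standalone sketch of the dual-opener dispatch, with no Spack config involved:

    import ssl
    from urllib.request import HTTPHandler, HTTPSHandler, build_opener

    def make_dispatch_open(default_timeout=1.0):
        # Standalone sketch of the dispatch pattern used by _urlopen above.
        with_ssl = build_opener(HTTPHandler(), HTTPSHandler(context=ssl.create_default_context()))
        without_ssl = build_opener(HTTPHandler(), HTTPSHandler(context=ssl._create_unverified_context()))

        def dispatch_open(fullurl, data=None, timeout=None, verify_ssl=True):
            opener = with_ssl if verify_ssl else without_ssl
            return opener.open(fullurl, data, timeout or default_timeout)

        return dispatch_open

    open_url = make_dispatch_open(default_timeout=5.0)
    # open_url("https://example.com")  # picks the verifying opener by default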
# See https://docs.gitlab.com/ee/ci/yaml/#retry for descriptions of conditions
JOB_RETRY_CONDITIONS = [
    # "always",
@@ -69,8 +95,6 @@

TEMP_STORAGE_MIRROR_NAME = "ci_temporary_mirror"
SPACK_RESERVED_TAGS = ["public", "protected", "notary"]
-# TODO: Remove this in Spack 0.23
-SHARED_PR_MIRROR_URL = "s3://spack-binaries-prs/shared_pr_mirror"
JOB_NAME_FORMAT = (
    "{name}{@version} {/hash:7} {%compiler.name}{@compiler.version}{ arch=architecture}"
)
@@ -175,11 +199,11 @@ def _remove_satisfied_deps(deps, satisfied_list):
    return nodes, edges, stages


-def _print_staging_summary(spec_labels, stages, mirrors_to_check, rebuild_decisions):
+def _print_staging_summary(spec_labels, stages, rebuild_decisions):
    if not stages:
        return

-    mirrors = spack.mirror.MirrorCollection(mirrors=mirrors_to_check, binary=True)
+    mirrors = spack.mirror.MirrorCollection(binary=True)
    tty.msg("Checked the following mirrors for binaries:")
    for m in mirrors.values():
        tty.msg(f"  {m.fetch_url}")
@@ -226,21 +250,14 @@ def _spec_matches(spec, match_string):
    return spec.intersects(match_string)


-def _format_job_needs(
-    dep_jobs, build_group, prune_dag, rebuild_decisions, enable_artifacts_buildcache
-):
+def _format_job_needs(dep_jobs, build_group, prune_dag, rebuild_decisions):
    needs_list = []
    for dep_job in dep_jobs:
        dep_spec_key = _spec_ci_label(dep_job)
        rebuild = rebuild_decisions[dep_spec_key].rebuild

        if not prune_dag or rebuild:
-            needs_list.append(
-                {
-                    "job": get_job_name(dep_job, build_group),
-                    "artifacts": enable_artifacts_buildcache,
-                }
-            )
+            needs_list.append({"job": get_job_name(dep_job, build_group), "artifacts": False})
    return needs_list
@@ -384,12 +401,6 @@ def __init__(self, ci_config, spec_labels, stages):

        self.ir = {
            "jobs": {},
-            "temporary-storage-url-prefix": self.ci_config.get(
-                "temporary-storage-url-prefix", None
-            ),
-            "enable-artifacts-buildcache": self.ci_config.get(
-                "enable-artifacts-buildcache", False
-            ),
            "rebuild-index": self.ci_config.get("rebuild-index", True),
            "broken-specs-url": self.ci_config.get("broken-specs-url", None),
            "broken-tests-packages": self.ci_config.get("broken-tests-packages", []),
@@ -405,9 +416,20 @@ def __init__(self, ci_config, spec_labels, stages):
            if name not in ["any", "build"]:
                jobs[name] = self.__init_job("")

-    def __init_job(self, spec):
+    def __init_job(self, release_spec):
        """Initialize job object"""
-        return {"spec": spec, "attributes": {}}
+        job_object = {"spec": release_spec, "attributes": {}}
+        if release_spec:
+            job_vars = job_object["attributes"].setdefault("variables", {})
+            job_vars["SPACK_JOB_SPEC_DAG_HASH"] = release_spec.dag_hash()
+            job_vars["SPACK_JOB_SPEC_PKG_NAME"] = release_spec.name
+            job_vars["SPACK_JOB_SPEC_PKG_VERSION"] = release_spec.format("{version}")
+            job_vars["SPACK_JOB_SPEC_COMPILER_NAME"] = release_spec.format("{compiler.name}")
+            job_vars["SPACK_JOB_SPEC_COMPILER_VERSION"] = release_spec.format("{compiler.version}")
+            job_vars["SPACK_JOB_SPEC_ARCH"] = release_spec.format("{architecture}")
+            job_vars["SPACK_JOB_SPEC_VARIANTS"] = release_spec.format("{variants}")
+
+        return job_object

    def __is_named(self, section):
        """Check if a pipeline-gen configuration section is for a named job,
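Seeding the SPACK_JOB_SPEC_* variables in __init_job means later stages, such as the dynamic mapping below, can assume they exist. A hedged example of the resulting job object, with invented values:

    # Hypothetical job object for a zlib spec; every value is illustrative only.
    job_object = {
        "spec": "<the release spec>",
        "attributes": {
            "variables": {
                "SPACK_JOB_SPEC_DAG_HASH": "abcdef1234567890",
                "SPACK_JOB_SPEC_PKG_NAME": "zlib",
                "SPACK_JOB_SPEC_PKG_VERSION": "1.3",
                "SPACK_JOB_SPEC_COMPILER_NAME": "gcc",
                "SPACK_JOB_SPEC_COMPILER_VERSION": "12.3.0",
                "SPACK_JOB_SPEC_ARCH": "linux-ubuntu22.04-x86_64",
                "SPACK_JOB_SPEC_VARIANTS": "+shared",
            }
        },
    }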
@@ -500,6 +522,7 @@ def generate_ir(self):
        for section in reversed(pipeline_gen):
            name = self.__is_named(section)
            has_submapping = "submapping" in section
+            has_dynmapping = "dynamic-mapping" in section
            section = cfg.InternalConfigScope._process_dict_keyname_overrides(section)

            if name:
@@ -542,6 +565,108 @@ def _apply_section(dest, src):
                        job["attributes"] = self.__apply_submapping(
                            job["attributes"], job["spec"], section
                        )
+            elif has_dynmapping:
+                mapping = section["dynamic-mapping"]
+
+                dynmap_name = mapping.get("name")
+
+                # Check if this section should be skipped
+                dynmap_skip = os.environ.get("SPACK_CI_SKIP_DYNAMIC_MAPPING")
+                if dynmap_name and dynmap_skip:
+                    if re.match(dynmap_skip, dynmap_name):
+                        continue
+
+                # Get the endpoint
+                endpoint = mapping["endpoint"]
+                endpoint_url = urlparse(endpoint)
+
+                # Configure the request header
+                header = {"User-Agent": web_util.SPACK_USER_AGENT}
+                header.update(mapping.get("header", {}))
+
+                # Expand header environment variables
+                # ie. if tokens are passed
+                for value in header.values():
+                    value = os.path.expandvars(value)
+
+                verify_ssl = mapping.get("verify_ssl", spack.config.get("config:verify_ssl", True))
+                timeout = mapping.get("timeout", spack.config.get("config:connect_timeout", 1))
+
+                required = mapping.get("require", [])
+                allowed = mapping.get("allow", [])
+                ignored = mapping.get("ignore", [])
+
+                # required keys are implicitly allowed
+                allowed = sorted(set(allowed + required))
+                ignored = sorted(set(ignored))
+                required = sorted(set(required))
+
+                # Make sure required things are not also ignored
+                assert not any([ikey in required for ikey in ignored])
+
+                def job_query(job):
+                    job_vars = job["attributes"]["variables"]
+                    query = (
+                        "{SPACK_JOB_SPEC_PKG_NAME}@{SPACK_JOB_SPEC_PKG_VERSION}"
+                        # The preceding spaces are required (ref. https://github.com/spack/spack-gantry/blob/develop/docs/api.md#allocation)
+                        " {SPACK_JOB_SPEC_VARIANTS}"
+                        " arch={SPACK_JOB_SPEC_ARCH}"
+                        "%{SPACK_JOB_SPEC_COMPILER_NAME}@{SPACK_JOB_SPEC_COMPILER_VERSION}"
+                    ).format_map(job_vars)
+                    return f"spec={quote(query)}"
+
+                for job in jobs.values():
+                    if not job["spec"]:
+                        continue
+
+                    # Create request for this job
+                    query = job_query(job)
+                    request = Request(
+                        endpoint_url._replace(query=query).geturl(), headers=header, method="GET"
+                    )
+                    try:
+                        response = _dyn_mapping_urlopener(
+                            request, verify_ssl=verify_ssl, timeout=timeout
+                        )
+                    except Exception as e:
+                        # For now just ignore any errors from dynamic mapping and continue
+                        # This is still experimental, and failures should not stop CI
+                        # from running normally
+                        tty.warn(f"Failed to fetch dynamic mapping for query:\n\t{query}")
+                        tty.warn(f"{e}")
+                        continue
+
+                    config = json.load(codecs.getreader("utf-8")(response))
+
+                    # Strip ignore keys
+                    if ignored:
+                        for key in ignored:
+                            if key in config:
+                                config.pop(key)
+
+                    # Only keep allowed keys
+                    clean_config = {}
+                    if allowed:
+                        for key in allowed:
+                            if key in config:
+                                clean_config[key] = config[key]
+                    else:
+                        clean_config = config
+
+                    # Verify all of the required keys are present
+                    if required:
+                        missing_keys = []
+                        for key in required:
+                            if key not in clean_config.keys():
+                                missing_keys.append(key)
+
+                        if missing_keys:
+                            tty.warn(f"Response missing required keys: {missing_keys}")
+
+                    if clean_config:
+                        job["attributes"] = spack.config.merge_yaml(
+                            job.get("attributes", {}), clean_config
+                        )

        for _, job in jobs.items():
            if job["spec"]:
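job_query above assembles the seeded variables into the spec query sent to the mapping endpoint. A standalone illustration of the string it produces (values invented):

    from urllib.parse import quote

    job_vars = {
        "SPACK_JOB_SPEC_PKG_NAME": "zlib",
        "SPACK_JOB_SPEC_PKG_VERSION": "1.3",
        "SPACK_JOB_SPEC_VARIANTS": "+shared",
        "SPACK_JOB_SPEC_ARCH": "linux-ubuntu22.04-x86_64",
        "SPACK_JOB_SPEC_COMPILER_NAME": "gcc",
        "SPACK_JOB_SPEC_COMPILER_VERSION": "12.3.0",
    }
    query = (
        "{SPACK_JOB_SPEC_PKG_NAME}@{SPACK_JOB_SPEC_PKG_VERSION}"
        " {SPACK_JOB_SPEC_VARIANTS}"
        " arch={SPACK_JOB_SPEC_ARCH}"
        "%{SPACK_JOB_SPEC_COMPILER_NAME}@{SPACK_JOB_SPEC_COMPILER_VERSION}"
    ).format_map(job_vars)
    print(f"spec={quote(query)}")  # spec=zlib%401.3%20%2Bshared%20arch%3Dlinux-...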
@@ -558,14 +683,13 @@ def generate_gitlab_ci_yaml(
    prune_dag=False,
    check_index_only=False,
    artifacts_root=None,
-    remote_mirror_override=None,
):
    """Generate a gitlab yaml file to run a dynamic child pipeline from
    the spec matrix in the active environment.

    Arguments:
        env (spack.environment.Environment): Activated environment object
-            which must contain a gitlab-ci section describing how to map
+            which must contain a ci section describing how to map
            specs to runners
        print_summary (bool): Should we print a summary of all the jobs in
            the stages in which they were placed.
@@ -580,39 +704,21 @@ def generate_gitlab_ci_yaml(
        artifacts_root (str): Path where artifacts like logs, environment
            files (spack.yaml, spack.lock), etc should be written. GitLab
            requires this to be within the project directory.
-        remote_mirror_override (str): Typically only needed when one spack.yaml
-            is used to populate several mirrors with binaries, based on some
-            criteria. Spack protected pipelines populate different mirrors based
-            on branch name, facilitated by this option. DEPRECATED
    """
    with spack.concretize.disable_compiler_existence_check():
        with env.write_transaction():
            env.concretize()
            env.write()

    yaml_root = env.manifest[ev.TOP_LEVEL_KEY]

    # Get the joined "ci" config with all of the current scopes resolved
    ci_config = cfg.get("ci")

-    config_deprecated = False
    if not ci_config:
-        tty.warn("Environment does not have `ci` a configuration")
-        gitlabci_config = yaml_root.get("gitlab-ci")
-        if not gitlabci_config:
-            tty.die("Environment yaml does not have `gitlab-ci` config section. Cannot recover.")
-
-        tty.warn(
-            "The `gitlab-ci` configuration is deprecated in favor of `ci`.\n",
-            "To update run \n\t$ spack env update /path/to/ci/spack.yaml",
-        )
-        translate_deprecated_config(gitlabci_config)
-        ci_config = gitlabci_config
-        config_deprecated = True
+        raise SpackCIError("Environment does not have a `ci` configuration")

    # Default target is gitlab...and only target is gitlab
    if not ci_config.get("target", "gitlab") == "gitlab":
-        tty.die('Spack CI module only generates target "gitlab"')
+        raise SpackCIError('Spack CI module only generates target "gitlab"')

    cdash_config = cfg.get("cdash")
    cdash_handler = CDashHandler(cdash_config) if "build-group" in cdash_config else None
@@ -673,12 +779,6 @@ def generate_gitlab_ci_yaml(
    spack_pipeline_type = os.environ.get("SPACK_PIPELINE_TYPE", None)

    copy_only_pipeline = spack_pipeline_type == "spack_copy_only"
-    if copy_only_pipeline and config_deprecated:
-        tty.warn(
-            "SPACK_PIPELINE_TYPE=spack_copy_only is not supported when using\n",
-            "deprecated ci configuration, a no-op pipeline will be generated\n",
-            "instead.",
-        )

    def ensure_expected_target_path(path):
        """Returns passed paths with all Windows path separators exchanged
@@ -697,38 +797,16 @@ def ensure_expected_target_path(path):
        return path

    pipeline_mirrors = spack.mirror.MirrorCollection(binary=True)
-    deprecated_mirror_config = False
-    buildcache_destination = None
-    if "buildcache-destination" in pipeline_mirrors:
-        if remote_mirror_override:
-            tty.die(
-                "Using the deprecated --buildcache-destination cli option and "
-                "having a mirror named 'buildcache-destination' at the same time "
-                "is not allowed"
-            )
-        buildcache_destination = pipeline_mirrors["buildcache-destination"]
-    else:
-        deprecated_mirror_config = True
-        # TODO: This will be an error in Spack 0.23
+    if "buildcache-destination" not in pipeline_mirrors:
+        raise SpackCIError("spack ci generate requires a mirror named 'buildcache-destination'")

-    # TODO: Remove this block in spack 0.23
-    remote_mirror_url = None
-    if deprecated_mirror_config:
-        if "mirrors" not in yaml_root or len(yaml_root["mirrors"].values()) < 1:
-            tty.die("spack ci generate requires an env containing a mirror")
-
-        ci_mirrors = yaml_root["mirrors"]
-        mirror_urls = [url for url in ci_mirrors.values()]
-        remote_mirror_url = mirror_urls[0]
+    buildcache_destination = pipeline_mirrors["buildcache-destination"]

    spack_buildcache_copy = os.environ.get("SPACK_COPY_BUILDCACHE", None)
    if spack_buildcache_copy:
        buildcache_copies = {}
-        buildcache_copy_src_prefix = (
-            buildcache_destination.fetch_url
-            if buildcache_destination
-            else remote_mirror_override or remote_mirror_url
-        )
+        buildcache_copy_src_prefix = buildcache_destination.fetch_url
        buildcache_copy_dest_prefix = spack_buildcache_copy

    # Check for a list of "known broken" specs that we should not bother
@@ -738,55 +816,10 @@ def ensure_expected_target_path(path):
    if "broken-specs-url" in ci_config:
        broken_specs_url = ci_config["broken-specs-url"]

-    enable_artifacts_buildcache = False
-    if "enable-artifacts-buildcache" in ci_config:
-        tty.warn("Support for enable-artifacts-buildcache will be removed in Spack 0.23")
-        enable_artifacts_buildcache = ci_config["enable-artifacts-buildcache"]
-
    rebuild_index_enabled = True
    if "rebuild-index" in ci_config and ci_config["rebuild-index"] is False:
        rebuild_index_enabled = False

-    temp_storage_url_prefix = None
-    if "temporary-storage-url-prefix" in ci_config:
-        tty.warn("Support for temporary-storage-url-prefix will be removed in Spack 0.23")
-        temp_storage_url_prefix = ci_config["temporary-storage-url-prefix"]
-
-    # If a remote mirror override (alternate buildcache destination) was
-    # specified, add it here in case it has already built hashes we might
-    # generate.
-    # TODO: Remove this block in Spack 0.23
-    mirrors_to_check = None
-    if deprecated_mirror_config and remote_mirror_override:
-        if spack_pipeline_type == "spack_protected_branch":
-            # Overriding the main mirror in this case might result
-            # in skipping jobs on a release pipeline because specs are
-            # up to date in develop. Eventually we want to notice and take
-            # advantage of this by scheduling a job to copy the spec from
-            # develop to the release, but until we have that, this makes
-            # sure we schedule a rebuild job if the spec isn't already in
-            # override mirror.
-            mirrors_to_check = {"override": remote_mirror_override}
-
-        # If we have a remote override and we want generate pipeline using
-        # --check-index-only, then the override mirror needs to be added to
-        # the configured mirrors when bindist.update() is run, or else we
-        # won't fetch its index and include in our local cache.
-        spack.mirror.add(
-            spack.mirror.Mirror(remote_mirror_override, name="ci_pr_mirror"),
-            cfg.default_modify_scope(),
-        )
-
-    # TODO: Remove this block in Spack 0.23
-    shared_pr_mirror = None
-    if deprecated_mirror_config and spack_pipeline_type == "spack_pull_request":
-        stack_name = os.environ.get("SPACK_CI_STACK_NAME", "")
-        shared_pr_mirror = url_util.join(SHARED_PR_MIRROR_URL, stack_name)
-        spack.mirror.add(
-            spack.mirror.Mirror(shared_pr_mirror, name="ci_shared_pr_mirror"),
-            cfg.default_modify_scope(),
-        )
-
    pipeline_artifacts_dir = artifacts_root
    if not pipeline_artifacts_dir:
        proj_dir = os.environ.get("CI_PROJECT_DIR", os.getcwd())
@@ -795,9 +828,8 @@ def ensure_expected_target_path(path):
    pipeline_artifacts_dir = os.path.abspath(pipeline_artifacts_dir)
    concrete_env_dir = os.path.join(pipeline_artifacts_dir, "concrete_environment")

-    # Now that we've added the mirrors we know about, they should be properly
-    # reflected in the environment manifest file, so copy that into the
-    # concrete environment directory, along with the spack.lock file.
+    # Copy the environment manifest file into the concrete environment directory,
+    # along with the spack.lock file.
    if not os.path.exists(concrete_env_dir):
        os.makedirs(concrete_env_dir)
    shutil.copyfile(env.manifest_path, os.path.join(concrete_env_dir, "spack.yaml"))
@@ -822,18 +854,12 @@ def ensure_expected_target_path(path):
        env_includes.extend(include_scopes)
        env_yaml_root["spack"]["include"] = [ensure_expected_target_path(i) for i in env_includes]

-        if "gitlab-ci" in env_yaml_root["spack"] and "ci" not in env_yaml_root["spack"]:
-            env_yaml_root["spack"]["ci"] = env_yaml_root["spack"].pop("gitlab-ci")
-            translate_deprecated_config(env_yaml_root["spack"]["ci"])
-
        with open(os.path.join(concrete_env_dir, "spack.yaml"), "w") as fd:
            fd.write(syaml.dump_config(env_yaml_root, default_flow_style=False))

    job_log_dir = os.path.join(pipeline_artifacts_dir, "logs")
    job_repro_dir = os.path.join(pipeline_artifacts_dir, "reproduction")
    job_test_dir = os.path.join(pipeline_artifacts_dir, "tests")
-    # TODO: Remove this line in Spack 0.23
-    local_mirror_dir = os.path.join(pipeline_artifacts_dir, "mirror")
    user_artifacts_dir = os.path.join(pipeline_artifacts_dir, "user_data")

    # We communicate relative paths to the downstream jobs to avoid issues in
@@ -847,8 +873,6 @@ def ensure_expected_target_path(path):
    rel_job_log_dir = os.path.relpath(job_log_dir, ci_project_dir)
    rel_job_repro_dir = os.path.relpath(job_repro_dir, ci_project_dir)
    rel_job_test_dir = os.path.relpath(job_test_dir, ci_project_dir)
-    # TODO: Remove this line in Spack 0.23
-    rel_local_mirror_dir = os.path.join(local_mirror_dir, ci_project_dir)
    rel_user_artifacts_dir = os.path.relpath(user_artifacts_dir, ci_project_dir)

    # Speed up staging by first fetching binary indices from all mirrors
@@ -910,7 +934,7 @@ def ensure_expected_target_path(path):
                continue

            up_to_date_mirrors = bindist.get_mirrors_for_spec(
-                spec=release_spec, mirrors_to_check=mirrors_to_check, index_only=check_index_only
+                spec=release_spec, index_only=check_index_only
            )

            spec_record.rebuild = not up_to_date_mirrors
@@ -952,36 +976,16 @@ def main_script_replacements(cmd):

            job_name = get_job_name(release_spec, build_group)

-            job_vars = job_object.setdefault("variables", {})
-            job_vars["SPACK_JOB_SPEC_DAG_HASH"] = release_spec_dag_hash
-            job_vars["SPACK_JOB_SPEC_PKG_NAME"] = release_spec.name
-            job_vars["SPACK_JOB_SPEC_PKG_VERSION"] = release_spec.format("{version}")
-            job_vars["SPACK_JOB_SPEC_COMPILER_NAME"] = release_spec.format("{compiler.name}")
-            job_vars["SPACK_JOB_SPEC_COMPILER_VERSION"] = release_spec.format("{compiler.version}")
-            job_vars["SPACK_JOB_SPEC_ARCH"] = release_spec.format("{architecture}")
-            job_vars["SPACK_JOB_SPEC_VARIANTS"] = release_spec.format("{variants}")
-
            job_object["needs"] = []
            if spec_label in dependencies:
-                if enable_artifacts_buildcache:
-                    # Get dependencies transitively, so they're all
-                    # available in the artifacts buildcache.
-                    dep_jobs = [d for d in release_spec.traverse(deptype="all", root=False)]
-                else:
-                    # In this case, "needs" is only used for scheduling
-                    # purposes, so we only get the direct dependencies.
-                    dep_jobs = []
-                    for dep_label in dependencies[spec_label]:
-                        dep_jobs.append(spec_labels[dep_label])
+                # In this case, "needs" is only used for scheduling
+                # purposes, so we only get the direct dependencies.
+                dep_jobs = []
+                for dep_label in dependencies[spec_label]:
+                    dep_jobs.append(spec_labels[dep_label])

                job_object["needs"].extend(
-                    _format_job_needs(
-                        dep_jobs,
-                        build_group,
-                        prune_dag,
-                        rebuild_decisions,
-                        enable_artifacts_buildcache,
-                    )
+                    _format_job_needs(dep_jobs, build_group, prune_dag, rebuild_decisions)
                )

            rebuild_spec = spec_record.rebuild

@@ -1038,6 +1042,7 @@ def main_script_replacements(cmd):

            # Let downstream jobs know whether the spec needed rebuilding, regardless
            # whether DAG pruning was enabled or not.
+            job_vars = job_object["variables"]
            job_vars["SPACK_SPEC_NEEDS_REBUILD"] = str(rebuild_spec)

            if cdash_handler:
@@ -1062,19 +1067,6 @@ def main_script_replacements(cmd):
                },
            )

-            # TODO: Remove this block in Spack 0.23
-            if enable_artifacts_buildcache:
-                bc_root = os.path.join(local_mirror_dir, "build_cache")
-                job_object["artifacts"]["paths"].extend(
-                    [
-                        os.path.join(bc_root, p)
-                        for p in [
-                            bindist.tarball_name(release_spec, ".spec.json"),
-                            bindist.tarball_directory_name(release_spec),
-                        ]
-                    ]
-                )
-
            job_object["stage"] = stage_name
            job_object["retry"] = {"max": 2, "when": JOB_RETRY_CONDITIONS}
            job_object["interruptible"] = True
@@ -1089,15 +1081,7 @@ def main_script_replacements(cmd):
            job_id += 1

    if print_summary:
-        _print_staging_summary(spec_labels, stages, mirrors_to_check, rebuild_decisions)
-
-    # Clean up remote mirror override if enabled
-    # TODO: Remove this block in Spack 0.23
-    if deprecated_mirror_config:
-        if remote_mirror_override:
-            spack.mirror.remove("ci_pr_mirror", cfg.default_modify_scope())
-        if spack_pipeline_type == "spack_pull_request":
-            spack.mirror.remove("ci_shared_pr_mirror", cfg.default_modify_scope())
+        _print_staging_summary(spec_labels, stages, rebuild_decisions)

    tty.debug(f"{job_id} build jobs generated in {stage_id} stages")
@@ -1119,7 +1103,7 @@ def main_script_replacements(cmd):
        "when": ["runner_system_failure", "stuck_or_timeout_failure", "script_failure"],
    }

-    if copy_only_pipeline and not config_deprecated:
+    if copy_only_pipeline:
        stage_names.append("copy")
        sync_job = copy.deepcopy(spack_ci_ir["jobs"]["copy"]["attributes"])
        sync_job["stage"] = "copy"
@@ -1129,17 +1113,12 @@ def main_script_replacements(cmd):
        if "variables" not in sync_job:
            sync_job["variables"] = {}

-        sync_job["variables"]["SPACK_COPY_ONLY_DESTINATION"] = (
-            buildcache_destination.fetch_url
-            if buildcache_destination
-            else remote_mirror_override or remote_mirror_url
-        )
+        sync_job["variables"]["SPACK_COPY_ONLY_DESTINATION"] = buildcache_destination.fetch_url

-        if "buildcache-source" in pipeline_mirrors:
-            buildcache_source = pipeline_mirrors["buildcache-source"].fetch_url
-        else:
-            # TODO: Remove this condition in Spack 0.23
-            buildcache_source = os.environ.get("SPACK_SOURCE_MIRROR", None)
+        if "buildcache-source" not in pipeline_mirrors:
+            raise SpackCIError("Copy-only pipelines require a mirror named 'buildcache-source'")
+
+        buildcache_source = pipeline_mirrors["buildcache-source"].fetch_url
        sync_job["variables"]["SPACK_BUILDCACHE_SOURCE"] = buildcache_source
        sync_job["dependencies"] = []

@@ -1147,27 +1126,6 @@ def main_script_replacements(cmd):
        job_id += 1

    if job_id > 0:
-        # TODO: Remove this block in Spack 0.23
-        if temp_storage_url_prefix:
-            # There were some rebuild jobs scheduled, so we will need to
-            # schedule a job to clean up the temporary storage location
-            # associated with this pipeline.
-            stage_names.append("cleanup-temp-storage")
-            cleanup_job = copy.deepcopy(spack_ci_ir["jobs"]["cleanup"]["attributes"])
-
-            cleanup_job["stage"] = "cleanup-temp-storage"
-            cleanup_job["when"] = "always"
-            cleanup_job["retry"] = service_job_retries
-            cleanup_job["interruptible"] = True
-
-            cleanup_job["script"] = _unpack_script(
-                cleanup_job["script"],
-                op=lambda cmd: cmd.replace("mirror_prefix", temp_storage_url_prefix),
-            )
-
-            cleanup_job["dependencies"] = []
-            output_object["cleanup"] = cleanup_job
-
        if (
            "script" in spack_ci_ir["jobs"]["signing"]["attributes"]
            and spack_pipeline_type == "spack_protected_branch"
@@ -1184,11 +1142,9 @@ def main_script_replacements(cmd):
            signing_job["interruptible"] = True
            if "variables" not in signing_job:
                signing_job["variables"] = {}
-            signing_job["variables"]["SPACK_BUILDCACHE_DESTINATION"] = (
-                buildcache_destination.push_url  # need the s3 url for aws s3 sync
-                if buildcache_destination
-                else remote_mirror_override or remote_mirror_url
-            )
+            signing_job["variables"][
+                "SPACK_BUILDCACHE_DESTINATION"
+            ] = buildcache_destination.push_url
            signing_job["dependencies"] = []

            output_object["sign-pkgs"] = signing_job
@@ -1199,9 +1155,7 @@ def main_script_replacements(cmd):
            final_job = spack_ci_ir["jobs"]["reindex"]["attributes"]

            final_job["stage"] = "stage-rebuild-index"
-            target_mirror = remote_mirror_override or remote_mirror_url
-            if buildcache_destination:
-                target_mirror = buildcache_destination.push_url
+            target_mirror = buildcache_destination.push_url
            final_job["script"] = _unpack_script(
                final_job["script"],
                op=lambda cmd: cmd.replace("{index_target_mirror}", target_mirror),
@@ -1227,17 +1181,11 @@ def main_script_replacements(cmd):
        "SPACK_CONCRETE_ENV_DIR": rel_concrete_env_dir,
        "SPACK_VERSION": spack_version,
        "SPACK_CHECKOUT_VERSION": version_to_clone,
-        # TODO: Remove this line in Spack 0.23
-        "SPACK_REMOTE_MIRROR_URL": remote_mirror_url,
        "SPACK_JOB_LOG_DIR": rel_job_log_dir,
        "SPACK_JOB_REPRO_DIR": rel_job_repro_dir,
        "SPACK_JOB_TEST_DIR": rel_job_test_dir,
-        # TODO: Remove this line in Spack 0.23
-        "SPACK_LOCAL_MIRROR_DIR": rel_local_mirror_dir,
        "SPACK_PIPELINE_TYPE": str(spack_pipeline_type),
        "SPACK_CI_STACK_NAME": os.environ.get("SPACK_CI_STACK_NAME", "None"),
-        # TODO: Remove this line in Spack 0.23
-        "SPACK_CI_SHARED_PR_MIRROR_URL": shared_pr_mirror or "None",
        "SPACK_REBUILD_CHECK_UP_TO_DATE": str(prune_dag),
        "SPACK_REBUILD_EVERYTHING": str(rebuild_everything),
        "SPACK_REQUIRE_SIGNING": os.environ.get("SPACK_REQUIRE_SIGNING", "False"),
@@ -1246,10 +1194,6 @@ def main_script_replacements(cmd):
    for item, val in output_vars.items():
        output_vars[item] = ensure_expected_target_path(val)

-    # TODO: Remove this block in Spack 0.23
-    if deprecated_mirror_config and remote_mirror_override:
-        (output_object["variables"]["SPACK_REMOTE_MIRROR_OVERRIDE"]) = remote_mirror_override
-
    spack_stack_name = os.environ.get("SPACK_CI_STACK_NAME", None)
    if spack_stack_name:
        output_object["variables"]["SPACK_CI_STACK_NAME"] = spack_stack_name
@@ -1276,15 +1220,8 @@ def main_script_replacements(cmd):
    noop_job["retry"] = 0
    noop_job["allow_failure"] = True

-    if copy_only_pipeline and config_deprecated:
-        tty.debug("Generating no-op job as copy-only is unsupported here.")
-        noop_job["script"] = [
-            'echo "copy-only pipelines are not supported with deprecated ci configs"'
-        ]
-        output_object = {"unsupported-copy": noop_job}
-    else:
-        tty.debug("No specs to rebuild, generating no-op job")
-        output_object = {"no-specs-to-rebuild": noop_job}
+    tty.debug("No specs to rebuild, generating no-op job")
+    output_object = {"no-specs-to-rebuild": noop_job}

    # Ensure the child pipeline always runs
    output_object["workflow"] = {"rules": [{"when": "always"}]}
@@ -2322,83 +2259,6 @@ def report_skipped(self, spec: spack.spec.Spec, report_dir: str, reason: Optiona
        reporter.test_skipped_report(report_dir, spec, reason)


-def translate_deprecated_config(config):
-    # Remove all deprecated keys from config
-    mappings = config.pop("mappings", [])
-    match_behavior = config.pop("match_behavior", "first")
-
-    build_job = {}
-    if "image" in config:
-        build_job["image"] = config.pop("image")
-    if "tags" in config:
-        build_job["tags"] = config.pop("tags")
-    if "variables" in config:
-        build_job["variables"] = config.pop("variables")
-
-    # Scripts always override in old CI
-    if "before_script" in config:
-        build_job["before_script:"] = config.pop("before_script")
-    if "script" in config:
-        build_job["script:"] = config.pop("script")
-    if "after_script" in config:
-        build_job["after_script:"] = config.pop("after_script")
-
-    signing_job = None
-    if "signing-job-attributes" in config:
-        signing_job = {"signing-job": config.pop("signing-job-attributes")}
-
-    service_job_attributes = None
-    if "service-job-attributes" in config:
-        service_job_attributes = config.pop("service-job-attributes")
-
-    # If this config already has pipeline-gen, do no more
-    if "pipeline-gen" in config:
-        return True if mappings or build_job or signing_job or service_job_attributes else False
-
-    config["target"] = "gitlab"
-
-    config["pipeline-gen"] = []
-    pipeline_gen = config["pipeline-gen"]
-
-    # Build Job
-    submapping = []
-    for section in mappings:
-        submapping_section = {"match": section["match"]}
-        if "runner-attributes" in section:
-            remapped_attributes = {}
-            if match_behavior == "first":
-                for key, value in section["runner-attributes"].items():
-                    # Scripts always override in old CI
-                    if key == "script":
-                        remapped_attributes["script:"] = value
-                    elif key == "before_script":
-                        remapped_attributes["before_script:"] = value
-                    elif key == "after_script":
-                        remapped_attributes["after_script:"] = value
-                    else:
-                        remapped_attributes[key] = value
-            else:
-                # Handle "merge" behavior by allowing scripts to merge in submapping section
-                remapped_attributes = section["runner-attributes"]
-            submapping_section["build-job"] = remapped_attributes
-
-        if "remove-attributes" in section:
-            # Old format only allowed tags in this section, so no extra checks are needed
-            submapping_section["build-job-remove"] = section["remove-attributes"]
-        submapping.append(submapping_section)
-    pipeline_gen.append({"submapping": submapping, "match_behavior": match_behavior})
-
-    if build_job:
-        pipeline_gen.append({"build-job": build_job})
-
-    # Signing Job
-    if signing_job:
-        pipeline_gen.append(signing_job)
-
-    # Service Jobs
-    if service_job_attributes:
-        pipeline_gen.append({"reindex-job": service_job_attributes})
-        pipeline_gen.append({"noop-job": service_job_attributes})
-        pipeline_gen.append({"cleanup-job": service_job_attributes})
-
-    return True
+class SpackCIError(spack.error.SpackError):
+    def __init__(self, msg):
+        super().__init__(msg)
@@ -17,6 +17,7 @@
from llnl.util.tty.colify import colify
from llnl.util.tty.color import colorize

+import spack.concretize
import spack.config  # breaks a cycle.
import spack.environment as ev
import spack.error
@@ -173,10 +174,29 @@ def parse_specs(
    arg_string = " ".join([quote_kvp(arg) for arg in args])

    specs = spack.parser.parse(arg_string)
-    for spec in specs:
-        if concretize:
-            spec.concretize(tests=tests)
-    return specs
+    if not concretize:
+        return specs
+
+    to_concretize = [(s, None) for s in specs]
+    return _concretize_spec_pairs(to_concretize, tests=tests)
+
+
+def _concretize_spec_pairs(to_concretize, tests=False):
+    """Helper method that concretizes abstract specs from a list of abstract,concrete pairs.
+
+    Any spec with a concrete spec associated with it will concretize to that spec. Any spec
+    with ``None`` for its concrete spec will be newly concretized. This method respects unification
+    rules from config."""
+    unify = spack.config.get("concretizer:unify", False)
+
+    concretize_method = spack.concretize.concretize_separately  # unify: false
+    if unify is True:
+        concretize_method = spack.concretize.concretize_together
+    elif unify == "when_possible":
+        concretize_method = spack.concretize.concretize_together_when_possible
+
+    concretized = concretize_method(to_concretize, tests=tests)
+    return [concrete for _, concrete in concretized]
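The concretizer:unify setting selects one of three strategies; the dispatch is a plain value-to-function mapping. A standalone sketch with stub strategy functions (not Spack's real implementations):

    def separately(pairs, tests=False):
        return [(a, c or f"concrete({a})") for a, c in pairs]  # stub

    def together(pairs, tests=False):
        return separately(pairs, tests)  # stub

    def together_when_possible(pairs, tests=False):
        return separately(pairs, tests)  # stub

    def pick_method(unify):
        method = separately            # unify: false
        if unify is True:
            method = together
        elif unify == "when_possible":
            method = together_when_possible
        return method

    print(pick_method("when_possible").__name__)  # together_when_possible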
def matching_spec_from_env(spec):
@@ -192,6 +212,22 @@ def matching_spec_from_env(spec):
    return spec.concretized()


+def matching_specs_from_env(specs):
+    """
+    Same as ``matching_spec_from_env`` but respects spec unification rules.
+
+    For each spec, if there is a matching spec in the environment it is used. If no
+    matching spec is found, this will return the given spec but concretized in the
+    context of the active environment and other given specs, with unification rules applied.
+    """
+    env = ev.active_environment()
+    spec_pairs = [(spec, env.matching_spec(spec) if env else None) for spec in specs]
+    additional_concrete_specs = (
+        [(concrete, concrete) for _, concrete in env.concretized_specs()] if env else []
+    )
+    return _concretize_spec_pairs(spec_pairs + additional_concrete_specs)[: len(spec_pairs)]


def disambiguate_spec(spec, env, local=False, installed=True, first=False):
    """Given a spec, figure out which installed package it refers to.
@@ -19,12 +19,23 @@


def setup_parser(subparser):
+    # DEPRECATED: equivalent to --generic --target
    subparser.add_argument(
-        "-g", "--generic-target", action="store_true", help="show the best generic target"
+        "-g",
+        "--generic-target",
+        action="store_true",
+        help="show the best generic target (deprecated)",
    )
    subparser.add_argument(
        "--known-targets", action="store_true", help="show a list of all known targets and exit"
    )
+    target_type = subparser.add_mutually_exclusive_group()
+    target_type.add_argument(
+        "--family", action="store_true", help="print generic ISA (x86_64, aarch64, ppc64le, ...)"
+    )
+    target_type.add_argument(
+        "--generic", action="store_true", help="print feature level (x86_64_v3, armv8.4a, ...)"
+    )
    parts = subparser.add_mutually_exclusive_group()
    parts2 = subparser.add_mutually_exclusive_group()
    parts.add_argument(
@@ -80,6 +91,7 @@ def display_target_group(header, target_group):


def arch(parser, args):
    if args.generic_target:
+        # TODO: add deprecation warning in 0.24
        print(archspec.cpu.host().generic)
        return

@@ -96,6 +108,10 @@ def arch(parser, args):
    host_platform = spack.platforms.host()
    host_os = host_platform.operating_system(os_args)
    host_target = host_platform.target(target_args)
+    if args.family:
+        host_target = host_target.family
+    elif args.generic:
+        host_target = host_target.generic
    architecture = spack.spec.ArchSpec((str(host_platform), str(host_os), str(host_target)))

    if args.platform:
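The new --family and --generic flags surface archspec's microarchitecture attributes directly. A quick illustration using the archspec library (output depends on the host CPU):

    import archspec.cpu

    host = archspec.cpu.host()
    print(host)          # e.g. skylake
    print(host.family)   # e.g. x86_64 (generic ISA)
    print(host.generic)  # e.g. x86_64_v3 (feature level)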
@@ -62,13 +62,6 @@ def setup_parser(subparser):
        "path to the file where generated jobs file should be written. "
        "default is .gitlab-ci.yml in the root of the repository",
    )
-    generate.add_argument(
-        "--copy-to",
-        default=None,
-        help="path to additional directory for job files\n\n"
-        "this option provides an absolute path to a directory where the generated "
-        "jobs yaml file should be copied. default is not to copy",
-    )
    generate.add_argument(
        "--optimize",
        action="store_true",
@@ -83,12 +76,6 @@ def setup_parser(subparser):
        default=False,
        help="(DEPRECATED) disable DAG scheduling (use 'plain' dependencies)",
    )
-    generate.add_argument(
-        "--buildcache-destination",
-        default=None,
-        help="override the mirror configured in the environment\n\n"
-        "allows for pushing binaries from the generated pipeline to a different location",
-    )
    prune_group = generate.add_mutually_exclusive_group()
    prune_group.add_argument(
        "--prune-dag",
@@ -214,20 +201,10 @@ def ci_generate(args):

    env = spack.cmd.require_active_env(cmd_name="ci generate")

-    if args.copy_to:
-        tty.warn("The flag --copy-to is deprecated and will be removed in Spack 0.23")
-
-    if args.buildcache_destination:
-        tty.warn(
-            "The flag --buildcache-destination is deprecated and will be removed in Spack 0.23"
-        )
-
    output_file = args.output_file
-    copy_yaml_to = args.copy_to
    prune_dag = args.prune_dag
    index_only = args.index_only
    artifacts_root = args.artifacts_root
-    buildcache_destination = args.buildcache_destination

    if not output_file:
        output_file = os.path.abspath(".gitlab-ci.yml")
@@ -245,15 +222,8 @@ def ci_generate(args):
        prune_dag=prune_dag,
        check_index_only=index_only,
        artifacts_root=artifacts_root,
-        remote_mirror_override=buildcache_destination,
    )

-    if copy_yaml_to:
-        copy_to_dir = os.path.dirname(copy_yaml_to)
-        if not os.path.exists(copy_to_dir):
-            os.makedirs(copy_to_dir)
-        shutil.copyfile(output_file, copy_yaml_to)


def ci_reindex(args):
    """rebuild the buildcache index for the remote mirror
@@ -298,22 +268,13 @@ def ci_rebuild(args):
    job_log_dir = os.environ.get("SPACK_JOB_LOG_DIR")
    job_test_dir = os.environ.get("SPACK_JOB_TEST_DIR")
    repro_dir = os.environ.get("SPACK_JOB_REPRO_DIR")
-    # TODO: Remove this in Spack 0.23
-    local_mirror_dir = os.environ.get("SPACK_LOCAL_MIRROR_DIR")
    concrete_env_dir = os.environ.get("SPACK_CONCRETE_ENV_DIR")
    ci_pipeline_id = os.environ.get("CI_PIPELINE_ID")
    ci_job_name = os.environ.get("CI_JOB_NAME")
    signing_key = os.environ.get("SPACK_SIGNING_KEY")
    job_spec_pkg_name = os.environ.get("SPACK_JOB_SPEC_PKG_NAME")
    job_spec_dag_hash = os.environ.get("SPACK_JOB_SPEC_DAG_HASH")
    spack_pipeline_type = os.environ.get("SPACK_PIPELINE_TYPE")
-    # TODO: Remove this in Spack 0.23
-    remote_mirror_override = os.environ.get("SPACK_REMOTE_MIRROR_OVERRIDE")
-    # TODO: Remove this in Spack 0.23
-    remote_mirror_url = os.environ.get("SPACK_REMOTE_MIRROR_URL")
    spack_ci_stack_name = os.environ.get("SPACK_CI_STACK_NAME")
-    # TODO: Remove this in Spack 0.23
-    shared_pr_mirror_url = os.environ.get("SPACK_CI_SHARED_PR_MIRROR_URL")
    rebuild_everything = os.environ.get("SPACK_REBUILD_EVERYTHING")
    require_signing = os.environ.get("SPACK_REQUIRE_SIGNING")

@@ -333,12 +294,10 @@ def ci_rebuild(args):
    job_log_dir = os.path.join(ci_project_dir, job_log_dir)
    job_test_dir = os.path.join(ci_project_dir, job_test_dir)
    repro_dir = os.path.join(ci_project_dir, repro_dir)
-    local_mirror_dir = os.path.join(ci_project_dir, local_mirror_dir)
    concrete_env_dir = os.path.join(ci_project_dir, concrete_env_dir)

    # Debug print some of the key environment variables we should have received
    tty.debug("pipeline_artifacts_dir = {0}".format(pipeline_artifacts_dir))
-    tty.debug("remote_mirror_url = {0}".format(remote_mirror_url))
    tty.debug("job_spec_pkg_name = {0}".format(job_spec_pkg_name))

    # Query the environment manifest to find out whether we're reporting to a
@@ -370,51 +329,11 @@ def ci_rebuild(args):
    full_rebuild = True if rebuild_everything and rebuild_everything.lower() == "true" else False

    pipeline_mirrors = spack.mirror.MirrorCollection(binary=True)
-    deprecated_mirror_config = False
-    buildcache_destination = None
-    if "buildcache-destination" in pipeline_mirrors:
-        buildcache_destination = pipeline_mirrors["buildcache-destination"]
-    else:
-        deprecated_mirror_config = True
-        # TODO: This will be an error in Spack 0.23
+    if "buildcache-destination" not in pipeline_mirrors:
+        tty.die("spack ci rebuild requires a mirror named 'buildcache-destination'")

-    # If no override url exists, then just push binary package to the
-    # normal remote mirror url.
-    # TODO: Remove in Spack 0.23
-    buildcache_mirror_url = remote_mirror_override or remote_mirror_url
-    if buildcache_destination:
-        buildcache_mirror_url = buildcache_destination.push_url
-
-    # Figure out what is our temporary storage mirror: Is it artifacts
-    # buildcache? Or temporary-storage-url-prefix? In some cases we need to
-    # force something or pipelines might not have a way to propagate build
-    # artifacts from upstream to downstream jobs.
-    # TODO: Remove this in Spack 0.23
-    pipeline_mirror_url = None
-
-    # TODO: Remove this in Spack 0.23
-    temp_storage_url_prefix = None
-    if "temporary-storage-url-prefix" in ci_config:
-        temp_storage_url_prefix = ci_config["temporary-storage-url-prefix"]
-        pipeline_mirror_url = url_util.join(temp_storage_url_prefix, ci_pipeline_id)
-
-    # TODO: Remove this in Spack 0.23
-    enable_artifacts_mirror = False
-    if "enable-artifacts-buildcache" in ci_config:
-        enable_artifacts_mirror = ci_config["enable-artifacts-buildcache"]
-        if enable_artifacts_mirror or (
-            spack_is_pr_pipeline and not enable_artifacts_mirror and not temp_storage_url_prefix
-        ):
-            # If you explicitly enabled the artifacts buildcache feature, or
-            # if this is a PR pipeline but you did not enable either of the
-            # per-pipeline temporary storage features, we force the use of
-            # artifacts buildcache. Otherwise jobs will not have binary
-            # dependencies from previous stages available since we do not
-            # allow pushing binaries to the remote mirror during PR pipelines.
-            enable_artifacts_mirror = True
-            pipeline_mirror_url = url_util.path_to_file_url(local_mirror_dir)
-            mirror_msg = "artifact buildcache enabled, mirror url: {0}".format(pipeline_mirror_url)
-            tty.debug(mirror_msg)
+    buildcache_destination = pipeline_mirrors["buildcache-destination"]

    # Get the concrete spec to be built by this job.
    try:
@@ -489,48 +408,7 @@ def ci_rebuild(args):
        fd.write(spack_info.encode("utf8"))
        fd.write(b"\n")

-    pipeline_mirrors = []
-
-    # If we decided there should be a temporary storage mechanism, add that
-    # mirror now so it's used when we check for a hash match already
-    # built for this spec.
-    # TODO: Remove this block in Spack 0.23
-    if pipeline_mirror_url:
-        mirror = spack.mirror.Mirror(pipeline_mirror_url, name=spack_ci.TEMP_STORAGE_MIRROR_NAME)
-        spack.mirror.add(mirror, cfg.default_modify_scope())
-        pipeline_mirrors.append(pipeline_mirror_url)
-
-    # Check configured mirrors for a built spec with a matching hash
-    # TODO: Remove this block in Spack 0.23
-    mirrors_to_check = None
-    if remote_mirror_override:
-        if spack_pipeline_type == "spack_protected_branch":
-            # Passing "mirrors_to_check" below means we *only* look in the override
-            # mirror to see if we should skip building, which is what we want.
-            mirrors_to_check = {"override": remote_mirror_override}
-
-        # Adding this mirror to the list of configured mirrors means dependencies
-        # could be installed from either the override mirror or any other configured
-        # mirror (e.g. remote_mirror_url which is defined in the environment or
-        # pipeline_mirror_url), which is also what we want.
-        spack.mirror.add(
-            spack.mirror.Mirror(remote_mirror_override, name="mirror_override"),
-            cfg.default_modify_scope(),
-        )
-        pipeline_mirrors.append(remote_mirror_override)
-
-    # TODO: Remove this in Spack 0.23
-    if deprecated_mirror_config and spack_pipeline_type == "spack_pull_request":
-        if shared_pr_mirror_url != "None":
-            pipeline_mirrors.append(shared_pr_mirror_url)
-
-    matches = (
-        None
-        if full_rebuild
-        else bindist.get_mirrors_for_spec(
-            job_spec, mirrors_to_check=mirrors_to_check, index_only=False
-        )
-    )
+    matches = None if full_rebuild else bindist.get_mirrors_for_spec(job_spec, index_only=False)

    if matches:
        # Got a hash match on at least one configured mirror. All
@@ -542,25 +420,10 @@ def ci_rebuild(args):
        tty.msg("No need to rebuild {0}, found hash match at: ".format(job_spec_pkg_name))
        for match in matches:
            tty.msg("  {0}".format(match["mirror_url"]))
-        # TODO: Remove this block in Spack 0.23
-        if enable_artifacts_mirror:
-            matching_mirror = matches[0]["mirror_url"]
-            build_cache_dir = os.path.join(local_mirror_dir, "build_cache")
-            tty.debug("Getting {0} buildcache from {1}".format(job_spec_pkg_name, matching_mirror))
-            tty.debug("Downloading to {0}".format(build_cache_dir))
-            bindist.download_single_spec(job_spec, build_cache_dir, mirror_url=matching_mirror)

        # Now we are done and successful
        return 0

-    # Before beginning the install, if this is a "rebuild everything" pipeline, we
-    # only want to keep the mirror being used by the current pipeline as its binary
-    # package destination. This ensures that when we rebuild everything, we only
-    # consume binary dependencies built in this pipeline.
-    # TODO: Remove this in Spack 0.23
-    if deprecated_mirror_config and full_rebuild:
-        spack_ci.remove_other_mirrors(pipeline_mirrors, cfg.default_modify_scope())
-
    # No hash match anywhere means we need to rebuild spec

    # Start with spack arguments
@@ -681,17 +544,11 @@ def ci_rebuild(args):
        cdash_handler.copy_test_results(reports_dir, job_test_dir)

    if install_exit_code == 0:
-        # If the install succeeded, push it to one or more mirrors. Failure to push to any mirror
+        # If the install succeeded, push it to the buildcache destination. Failure to push
        # will result in a non-zero exit code. Pushing is best-effort.
-        mirror_urls = [buildcache_mirror_url]
-
-        # TODO: Remove this block in Spack 0.23
|
||||
if pipeline_mirror_url:
|
||||
mirror_urls.append(pipeline_mirror_url)
|
||||
|
||||
for result in spack_ci.create_buildcache(
|
||||
input_spec=job_spec,
|
||||
destination_mirror_urls=mirror_urls,
|
||||
destination_mirror_urls=[buildcache_destination.push_url],
|
||||
sign_binaries=spack_ci.can_sign_binaries(),
|
||||
):
|
||||
if not result.success:
|
||||
|
||||
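Net effect of the hunks above: the deprecated per-pipeline mirror plumbing (temporary-storage-url-prefix, enable-artifacts-buildcache, remote mirror overrides) is slated for removal in Spack 0.23, and a pipeline must instead configure a single binary mirror named buildcache-destination. A condensed sketch of the surviving flow, using only names that appear in this diff (FAILED_CREATE_BUILDCACHE_CODE is an assumed failure constant, not shown in these hunks):

    # Sketch only: assumes a CI environment whose mirror config defines
    # a binary mirror named "buildcache-destination".
    pipeline_mirrors = spack.mirror.MirrorCollection(binary=True)
    if "buildcache-destination" not in pipeline_mirrors:
        tty.die("spack ci rebuild requires a mirror named 'buildcache-destination'")
    buildcache_destination = pipeline_mirrors["buildcache-destination"]

    # ... after a successful install, push only to that mirror's push_url ...
    for result in spack_ci.create_buildcache(
        input_spec=job_spec,
        destination_mirror_urls=[buildcache_destination.push_url],
        sign_binaries=spack_ci.can_sign_binaries(),
    ):
        if not result.success:
            install_exit_code = FAILED_CREATE_BUILDCACHE_CODE  # hypothetical constant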
@@ -105,7 +105,8 @@ def clean(parser, args):
    # Then do the cleaning falling through the cases
    if args.specs:
        specs = spack.cmd.parse_specs(args.specs, concretize=False)
        specs = list(spack.cmd.matching_spec_from_env(x) for x in specs)
        specs = spack.cmd.matching_specs_from_env(specs)

        for spec in specs:
            msg = "Cleaning build stage [{0}]"
            tty.msg(msg.format(spec.short_spec))
@@ -660,34 +660,32 @@ def mirror_name_or_url(m):
    # accidentally to a dir in the current working directory.

    # If there's a \ or / in the name, it's interpreted as a path or url.
    if "/" in m or "\\" in m:
    if "/" in m or "\\" in m or m in (".", ".."):
        return spack.mirror.Mirror(m)

    # Otherwise, the named mirror is required to exist.
    try:
        return spack.mirror.require_mirror_name(m)
    except ValueError as e:
        raise argparse.ArgumentTypeError(
            str(e) + ". Did you mean {}?".format(os.path.join(".", m))
        )
        raise argparse.ArgumentTypeError(f"{e}. Did you mean {os.path.join('.', m)}?") from e


def mirror_url(url):
    try:
        return spack.mirror.Mirror.from_url(url)
    except ValueError as e:
        raise argparse.ArgumentTypeError(str(e))
        raise argparse.ArgumentTypeError(str(e)) from e


def mirror_directory(path):
    try:
        return spack.mirror.Mirror.from_local_path(path)
    except ValueError as e:
        raise argparse.ArgumentTypeError(str(e))
        raise argparse.ArgumentTypeError(str(e)) from e


def mirror_name(name):
    try:
        return spack.mirror.require_mirror_name(name)
    except ValueError as e:
        raise argparse.ArgumentTypeError(str(e))
        raise argparse.ArgumentTypeError(str(e)) from e
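Two behaviors change here: the `raise ... from e` additions preserve the original ValueError as the cause of the argparse error, and "." / ".." now resolve as directory mirrors rather than failing a name lookup. A sketch of how the updated type function behaves when wired into a parser (the parser itself is illustrative, not part of the diff):

    import argparse

    parser = argparse.ArgumentParser()            # hypothetical parser
    parser.add_argument("mirror", type=mirror_name_or_url)

    parser.parse_args(["."])         # now a directory mirror, not a (missing) name
    parser.parse_args(["./cache"])   # contains "/": treated as a path or URL
    parser.parse_args(["upstream"])  # plain name: must be a configured mirror,
                                     # else the error suggests "./upstream"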
@@ -99,5 +99,5 @@ def deconcretize(parser, args):
        " Use `spack deconcretize --all` to deconcretize ALL specs.",
    )

    specs = spack.cmd.parse_specs(args.specs) if args.specs else [any]
    specs = spack.cmd.parse_specs(args.specs) if args.specs else [None]
    deconcretize_specs(args, specs)
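This is the first of several identical sentinel swaps in this compare (the same change lands in `spack mark` and `spack uninstall` below): the old code used the builtin function `any` as a "match everything" marker, which downstream identity checks like `spec is not any` had to special-case. A minimal sketch of why `None` is the safer sentinel:

    # `any` is a real function object, so the old sentinel only worked by the
    # accident that no parsed spec is ever identical to a builtin:
    old_sentinel = any
    print(callable(old_sentinel))  # True - it is still Python's any()

    # None is inert and reads as "no constraint, return everything":
    for spec in [None]:
        if spec is None:
            pass  # query all installed packages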
@@ -85,8 +85,14 @@ def _retrieve_develop_source(spec: spack.spec.Spec, abspath: str) -> None:


def develop(parser, args):
    # Note: we could put develop specs in any scope, but I assume
    # users would only ever want to do this for either (a) an active
    # env or (b) a specified config file (e.g. that is included by
    # an environment)
    # TODO: when https://github.com/spack/spack/pull/35307 is merged,
    # an active env is not required if a scope is specified
    env = spack.cmd.require_active_env(cmd_name="develop")
    if not args.spec:
        env = spack.cmd.require_active_env(cmd_name="develop")
        if args.clone is False:
            raise SpackError("No spec provided to spack develop command")

@@ -116,16 +122,18 @@ def develop(parser, args):
        raise SpackError("spack develop requires at most one named spec")

    spec = specs[0]

    version = spec.versions.concrete_range_as_version
    if not version:
        raise SpackError("Packages to develop must have a concrete version")
        # look up the maximum version so infinity versions are preferred for develop
        version = max(spec.package_class.versions.keys())
        tty.msg(f"Defaulting to highest version: {spec.name}@{version}")
    spec.versions = spack.version.VersionList([version])

    # If user does not specify --path, we choose to create a directory in the
    # active environment's directory, named after the spec
    path = args.path or spec.name
    if not os.path.isabs(path):
        env = spack.cmd.require_active_env(cmd_name="develop")
        abspath = spack.util.path.canonicalize_path(path, default_wd=env.path)
    else:
        abspath = path
@@ -149,13 +157,6 @@ def develop(parser, args):

    _retrieve_develop_source(spec, abspath)

    # Note: we could put develop specs in any scope, but I assume
    # users would only ever want to do this for either (a) an active
    # env or (b) a specified config file (e.g. that is included by
    # an environment)
    # TODO: when https://github.com/spack/spack/pull/35307 is merged,
    # an active env is not required if a scope is specified
    env = spack.cmd.require_active_env(cmd_name="develop")
    tty.debug("Updating develop config for {0} transactionally".format(env.name))
    with env.write_transaction():
        if args.build_directory is not None:
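The new fallback replaces the hard error for version-less develop specs: when no version is given, the highest version known to the package class is used, which prefers "infinity" versions such as develop or main because they compare greater than any numeric release. A sketch with a stand-in versions dict (the versions themselves are hypothetical; assumes a Spack checkout on PYTHONPATH):

    import spack.version as sv

    versions = {                    # stand-in for spec.package_class.versions
        sv.Version("develop"): {},  # infinity version: sorts above any release
        sv.Version("2.1.0"): {},
        sv.Version("2.0.0"): {},
    }
    version = max(versions.keys())
    print(version)                  # develop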
@@ -10,11 +10,12 @@
import sys
import tempfile
from pathlib import Path
from typing import List, Optional
from typing import List, Optional, Set

import llnl.string as string
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.symlink import islink, symlink
from llnl.util.tty.colify import colify
from llnl.util.tty.color import cescape, colorize

@@ -50,6 +51,8 @@
    "update",
    "revert",
    "depfile",
    "track",
    "untrack",
]

@@ -57,35 +60,41 @@
# env create
#
def env_create_setup_parser(subparser):
    """create a new environment"""
    subparser.add_argument("env_name", metavar="env", help="name or directory of environment")
    """create a new environment

    create a new environment or, optionally, copy an existing environment

    a manifest file results in a new abstract environment while a lock file
    creates a new concrete environment
    """
    subparser.add_argument(
        "env_name", metavar="env", help="name or directory of the new environment"
    )
    subparser.add_argument(
        "-d", "--dir", action="store_true", help="create an environment in a specific directory"
    )
    subparser.add_argument(
        "--keep-relative",
        action="store_true",
        help="copy relative develop paths verbatim into the new environment"
        " when initializing from envfile",
        help="copy envfile's relative develop paths verbatim",
    )
    view_opts = subparser.add_mutually_exclusive_group()
    view_opts.add_argument(
        "--without-view", action="store_true", help="do not maintain a view for this environment"
    )
    view_opts.add_argument(
        "--with-view",
        help="specify that this environment should maintain a view at the"
        " specified path (by default the view is maintained in the"
        " environment directory)",
        "--with-view", help="maintain view at WITH_VIEW (vs. environment's directory)"
    )
    subparser.add_argument(
        "envfile",
        nargs="?",
        default=None,
        help="either a lockfile (must end with '.json' or '.lock') or a manifest file",
        help="manifest or lock file (ends with '.json' or '.lock')",
    )
    subparser.add_argument(
        "--include-concrete", action="append", help="name of old environment to copy specs from"
        "--include-concrete",
        action="append",
        help="copy concrete specs from INCLUDE_CONCRETE's environment",
    )

@@ -173,7 +182,7 @@ def _env_create(
# env activate
#
def env_activate_setup_parser(subparser):
    """set the current environment"""
    """set the active environment"""
    shells = subparser.add_mutually_exclusive_group()
    shells.add_argument(
        "--sh",
@@ -213,14 +222,14 @@ def env_activate_setup_parser(subparser):

    view_options = subparser.add_mutually_exclusive_group()
    view_options.add_argument(
        "--with-view",
        "-v",
        "--with-view",
        metavar="name",
        help="set runtime environment variables for specific view",
        help="set runtime environment variables for the named view",
    )
    view_options.add_argument(
        "--without-view",
        "-V",
        "--without-view",
        action="store_true",
        help="do not set runtime environment variables for any view",
    )
@@ -230,14 +239,14 @@ def env_activate_setup_parser(subparser):
        "--prompt",
        action="store_true",
        default=False,
        help="decorate the command line prompt when activating",
        help="add the active environment to the command line prompt",
    )

    subparser.add_argument(
        "--temp",
        action="store_true",
        default=False,
        help="create and activate an environment in a temporary directory",
        help="create and activate in a temporary directory",
    )
    subparser.add_argument(
        "--create",
@@ -249,13 +258,12 @@ def env_activate_setup_parser(subparser):
        "--envfile",
        nargs="?",
        default=None,
        help="either a lockfile (must end with '.json' or '.lock') or a manifest file",
        help="manifest or lock file (ends with '.json' or '.lock')",
    )
    subparser.add_argument(
        "--keep-relative",
        action="store_true",
        help="copy relative develop paths verbatim into the new environment"
        " when initializing from envfile",
        help="copy envfile's relative develop paths verbatim when creating",
    )
    subparser.add_argument(
        "-d",
@@ -269,10 +277,7 @@ def env_activate_setup_parser(subparser):
        dest="env_name",
        nargs="?",
        default=None,
        help=(
            "name of managed environment or directory of the independent env"
            " (when using --dir/-d) to activate"
        ),
        help=("name or directory of the environment being activated"),
    )

@@ -385,7 +390,7 @@ def env_activate(args):
# env deactivate
#
def env_deactivate_setup_parser(subparser):
    """deactivate any active environment in the shell"""
    """deactivate the active environment"""
    shells = subparser.add_mutually_exclusive_group()
    shells.add_argument(
        "--sh",
@@ -444,104 +449,253 @@ def env_deactivate(args):
    sys.stdout.write(cmds)


#
# env track
#
def env_track_setup_parser(subparser):
    """track an environment from a directory in Spack"""
    subparser.add_argument("-n", "--name", help="custom environment name")
    subparser.add_argument("dir", help="path to environment")
    arguments.add_common_arguments(subparser, ["yes_to_all"])


def env_track(args):
    src_path = os.path.abspath(args.dir)
    if not ev.is_env_dir(src_path):
        tty.die("Cannot track environment. Path doesn't contain an environment")

    if args.name:
        name = args.name
    else:
        name = os.path.basename(src_path)

    try:
        dst_path = ev.environment_dir_from_name(name, exists_ok=False)
    except ev.SpackEnvironmentError:
        tty.die(
            f"An environment named {name} already exists. Set a name with:"
            "\n\n"
            f"    spack env track --name NAME {src_path}\n"
        )

    symlink(src_path, dst_path)

    tty.msg(f"Tracking environment in {src_path}")
    tty.msg(
        "You can now activate this environment with the following command:\n\n"
        f"    spack env activate {name}\n"
    )

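In practice the new subcommand symlinks an external environment directory into Spack's managed-environments directory so it can be activated by name. A hypothetical session (the path is illustrative):

    $ spack env track --name mydev ~/projects/myapp/env
    $ spack env activate mydev
    $ spack env untrack mydev    # removes only the symlink, not the directory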
#
# env remove & untrack helpers
#
def filter_managed_env_names(env_names: Set[str]) -> Set[str]:
    tracked_env_names = {e for e in env_names if islink(ev.environment_dir_from_name(e))}
    managed_env_names = env_names - set(tracked_env_names)

    num_managed_envs = len(managed_env_names)
    managed_envs_str = " ".join(managed_env_names)
    if num_managed_envs >= 2:
        tty.error(
            f"The following are not tracked environments. "
            "To remove them completely run,"
            "\n\n"
            f"    spack env rm {managed_envs_str}\n"
        )

    elif num_managed_envs > 0:
        tty.error(
            f"'{managed_envs_str}' is not a tracked env. "
            "To remove it completely run,"
            "\n\n"
            f"    spack env rm {managed_envs_str}\n"
        )

    return tracked_env_names


def get_valid_envs(env_names: Set[str]) -> Set[ev.Environment]:
    valid_envs = set()
    for env_name in env_names:
        try:
            env = ev.read(env_name)
            valid_envs.add(env)

        except (spack.config.ConfigFormatError, ev.SpackEnvironmentConfigError):
            pass

    return valid_envs

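These helpers rely on one invariant: a tracked environment is represented inside Spack's environments directory as a symlink to the external source directory, while a managed environment is a real directory. A minimal standalone sketch of that test, using only the standard library:

    import os

    def is_tracked(env_dir: str) -> bool:
        # Tracked envs are symlinks created by `spack env track`;
        # managed envs are real directories owned by Spack.
        return os.path.islink(env_dir)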
def _env_untrack_or_remove(
    env_names: List[str], remove: bool = False, force: bool = False, yes_to_all: bool = False
):
    all_env_names = set(ev.all_environment_names())
    known_env_names = set(env_names).intersection(all_env_names)
    unknown_env_names = set(env_names) - known_env_names

    # print error for unknown environments
    for env_name in unknown_env_names:
        tty.error(f"Environment '{env_name}' does not exist")

    # if only unlinking is allowed, remove all environments
    # which do not point internally at symlinks
    if not remove:
        env_names_to_remove = filter_managed_env_names(known_env_names)
    else:
        env_names_to_remove = known_env_names

    # initialize all environments with valid spack.yaml configs
    all_valid_envs = get_valid_envs(all_env_names)

    # build a task list of environments and bad env names to remove
    envs_to_remove = [e for e in all_valid_envs if e.name in env_names_to_remove]
    bad_env_names_to_remove = env_names_to_remove - {e.name for e in envs_to_remove}
    for remove_env in envs_to_remove:
        for env in all_valid_envs:
            # don't check if an environment is included to itself
            if env.name == remove_env.name:
                continue

            # check if an environment is included in another
            if remove_env.path in env.included_concrete_envs:
                msg = f"Environment '{remove_env.name}' is used by environment '{env.name}'"
                if force:
                    tty.warn(msg)
                else:
                    tty.error(msg)
                    envs_to_remove.remove(remove_env)

    # ask the user if they really want to remove the known environments
    # force should do the same as yes to all here following the semantics of rm
    if not (yes_to_all or force) and (envs_to_remove or bad_env_names_to_remove):
        environments = string.plural(len(env_names_to_remove), "environment", show_n=False)
        envs = string.comma_and(list(env_names_to_remove))
        answer = tty.get_yes_or_no(
            f"Really {'remove' if remove else 'untrack'} {environments} {envs}?", default=False
        )
        if not answer:
            tty.die("Will not remove any environments")

    # keep track of the environments we remove for later printing the exit code
    removed_env_names = []
    for env in envs_to_remove:
        name = env.name
        if not force and env.active:
            tty.error(
                f"Environment '{name}' can't be "
                f"{'removed' if remove else 'untracked'} while activated."
            )
            continue
        # Get path to check if environment is a tracked / symlinked environment
        if islink(env.path):
            real_env_path = os.path.realpath(env.path)
            os.unlink(env.path)
            tty.msg(
                f"Successfully untracked environment '{name}', "
                "but it can still be found at:\n\n"
                f"    {real_env_path}\n"
            )
        else:
            env.destroy()
            tty.msg(f"Successfully removed environment '{name}'")

        removed_env_names.append(env.name)

    for bad_env_name in bad_env_names_to_remove:
        shutil.rmtree(
            spack.environment.environment.environment_dir_from_name(bad_env_name, exists_ok=True)
        )
        tty.msg(f"Successfully removed environment '{bad_env_name}'")
        removed_env_names.append(bad_env_name)

    # Following the design of linux rm we should exit with a status of 1
    # anytime we cannot delete every environment the user asks for.
    # However, we should still process all the environments we know about
    # and delete them instead of failing on the first unknown environment.
    if len(removed_env_names) < len(known_env_names):
        sys.exit(1)

#
# env untrack
#
def env_untrack_setup_parser(subparser):
    """untrack an environment, leaving its directory in place"""
    subparser.add_argument("env", nargs="+", help="tracked environment name")
    subparser.add_argument(
        "-f", "--force", action="store_true", help="force unlink even when environment is active"
    )
    arguments.add_common_arguments(subparser, ["yes_to_all"])


def env_untrack(args):
    _env_untrack_or_remove(
        env_names=args.env, force=args.force, yes_to_all=args.yes_to_all, remove=False
    )


#
# env remove
#
def env_remove_setup_parser(subparser):
    """remove an existing environment"""
    subparser.add_argument("rm_env", metavar="env", nargs="+", help="environment(s) to remove")
    """remove managed environment(s)

    remove existing environment(s) managed by Spack

    directory environments and manifests embedded in repositories must be
    removed manually
    """
    subparser.add_argument(
        "rm_env", metavar="env", nargs="+", help="name(s) of the environment(s) being removed"
    )
    arguments.add_common_arguments(subparser, ["yes_to_all"])
    subparser.add_argument(
        "-f",
        "--force",
        action="store_true",
        help="remove the environment even if it is included in another environment",
        help="force removal even when included in other environment(s)",
    )

def env_remove(args):
    """Remove a *named* environment.

    This removes an environment managed by Spack. Directory environments
    and manifests embedded in repositories should be removed manually.
    """
    remove_envs = []
    valid_envs = []
    bad_envs = []

    for env_name in ev.all_environment_names():
        try:
            env = ev.read(env_name)
            valid_envs.append(env)

            if env_name in args.rm_env:
                remove_envs.append(env)
        except (spack.config.ConfigFormatError, ev.SpackEnvironmentConfigError):
            if env_name in args.rm_env:
                bad_envs.append(env_name)

    # Check if remove_env is included from another env before trying to remove
    for env in valid_envs:
        for remove_env in remove_envs:
            # don't check if environment is included to itself
            if env.name == remove_env.name:
                continue

            if remove_env.path in env.included_concrete_envs:
                msg = f'Environment "{remove_env.name}" is being used by environment "{env.name}"'
                if args.force:
                    tty.warn(msg)
                else:
                    tty.die(msg)

    if not args.yes_to_all:
        environments = string.plural(len(args.rm_env), "environment", show_n=False)
        envs = string.comma_and(args.rm_env)
        answer = tty.get_yes_or_no(f"Really remove {environments} {envs}?", default=False)
        if not answer:
            tty.die("Will not remove any environments")

    for env in remove_envs:
        name = env.name
        if env.active:
            tty.die(f"Environment {name} can't be removed while activated.")
        env.destroy()
        tty.msg(f"Successfully removed environment '{name}'")

    for bad_env_name in bad_envs:
        shutil.rmtree(
            spack.environment.environment.environment_dir_from_name(bad_env_name, exists_ok=True)
        )
        tty.msg(f"Successfully removed environment '{bad_env_name}'")
    """remove existing environment(s)"""
    _env_untrack_or_remove(
        env_names=args.rm_env, remove=True, force=args.force, yes_to_all=args.yes_to_all
    )

#
# env rename
#
def env_rename_setup_parser(subparser):
    """rename an existing environment"""
    """rename an existing environment

    rename a managed environment or move an independent/directory environment

    operation cannot be performed to or from an active environment
    """
    subparser.add_argument(
        "mv_from", metavar="from", help="name (or path) of existing environment"
    )
    subparser.add_argument(
        "mv_to", metavar="to", help="new name (or path) for existing environment"
        "mv_from", metavar="from", help="current name or directory of the environment"
    )
    subparser.add_argument("mv_to", metavar="to", help="new name or directory for the environment")
    subparser.add_argument(
        "-d",
        "--dir",
        action="store_true",
        help="the specified arguments correspond to directory paths",
        help="positional arguments are environment directory paths",
    )
    subparser.add_argument(
        "-f", "--force", action="store_true", help="allow overwriting of an existing environment"
        "-f",
        "--force",
        action="store_true",
        help="force renaming even if overwriting an existing environment",
    )


def env_rename(args):
    """Rename an environment.

    This renames a managed environment or moves an independent environment.
    """
    """rename or move an existing environment"""

    # Directory option has been specified
    if args.dir:
@@ -590,7 +744,7 @@ def env_rename(args):
# env list
#
def env_list_setup_parser(subparser):
    """list managed environments"""
    """list all managed environments"""


def env_list(args):
@@ -626,13 +780,14 @@ def actions():
# env view
#
def env_view_setup_parser(subparser):
    """manage a view associated with the environment"""
    """manage the environment's view

    provide the path when enabling a view with a non-default path
    """
    subparser.add_argument(
        "action", choices=ViewAction.actions(), help="action to take for the environment's view"
    )
    subparser.add_argument(
        "view_path", nargs="?", help="when enabling a view, optionally set the path manually"
    )
    subparser.add_argument("view_path", nargs="?", help="view's non-default path when enabling it")


def env_view(args):
@@ -660,7 +815,7 @@ def env_view(args):
# env status
#
def env_status_setup_parser(subparser):
    """print whether there is an active environment"""
    """print active environment status"""


def env_status(args):
@@ -720,14 +875,22 @@ def env_loads(args):


def env_update_setup_parser(subparser):
    """update environments to the latest format"""
    """update the environment manifest to the latest schema format

    update the environment to the latest schema format, which may not be
    readable by older versions of spack

    a backup copy of the manifest is retained in case there is a need to revert
    this operation
    """
    subparser.add_argument(
        metavar="env", dest="update_env", help="name or directory of the environment to activate"
        metavar="env", dest="update_env", help="name or directory of the environment"
    )
    spack.cmd.common.arguments.add_common_arguments(subparser, ["yes_to_all"])


def env_update(args):
    """update the manifest to the latest format"""
    manifest_file = ev.manifest_file(args.update_env)
    backup_file = manifest_file + ".bkp"

@@ -757,14 +920,22 @@ def env_update(args):


def env_revert_setup_parser(subparser):
    """restore environments to their state before update"""
    """restore the environment manifest to its previous format

    revert the environment's manifest to the schema format from its last
    'spack env update'

    the current manifest will be overwritten by the backup copy and the backup
    copy will be removed
    """
    subparser.add_argument(
        metavar="env", dest="revert_env", help="name or directory of the environment to activate"
        metavar="env", dest="revert_env", help="name or directory of the environment"
    )
    spack.cmd.common.arguments.add_common_arguments(subparser, ["yes_to_all"])


def env_revert(args):
    """restore the environment manifest to its previous format"""
    manifest_file = ev.manifest_file(args.revert_env)
    backup_file = manifest_file + ".bkp"

@@ -796,15 +967,19 @@ def env_revert(args):


def env_depfile_setup_parser(subparser):
    """generate a depfile from the concrete environment specs"""
    """generate a depfile to exploit parallel builds across specs

    requires the active environment to be concrete
    """
    subparser.add_argument(
        "--make-prefix",
        "--make-target-prefix",
        default=None,
        metavar="TARGET",
        help="prefix Makefile targets (and variables) with <TARGET>/<name>\n\nby default "
        "the absolute path to the directory makedeps under the environment metadata dir is "
        "used. can be set to an empty string --make-prefix ''",
        help="prefix Makefile targets/variables with <TARGET>/<name>,\n"
        "which can be an empty string (--make-prefix '')\n"
        "defaults to the absolute path of the environment's makedeps\n"
        "environment metadata dir\n",
    )
    subparser.add_argument(
        "--make-disable-jobserver",
@@ -819,8 +994,8 @@ def env_depfile_setup_parser(subparser):
        type=arguments.use_buildcache,
        default="package:auto,dependencies:auto",
        metavar="[{auto,only,never},][package:{auto,only,never},][dependencies:{auto,only,never}]",
        help="when using `only`, redundant build dependencies are pruned from the DAG\n\n"
        "this flag is passed on to the generated spack install commands",
        help="use `only` to prune redundant build dependencies\n"
        "option is also passed to generated spack install commands",
    )
    subparser.add_argument(
        "-o",
@@ -834,14 +1009,14 @@ def env_depfile_setup_parser(subparser):
        "--generator",
        default="make",
        choices=("make",),
        help="specify the depfile type\n\ncurrently only make is supported",
        help="specify the depfile type (only supports `make`)",
    )
    subparser.add_argument(
        metavar="specs",
        dest="specs",
        nargs=argparse.REMAINDER,
        default=None,
        help="generate a depfile only for matching specs in the environment",
        help="limit the generated file to matching specs",
    )

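As the new summary line says, the generated depfile lets make schedule the environment's package installs in parallel; the environment must already be concrete. A hypothetical session (environment name, output file, and job count are illustrative):

    $ spack env activate myenv && spack concretize
    $ spack env depfile -o Makefile
    $ make -j8    # make's scheduler now drives the per-spec install targets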
@@ -910,7 +1085,12 @@ def setup_parser(subparser):
    setup_parser_cmd_name = "env_%s_setup_parser" % name
    setup_parser_cmd = globals()[setup_parser_cmd_name]

    subsubparser = sp.add_parser(name, aliases=aliases, help=setup_parser_cmd.__doc__)
    subsubparser = sp.add_parser(
        name,
        aliases=aliases,
        description=setup_parser_cmd.__doc__,
        help=spack.cmd.first_line(setup_parser_cmd.__doc__),
    )
    setup_parser_cmd(subsubparser)

@@ -174,17 +174,17 @@ def query_arguments(args):
    if (args.missing or args.only_missing) and not args.only_deprecated:
        installed.append(InstallStatuses.MISSING)

    known = any
    predicate_fn = None
    if args.unknown:
        known = False
        predicate_fn = lambda x: not spack.repo.PATH.exists(x.spec.name)

    explicit = any
    explicit = None
    if args.explicit:
        explicit = True
    if args.implicit:
        explicit = False

    q_args = {"installed": installed, "known": known, "explicit": explicit}
    q_args = {"installed": installed, "predicate_fn": predicate_fn, "explicit": explicit}

    install_tree = args.install_tree
    upstreams = spack.config.get("upstreams", {})

@@ -80,8 +80,8 @@ def find_matching_specs(specs, allow_multiple_matches=False):
    has_errors = True

    # No installed package matches the query
    if len(matching) == 0 and spec is not any:
        tty.die("{0} does not match any installed packages.".format(spec))
    if len(matching) == 0 and spec is not None:
        tty.die(f"{spec} does not match any installed packages.")

    specs_from_cli.extend(matching)

@@ -98,8 +98,9 @@ def do_mark(specs, explicit):
    specs (list): list of specs to be marked
    explicit (bool): whether to mark specs as explicitly installed
    """
    for spec in specs:
        spack.store.STORE.db.update_explicit(spec, explicit)
    with spack.store.STORE.db.write_transaction():
        for spec in specs:
            spack.store.STORE.db.mark(spec, "explicit", explicit)


def mark_specs(args, specs):
@@ -116,6 +117,6 @@ def mark(parser, args):
        " Use `spack mark --all` to mark ALL packages.",
    )

    # [any] here handles the --all case by forcing all specs to be returned
    specs = spack.cmd.parse_specs(args.specs) if args.specs else [any]
    # [None] here handles the --all case by forcing all specs to be returned
    specs = spack.cmd.parse_specs(args.specs) if args.specs else [None]
    mark_specs(args, specs)

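The do_mark change is behavioral as well as cosmetic: the loop now runs inside a single write transaction, so marking N specs takes one database lock and commit rather than N of them. A hypothetical session exercising both code paths ('zlib' is an illustrative package):

    $ spack mark --implicit zlib     # parse_specs path: one spec marked
    $ spack mark --all --explicit    # --all path: [None] sentinel selects everything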
@@ -378,7 +378,10 @@ def refresh(module_type, specs, args):
def modules_cmd(parser, args, module_type, callbacks=callbacks):
    # Qualifiers to be used when querying the db for specs
    constraint_qualifiers = {
        "refresh": {"installed": True, "known": lambda x: not spack.repo.PATH.exists(x)}
        "refresh": {
            "installed": True,
            "predicate_fn": lambda x: spack.repo.PATH.exists(x.spec.name),
        }
    }
    query_args = constraint_qualifiers.get(args.subparser_name, {})

@@ -33,8 +33,9 @@ def patch(parser, args):
    spack.config.set("config:checksum", False, scope="command_line")

    specs = spack.cmd.parse_specs(args.specs, concretize=False)
    specs = spack.cmd.matching_specs_from_env(specs)
    for spec in specs:
        _patch(spack.cmd.matching_spec_from_env(spec).package)
        _patch(spec.package)


def _patch_env(env: ev.Environment):

@@ -3,7 +3,6 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import argparse
import re
import sys

@@ -12,13 +11,12 @@

import spack
import spack.cmd
import spack.cmd.common.arguments
import spack.cmd.spec
import spack.config
import spack.environment
import spack.hash_types as ht
import spack.solver.asp as asp
import spack.spec
from spack.cmd.common import arguments

description = "concretize specs using an ASP solver"
section = "developer"
@@ -41,42 +39,6 @@ def setup_parser(subparser):
        "  solutions  models found by asp program\n"
        "  all        all of the above",
    )

    # Below are arguments w.r.t. spec display (like spack spec)
    arguments.add_common_arguments(subparser, ["long", "very_long", "namespaces"])

    install_status_group = subparser.add_mutually_exclusive_group()
    arguments.add_common_arguments(install_status_group, ["install_status", "no_install_status"])

    subparser.add_argument(
        "-y",
        "--yaml",
        action="store_const",
        dest="format",
        default=None,
        const="yaml",
        help="print concrete spec as yaml",
    )
    subparser.add_argument(
        "-j",
        "--json",
        action="store_const",
        dest="format",
        default=None,
        const="json",
        help="print concrete spec as json",
    )
    subparser.add_argument(
        "-c",
        "--cover",
        action="store",
        default="nodes",
        choices=["nodes", "edges", "paths"],
        help="how extensively to traverse the DAG (default: nodes)",
    )
    subparser.add_argument(
        "-t", "--types", action="store_true", default=False, help="show dependency types"
    )
    subparser.add_argument(
        "--timers",
        action="store_true",
@@ -86,9 +48,8 @@ def setup_parser(subparser):
    subparser.add_argument(
        "--stats", action="store_true", default=False, help="print out statistics from clingo"
    )
    subparser.add_argument("specs", nargs=argparse.REMAINDER, help="specs of packages")

    spack.cmd.common.arguments.add_concretizer_args(subparser)
    spack.cmd.spec.setup_parser(subparser)


def _process_result(result, show, required_format, kwargs):
@@ -164,11 +125,12 @@ def solve(parser, args):

    # If we have an active environment, pick the specs from there
    env = spack.environment.active_environment()
    if env and args.specs:
        msg = "cannot give explicit specs when an environment is active"
        raise RuntimeError(msg)

    specs = list(env.user_specs) if env else spack.cmd.parse_specs(args.specs)
    if args.specs:
        specs = spack.cmd.parse_specs(args.specs)
    elif env:
        specs = list(env.user_specs)
    else:
        tty.die("spack solve requires at least one spec or an active environment")

    solver = asp.Solver()
    output = sys.stdout if "asp" in show else None

@@ -96,26 +96,25 @@ def spec(parser, args):
    if args.install_status:
        tree_context = spack.store.STORE.db.read_transaction

    # Use command line specified specs, otherwise try to use environment specs.
    env = ev.active_environment()

    if args.specs:
        input_specs = spack.cmd.parse_specs(args.specs)
        concretized_specs = spack.cmd.parse_specs(args.specs, concretize=True)
        specs = list(zip(input_specs, concretized_specs))
    else:
        env = ev.active_environment()
        if env:
            env.concretize()
            specs = env.concretized_specs()
    elif env:
        env.concretize()
        specs = env.concretized_specs()

            if not args.format:
        # environments are printed together in a combined tree() invocation,
        # except when using --yaml or --json, which we print spec by spec below.
        if not args.format:
                tree_kwargs["key"] = spack.traverse.by_dag_hash
                tree_kwargs["hashes"] = args.long or args.very_long
                print(spack.spec.tree([concrete for _, concrete in specs], **tree_kwargs))
                return
        else:
            tty.die("spack spec requires at least one spec or an active environment")
            tree_kwargs["key"] = spack.traverse.by_dag_hash
            tree_kwargs["hashes"] = args.long or args.very_long
            print(spack.spec.tree([concrete for _, concrete in specs], **tree_kwargs))
            return
    else:
        tty.die("spack spec requires at least one spec or an active environment")

    for input, output in specs:
        # With --yaml or --json, just print the raw specs to output
@@ -47,8 +47,8 @@ def stage(parser, args):
    if len(specs) > 1 and custom_path:
        tty.die("`--path` requires a single spec, but multiple were provided")

    specs = spack.cmd.matching_specs_from_env(specs)
    for spec in specs:
        spec = spack.cmd.matching_spec_from_env(spec)
        pkg = spec.package

        if custom_path:
@@ -165,7 +165,7 @@ def test_run(args):
    if args.fail_fast:
        spack.config.set("config:fail_fast", True, scope="command_line")

    explicit = args.explicit or any
    explicit = args.explicit or None
    explicit_str = "explicitly " if args.explicit else ""

    # Get specs to test

@@ -90,6 +90,7 @@ def find_matching_specs(
    env: optional active environment
    specs: list of specs to be matched against installed packages
    allow_multiple_matches: if True multiple matches are admitted
    origin: origin of the spec

    Return:
        list: list of specs
@@ -98,7 +99,7 @@ def find_matching_specs(
    hashes = env.all_hashes() if env else None

    # List of specs that match expressions given via command line
    specs_from_cli = []
    specs_from_cli: List["spack.spec.Spec"] = []
    has_errors = False
    for spec in specs:
        install_query = [InstallStatuses.INSTALLED, InstallStatuses.DEPRECATED]
@@ -116,7 +117,7 @@ def find_matching_specs(
        has_errors = True

    # No installed package matches the query
    if len(matching) == 0 and spec is not any:
    if len(matching) == 0 and spec is not None:
        if env:
            pkg_type = "packages in environment '%s'" % env.name
        else:
@@ -213,7 +214,7 @@ def get_uninstall_list(args, specs: List[spack.spec.Spec], env: Optional[ev.Envi

    # Gets the list of installed specs that match the ones given via cli
    # args.all takes care of the case where '-a' is given in the cli
    matching_specs = find_matching_specs(env, specs, args.all)
    matching_specs = find_matching_specs(env, specs, args.all, origin=args.origin)
    dependent_specs = installed_dependents(matching_specs)
    all_uninstall_specs = matching_specs + dependent_specs if args.dependents else matching_specs
    other_dependent_envs = dependent_environments(all_uninstall_specs, current_env=env)
@@ -301,6 +302,6 @@ def uninstall(parser, args):
        " Use `spack uninstall --all` to uninstall ALL packages.",
    )

    # [any] here handles the --all case by forcing all specs to be returned
    specs = spack.cmd.parse_specs(args.specs) if args.specs else [any]
    # [None] here handles the --all case by forcing all specs to be returned
    specs = spack.cmd.parse_specs(args.specs) if args.specs else [None]
    uninstall_specs(args, specs)

@@ -33,6 +33,8 @@
    YamlFilesystemView.

"""
import sys

import llnl.util.tty as tty
from llnl.util.link_tree import MergeConflictError

@@ -178,7 +180,12 @@ def setup_parser(sp):


def view(parser, args):
    "Produce a view of a set of packages."
    """Produce a view of a set of packages."""

    if sys.platform == "win32" and args.action in ("hardlink", "hard"):
        # Hard-linked views are not yet allowed on Windows.
        # See https://github.com/spack/spack/pull/46335#discussion_r1757411915
        tty.die("Hard linking is not supported on Windows. Please use symlinks or copy methods.")

    specs = spack.cmd.parse_specs(args.specs)
    path = args.path[0]

@@ -4,20 +4,23 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import contextlib
import hashlib
import itertools
import json
import os
import platform
import re
import shutil
import sys
import tempfile
from typing import List, Optional, Sequence
from typing import Dict, List, Optional, Sequence

import llnl.path
import llnl.util.lang
import llnl.util.tty as tty
from llnl.util.filesystem import path_contains_subdirectory, paths_containing_libs

import spack.caches
import spack.error
import spack.schema.environment
import spack.spec
@@ -26,6 +29,7 @@
import spack.util.module_cmd
import spack.version
from spack.util.environment import filter_system_paths
from spack.util.file_cache import FileCache

__all__ = ["Compiler"]

@@ -34,7 +38,7 @@


@llnl.util.lang.memoized
def _get_compiler_version_output(compiler_path, version_arg, ignore_errors=()):
def _get_compiler_version_output(compiler_path, version_arg, ignore_errors=()) -> str:
    """Invokes the compiler at a given path passing a single
    version argument and returns the output.

@@ -57,7 +61,7 @@ def _get_compiler_version_output(compiler_path, version_arg, ignore_errors=()):
    return output


def get_compiler_version_output(compiler_path, *args, **kwargs):
def get_compiler_version_output(compiler_path, *args, **kwargs) -> str:
    """Wrapper for _get_compiler_version_output()."""
    # This ensures that we memoize compiler output by *absolute path*,
    # not just executable name. If we don't do this, and the path changes
@@ -275,7 +279,7 @@ def __init__(
    operating_system,
    target,
    paths,
    modules=None,
    modules: Optional[List[str]] = None,
    alias=None,
    environment=None,
    extra_rpaths=None,
@@ -290,6 +294,7 @@ def __init__(
    self.environment = environment or {}
    self.extra_rpaths = extra_rpaths or []
    self.enable_implicit_rpaths = enable_implicit_rpaths
    self.cache = COMPILER_CACHE

    self.cc = paths[0]
    self.cxx = paths[1]
@@ -390,15 +395,11 @@ def real_version(self):

    E.g. C++11 flag checks.
    """
    if not self._real_version:
        try:
            real_version = spack.version.Version(self.get_real_version())
            if real_version == spack.version.Version("unknown"):
                return self.version
            self._real_version = real_version
        except spack.util.executable.ProcessError:
            self._real_version = self.version
    return self._real_version
    real_version_str = self.cache.get(self).real_version
    if not real_version_str or real_version_str == "unknown":
        return self.version

    return spack.version.StandardVersion.from_string(real_version_str)

def implicit_rpaths(self) -> List[str]:
    if self.enable_implicit_rpaths is False:
@@ -445,9 +446,7 @@ def required_libs(self):
@property
def compiler_verbose_output(self) -> Optional[str]:
    """Verbose output from compiling a dummy C source file. Output is cached."""
    if not hasattr(self, "_compile_c_source_output"):
        self._compile_c_source_output = self._compile_dummy_c_source()
    return self._compile_c_source_output
    return self.cache.get(self).c_compiler_output
def _compile_dummy_c_source(self) -> Optional[str]:
    cc = self.cc if self.cc else self.cxx
@@ -559,7 +558,7 @@ def fc_pic_flag(self):
# Note: This is not a class method. The class methods are used to detect
# compilers on PATH based systems, and do not set up the run environment of
# the compiler. This method can be called on `module` based systems as well
def get_real_version(self):
def get_real_version(self) -> str:
    """Query the compiler for its version.

    This is the "real" compiler version, regardless of what is in the
@@ -569,14 +568,17 @@ def get_real_version(self):
    modifications) to enable the compiler to run properly on any platform.
    """
    cc = spack.util.executable.Executable(self.cc)
    with self.compiler_environment():
        output = cc(
            self.version_argument,
            output=str,
            error=str,
            ignore_errors=tuple(self.ignore_version_errors),
        )
        return self.extract_version_from_output(output)
    try:
        with self.compiler_environment():
            output = cc(
                self.version_argument,
                output=str,
                error=str,
                ignore_errors=tuple(self.ignore_version_errors),
            )
            return self.extract_version_from_output(output)
    except spack.util.executable.ProcessError:
        return "unknown"

@property
def prefix(self):
@@ -603,7 +605,7 @@ def default_version(cls, cc):

@classmethod
@llnl.util.lang.memoized
def extract_version_from_output(cls, output):
def extract_version_from_output(cls, output: str) -> str:
    """Extracts the version from compiler's output."""
    match = re.search(cls.version_regex, output)
    return match.group(1) if match else "unknown"
@@ -732,3 +734,106 @@ def __init__(self, compiler, feature, flag_name, ver_string=None):
    )
    + " implement the {0} property and submit a pull request or issue.".format(flag_name),
)

class CompilerCacheEntry:
    """Deserialized cache entry for a compiler"""

    __slots__ = ["c_compiler_output", "real_version"]

    def __init__(self, c_compiler_output: Optional[str], real_version: str):
        self.c_compiler_output = c_compiler_output
        self.real_version = real_version

    @classmethod
    def from_dict(cls, data: Dict[str, Optional[str]]):
        if not isinstance(data, dict):
            raise ValueError(f"Invalid {cls.__name__} data")
        c_compiler_output = data.get("c_compiler_output")
        real_version = data.get("real_version")
        if not isinstance(real_version, str) or not isinstance(
            c_compiler_output, (str, type(None))
        ):
            raise ValueError(f"Invalid {cls.__name__} data")
        return cls(c_compiler_output, real_version)


class CompilerCache:
    """Base class for compiler output cache. Default implementation does not cache anything."""

    def value(self, compiler: Compiler) -> Dict[str, Optional[str]]:
        return {
            "c_compiler_output": compiler._compile_dummy_c_source(),
            "real_version": compiler.get_real_version(),
        }

    def get(self, compiler: Compiler) -> CompilerCacheEntry:
        return CompilerCacheEntry.from_dict(self.value(compiler))


class FileCompilerCache(CompilerCache):
    """Cache for compiler output, which is used to determine implicit link paths, the default libc
    version, and the compiler version."""

    name = os.path.join("compilers", "compilers.json")

    def __init__(self, cache: "FileCache") -> None:
        self.cache = cache
        self.cache.init_entry(self.name)
        self._data: Dict[str, Dict[str, Optional[str]]] = {}

    def _get_entry(self, key: str) -> Optional[CompilerCacheEntry]:
        try:
            return CompilerCacheEntry.from_dict(self._data[key])
        except ValueError:
            del self._data[key]
        except KeyError:
            pass
        return None

    def get(self, compiler: Compiler) -> CompilerCacheEntry:
        # Cache hit
        try:
            with self.cache.read_transaction(self.name) as f:
                assert f is not None
                self._data = json.loads(f.read())
                assert isinstance(self._data, dict)
        except (json.JSONDecodeError, AssertionError):
            self._data = {}

        key = self._key(compiler)
        value = self._get_entry(key)
        if value is not None:
            return value

        # Cache miss
        with self.cache.write_transaction(self.name) as (old, new):
            try:
                assert old is not None
                self._data = json.loads(old.read())
                assert isinstance(self._data, dict)
            except (json.JSONDecodeError, AssertionError):
                self._data = {}

            # Use cache entry that may have been created by another process in the meantime.
            entry = self._get_entry(key)

            # Finally compute the cache entry
            if entry is None:
                self._data[key] = self.value(compiler)
                entry = CompilerCacheEntry.from_dict(self._data[key])

            new.write(json.dumps(self._data, separators=(",", ":")))

        return entry

    def _key(self, compiler: Compiler) -> str:
        as_bytes = json.dumps(compiler.to_dict(), separators=(",", ":")).encode("utf-8")
        return hashlib.sha256(as_bytes).hexdigest()


def _make_compiler_cache():
    return FileCompilerCache(spack.caches.MISC_CACHE)


COMPILER_CACHE: CompilerCache = llnl.util.lang.Singleton(_make_compiler_cache)  # type: ignore

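The cache key is worth spelling out: the compiler's serialized definition is hashed, so editing anything in a compiler entry (paths, flags, modules) naturally invalidates its cached version and verbose output. A self-contained sketch of the same keying scheme (the dict is a hypothetical stand-in for Compiler.to_dict(), which is assumed to serialize keys in a stable order):

    import hashlib
    import json

    def cache_key(compiler_dict: dict) -> str:
        # Compact, deterministic serialization -> stable sha256 key.
        as_bytes = json.dumps(compiler_dict, separators=(",", ":")).encode("utf-8")
        return hashlib.sha256(as_bytes).hexdigest()

    print(cache_key({"spec": "gcc@12.3.0", "paths": {"cc": "/usr/bin/gcc"}}))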
@@ -116,5 +116,5 @@ def fflags(self):
def _handle_default_flag_addtions(self):
    # This is a known issue for AOCC 3.0 see:
    # https://developer.amd.com/wp-content/resources/AOCC-3.0-Install-Guide.pdf
    if self.real_version.satisfies(ver("3.0.0")):
    if self.version.satisfies(ver("3.0.0")):
        return "-Wno-unused-command-line-argument " "-mllvm -eliminate-similar-expr=false"

@@ -92,6 +92,14 @@ def c11_flag(self):
    else:
        return "-std=c1x"

@property
def c18_flag(self):
    # c18 supported since oneapi 2022, which is classic version 2021.5.0
    if self.real_version < Version("21.5.0"):
        raise UnsupportedCompilerFlag(self, "the C18 standard", "c18_flag", "< 21.5.0")
    else:
        return "-std=c18"

@property
def cc_pic_flag(self):
    return "-fPIC"
@@ -116,9 +124,8 @@ def setup_custom_environment(self, pkg, env):
    # Edge cases for Intel's oneAPI compilers when using the legacy classic compilers:
    # Always pass flags to disable deprecation warnings, since these warnings can
    # confuse tools that parse the output of compiler commands (e.g. version checks).
    if self.cc and self.cc.endswith("icc") and self.real_version >= Version("2021"):
    if self.real_version >= Version("2021") and self.real_version <= Version("2023"):
        env.append_flags("SPACK_ALWAYS_CFLAGS", "-diag-disable=10441")
    if self.cxx and self.cxx.endswith("icpc") and self.real_version >= Version("2021"):
        env.append_flags("SPACK_ALWAYS_CXXFLAGS", "-diag-disable=10441")
    if self.fc and self.fc.endswith("ifort") and self.real_version >= Version("2021"):
    if self.real_version >= Version("2021") and self.real_version <= Version("2024"):
        env.append_flags("SPACK_ALWAYS_FFLAGS", "-diag-disable=10448")

@@ -293,6 +293,17 @@ def platform_toolset_ver(self):
    vs22_toolset = Version(toolset_ver) > Version("142")
    return toolset_ver if not vs22_toolset else "143"

@property
def visual_studio_version(self):
    """The four digit Visual Studio version (i.e. 2019 or 2022)

    Note: This differs from the msvc version or toolset version as
    those properties track the compiler and build tools version
    respectively, whereas this tracks the VS release associated
    with a given MSVC compiler.
    """
    return re.search(r"[0-9]{4}", self.cc).group(0)

def _compiler_version(self, compiler):
    """Returns version object for given compiler"""
    # ignore_errors below is true here due to ifx's

@@ -7,7 +7,9 @@
from os.path import dirname, join

from llnl.util import tty
from llnl.util.filesystem import ancestor

import spack.util.executable
from spack.compiler import Compiler
from spack.version import Version

@@ -116,6 +118,24 @@ def fc_pic_flag(self):
def stdcxx_libs(self):
    return ("-cxxlib",)

@property
def prefix(self):
    # OneAPI reports its install prefix when running ``--version``
    # on the line ``InstalledDir: <prefix>/bin/compiler``.
    cc = spack.util.executable.Executable(self.cc)
    with self.compiler_environment():
        oneapi_output = cc("--version", output=str, error=str)

    for line in oneapi_output.splitlines():
        if line.startswith("InstalledDir:"):
            oneapi_prefix = line.split(":")[1].strip()
            # Go from <prefix>/bin/compiler to <prefix>
            return ancestor(oneapi_prefix, 2)

    raise RuntimeError(
        "could not find install prefix of OneAPI from output:\n\t{}".format(oneapi_output)
    )

def setup_custom_environment(self, pkg, env):
    # workaround bug in icpx driver where it requires sycl-post-link is on the PATH
    # It is located in the same directory as the driver. Error message:
@@ -131,11 +151,14 @@ def setup_custom_environment(self, pkg, env):
    # Edge cases for Intel's oneAPI compilers when using the legacy classic compilers:
    # Always pass flags to disable deprecation warnings, since these warnings can
    # confuse tools that parse the output of compiler commands (e.g. version checks).
    if self.cc and self.cc.endswith("icc") and self.real_version >= Version("2021"):
    # This is really only needed for Fortran, since oneapi@ should be using either
    # icx+icpx+ifx or icx+icpx+ifort. But to be on the safe side (some users may
    # want to try to swap icpx against icpc, for example), and since the Intel LLVM
    # compilers accept these diag-disable flags, we apply them for all compilers.
    if self.real_version >= Version("2021") and self.real_version <= Version("2023"):
        env.append_flags("SPACK_ALWAYS_CFLAGS", "-diag-disable=10441")
    if self.cxx and self.cxx.endswith("icpc") and self.real_version >= Version("2021"):
        env.append_flags("SPACK_ALWAYS_CXXFLAGS", "-diag-disable=10441")
    if self.fc and self.fc.endswith("ifort") and self.real_version >= Version("2021"):
    if self.real_version >= Version("2021") and self.real_version <= Version("2024"):
        env.append_flags("SPACK_ALWAYS_FFLAGS", "-diag-disable=10448")

    # 2024 release bumped the libsycl version because of an ABI

@@ -2,14 +2,20 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
(DEPRECATED) Used to contain the code for the original concretizer
"""
"""High-level functions to concretize list of specs"""
import sys
import time
from contextlib import contextmanager
from itertools import chain
from typing import Iterable, Optional, Sequence, Tuple, Union

import llnl.util.tty as tty

import spack.compilers
import spack.config
import spack.error
import spack.repo
import spack.util.parallel
from spack.spec import ArchSpec, CompilerSpec, Spec

CHECK_COMPILER_EXISTENCE = True

@@ -30,67 +36,167 @@ def enable_compiler_existence_check():
CHECK_COMPILER_EXISTENCE = saved


def find_spec(spec, condition, default=None):
"""Searches the dag from spec in an intelligent order and looks
for a spec that matches a condition"""
# First search parents, then search children
deptype = ("build", "link")
dagiter = chain(
spec.traverse(direction="parents", deptype=deptype, root=False),
spec.traverse(direction="children", deptype=deptype, root=False),
)
visited = set()
for relative in dagiter:
if condition(relative):
return relative
visited.add(id(relative))

# Then search all other relatives in the DAG *except* spec
for relative in spec.root.traverse(deptype="all"):
if relative is spec:
continue
if id(relative) in visited:
continue
if condition(relative):
return relative

# Finally search spec itself.
if condition(spec):
return spec

return default # Nothing matched the condition; return default.
SpecPair = Tuple[Spec, Spec]
SpecLike = Union[Spec, str]
TestsType = Union[bool, Iterable[str]]


def concretize_specs_together(*abstract_specs, **kwargs):
def concretize_specs_together(
abstract_specs: Sequence[SpecLike], tests: TestsType = False
) -> Sequence[Spec]:
"""Given a number of specs as input, tries to concretize them together.

Args:
tests (bool or list or set): False to run no tests, True to test
all packages, or a list of package names to run tests for some
*abstract_specs: abstract specs to be concretized, given either
as Specs or strings

Returns:
List of concretized specs
abstract_specs: abstract specs to be concretized
tests: list of package names for which to consider test dependencies. If True, all nodes
will have test dependencies. If False, test dependencies will be disregarded.
"""
import spack.solver.asp

allow_deprecated = spack.config.get("config:deprecated", False)
solver = spack.solver.asp.Solver()
result = solver.solve(
abstract_specs, tests=kwargs.get("tests", False), allow_deprecated=allow_deprecated
)
result = solver.solve(abstract_specs, tests=tests, allow_deprecated=allow_deprecated)
return [s.copy() for s in result.specs]
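
A usage sketch for the keyword-free signature; the package names are arbitrary and a configured Spack session is assumed:

import spack.concretize

# Solve both abstract specs as one compatible DAG; returns concrete Specs.
for spec in spack.concretize.concretize_specs_together(["zlib", "cmake"]):
    print(spec.format("{name}{@version}"))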


def concretize_together(
spec_list: Sequence[SpecPair], tests: TestsType = False
) -> Sequence[SpecPair]:
"""Given a number of specs as input, tries to concretize them together.

Args:
spec_list: list of tuples to concretize. First entry is abstract spec, second entry is
already concrete spec or None if not yet concretized
tests: list of package names for which to consider test dependencies. If True, all nodes
will have test dependencies. If False, test dependencies will be disregarded.
"""
to_concretize = [concrete if concrete else abstract for abstract, concrete in spec_list]
abstract_specs = [abstract for abstract, _ in spec_list]
concrete_specs = concretize_specs_together(to_concretize, tests=tests)
return list(zip(abstract_specs, concrete_specs))
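
The SpecPair convention, sketched end to end (names arbitrary; assumes a configured Spack session):

import spack.concretize
from spack.spec import Spec

pairs = [(Spec("zlib"), None), (Spec("cmake"), None)]  # nothing concrete yet
for abstract, concrete in spack.concretize.concretize_together(pairs):
    assert concrete.concrete and concrete.satisfies(abstract)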


def concretize_together_when_possible(
spec_list: Sequence[SpecPair], tests: TestsType = False
) -> Sequence[SpecPair]:
"""Given a number of specs as input, tries to concretize them together to the extent possible.

See documentation for ``unify: when_possible`` concretization for the precise definition of
"to the extent possible".

Args:
spec_list: list of tuples to concretize. First entry is abstract spec, second entry is
already concrete spec or None if not yet concretized
tests: list of package names for which to consider test dependencies. If True, all nodes
will have test dependencies. If False, test dependencies will be disregarded.
"""
import spack.solver.asp

to_concretize = [concrete if concrete else abstract for abstract, concrete in spec_list]
old_concrete_to_abstract = {
concrete: abstract for (abstract, concrete) in spec_list if concrete
}

result_by_user_spec = {}
solver = spack.solver.asp.Solver()
allow_deprecated = spack.config.get("config:deprecated", False)
for result in solver.solve_in_rounds(
to_concretize, tests=tests, allow_deprecated=allow_deprecated
):
result_by_user_spec.update(result.specs_by_input)

# If the "abstract" spec is a concrete spec from the previous concretization
# translate it back to an abstract spec. Otherwise, keep the abstract spec
return [
(old_concrete_to_abstract.get(abstract, abstract), concrete)
for abstract, concrete in sorted(result_by_user_spec.items())
]


def concretize_separately(
spec_list: Sequence[SpecPair], tests: TestsType = False
) -> Sequence[SpecPair]:
"""Concretizes the input specs separately from each other.

Args:
spec_list: list of tuples to concretize. First entry is abstract spec, second entry is
already concrete spec or None if not yet concretized
tests: list of package names for which to consider test dependencies. If True, all nodes
will have test dependencies. If False, test dependencies will be disregarded.
"""
import spack.bootstrap

to_concretize = [abstract for abstract, concrete in spec_list if not concrete]
args = [
(i, str(abstract), tests)
for i, abstract in enumerate(to_concretize)
if not abstract.concrete
]
ret = [(i, abstract) for i, abstract in enumerate(to_concretize) if abstract.concrete]
# Ensure we don't try to bootstrap clingo in parallel
with spack.bootstrap.ensure_bootstrap_configuration():
spack.bootstrap.ensure_clingo_importable_or_raise()

# Ensure all the indexes have been built or updated, since
# otherwise the processes in the pool may timeout on waiting
# for a write lock. We do this indirectly by retrieving the
# provider index, which should in turn trigger the update of
# all the indexes if there's any need for that.
_ = spack.repo.PATH.provider_index

# Ensure we have compilers in compilers.yaml so that processes
# don't try to write the config file in parallel
_ = spack.compilers.all_compilers_config(spack.config.CONFIG)

# Early return if there is nothing to do
if len(args) == 0:
# Still have to combine the things that were passed in as abstract with the things
# that were passed in as pairs
return [(abstract, concrete) for abstract, (_, concrete) in zip(to_concretize, ret)] + [
(abstract, concrete) for abstract, concrete in spec_list if concrete
]

# Solve the environment in parallel on Linux
# TODO: support parallel concretization on macOS and Windows
num_procs = min(len(args), spack.config.determine_number_of_jobs(parallel=True))

for j, (i, concrete, duration) in enumerate(
spack.util.parallel.imap_unordered(
_concretize_task, args, processes=num_procs, debug=tty.is_debug(), maxtaskperchild=1
)
):
ret.append((i, concrete))
percentage = (j + 1) / len(args) * 100
tty.verbose(
f"{duration:6.1f}s [{percentage:3.0f}%] {concrete.cformat('{hash:7}')} "
f"{to_concretize[i].colored_str}"
)
sys.stdout.flush()

# Add specs in original order
ret.sort(key=lambda x: x[0])

return [(abstract, concrete) for abstract, (_, concrete) in zip(to_concretize, ret)] + [
(abstract, concrete) for abstract, concrete in spec_list if concrete
]


def _concretize_task(packed_arguments: Tuple[int, str, TestsType]) -> Tuple[int, Spec, float]:
index, spec_str, tests = packed_arguments
with tty.SuppressOutput(msg_enabled=False):
start = time.time()
spec = Spec(spec_str).concretized(tests=tests)
return index, spec, time.time() - start


class UnavailableCompilerVersionError(spack.error.SpackError):
"""Raised when there is no available compiler that satisfies a
compiler spec."""

def __init__(self, compiler_spec, arch=None):
err_msg = "No compilers with spec {0} found".format(compiler_spec)
def __init__(self, compiler_spec: CompilerSpec, arch: Optional[ArchSpec] = None) -> None:
err_msg = f"No compilers with spec {compiler_spec} found"
if arch:
err_msg += " for operating system {0} and target {1}.".format(arch.os, arch.target)
err_msg += f" for operating system {arch.os} and target {arch.target}."

super().__init__(
err_msg,

@@ -427,6 +427,10 @@ def __init__(self, *scopes: ConfigScope) -> None:
self.push_scope(scope)
self.format_updates: Dict[str, List[ConfigScope]] = collections.defaultdict(list)

def ensure_unwrapped(self) -> "Configuration":
"""Ensure we unwrap this object from any dynamic wrapper (like Singleton)"""
return self

@_config_mutator
def push_scope(self, scope: ConfigScope) -> None:
"""Add a higher precedence scope to the Configuration."""
@@ -714,7 +718,7 @@ def print_section(self, section: str, blame: bool = False, *, scope=None) -> Non
@contextlib.contextmanager
def override(
path_or_scope: Union[ConfigScope, str], value: Optional[Any] = None
) -> Generator[Union[lang.Singleton, Configuration], None, None]:
) -> Generator[Configuration, None, None]:
"""Simple way to override config settings within a context.

Arguments:
@@ -752,13 +756,7 @@ def override(
assert scope is overrides
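
For context, the override context manager is typically used like this (the key and value are arbitrary examples):

import spack.config

# Temporarily override a setting; the previous value is restored on exit.
with spack.config.override("config:build_jobs", 4):
    assert spack.config.get("config:build_jobs") == 4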


#: configuration scopes added on the command line, set by ``spack.main.main()``
COMMAND_LINE_SCOPES: List[str] = []


def _add_platform_scope(
cfg: Union[Configuration, lang.Singleton], name: str, path: str, writable: bool = True
) -> None:
def _add_platform_scope(cfg: Configuration, name: str, path: str, writable: bool = True) -> None:
"""Add a platform-specific subdirectory for the current platform."""
platform = spack.platforms.host().name
scope = DirectoryConfigScope(
@@ -792,9 +790,7 @@ def config_paths_from_entry_points() -> List[Tuple[str, str]]:
return config_paths


def _add_command_line_scopes(
cfg: Union[Configuration, lang.Singleton], command_line_scopes: List[str]
) -> None:
def _add_command_line_scopes(cfg: Configuration, command_line_scopes: List[str]) -> None:
"""Add additional scopes from the --config-scope argument, either envs or dirs."""
import spack.environment.environment as env # circular import

@@ -864,18 +860,11 @@ def create() -> Configuration:
# Each scope can have per-platform overrides in subdirectories
_add_platform_scope(cfg, name, path)

# add command-line scopes
_add_command_line_scopes(cfg, COMMAND_LINE_SCOPES)

# we make a special scope for spack commands so that they can
# override configuration options.
cfg.push_scope(InternalConfigScope("command_line"))

return cfg


#: This is the singleton configuration instance for Spack.
CONFIG: Union[Configuration, lang.Singleton] = lang.Singleton(create)
CONFIG: Configuration = lang.Singleton(create) # type: ignore


def add_from_file(filename: str, scope: Optional[str] = None) -> None:

@@ -32,6 +32,7 @@
Container,
Dict,
Generator,
Iterable,
List,
NamedTuple,
Optional,
@@ -290,55 +291,6 @@ def __reduce__(self):
return ForbiddenLock, tuple()


_QUERY_DOCSTRING = """

Args:
query_spec: queries iterate through specs in the database and
return those that satisfy the supplied ``query_spec``. If
query_spec is `any`, this will match all specs in the
database. If it is a spec, we'll evaluate
``spec.satisfies(query_spec)``

known (bool or None): Specs that are "known" are those
for which Spack can locate a ``package.py`` file -- i.e.,
Spack "knows" how to install them. Specs that are unknown may
represent packages that existed in a previous version of
Spack, but have since either changed their name or
been removed

installed (bool or InstallStatus or typing.Iterable or None):
if ``True``, includes only installed
specs in the search; if ``False`` only missing specs, and if
``any``, all specs in database. If an InstallStatus or iterable
of InstallStatus, returns specs whose install status
(installed, deprecated, or missing) matches (one of) the
InstallStatus. (default: True)

explicit (bool or None): A spec that was installed
following a specific user request is marked as explicit. If
instead it was pulled-in as a dependency of a user requested
spec it's considered implicit.

start_date (datetime.datetime or None): filters the query
discarding specs that have been installed before ``start_date``.

end_date (datetime.datetime or None): filters the query discarding
specs that have been installed after ``end_date``.

hashes (Container): list or set of hashes that we can use to
restrict the search

in_buildcache (bool or None): Specs that are marked in
this database as part of an associated binary cache are
``in_buildcache``. All other specs are not. This field is used
for querying mirror indices. Default is ``any``.

Returns:
list of specs that match the query

"""


class LockConfiguration(NamedTuple):
"""Data class to configure locks in Database objects

@@ -604,6 +556,9 @@ def _path(self, spec: "spack.spec.Spec") -> pathlib.Path:
return self.dir / f"{spec.name}-{spec.dag_hash()}"


SelectType = Callable[[InstallRecord], bool]


class Database:
#: Fields written for each install record
record_fields: Tuple[str, ...] = DEFAULT_INSTALL_RECORD_FIELDS
@@ -1245,7 +1200,7 @@ def _add(
self._data[key].explicit = explicit

@_autospec
def add(self, spec: "spack.spec.Spec", *, explicit: bool = False) -> None:
def add(self, spec: "spack.spec.Spec", *, explicit: bool = False, allow_missing=False) -> None:
"""Add spec at path to database, locking and reading DB to sync.

``add()`` will lock and read from the DB on disk.
@@ -1254,7 +1209,7 @@ def add(self, spec: "spack.spec.Spec", *, explicit: bool = False) -> None:
# TODO: ensure that spec is concrete?
# Entire add is transactional.
with self.write_transaction():
self._add(spec, explicit=explicit)
self._add(spec, explicit=explicit, allow_missing=allow_missing)

def _get_matching_spec_key(self, spec: "spack.spec.Spec", **kwargs) -> str:
"""Get the exact spec OR get a single spec that matches."""
@@ -1381,7 +1336,7 @@ def _deprecate(self, spec: "spack.spec.Spec", deprecator: "spack.spec.Spec") ->
self._data[spec_key] = spec_rec

@_autospec
def mark(self, spec: "spack.spec.Spec", key, value) -> None:
def mark(self, spec: "spack.spec.Spec", key: str, value: Any) -> None:
"""Mark an arbitrary record on a spec."""
with self.write_transaction():
return self._mark(spec, key, value)
@@ -1525,62 +1480,51 @@ def get_by_hash(self, dag_hash, default=None, installed=any):

def _query(
self,
query_spec=any,
known=any,
installed=True,
explicit=any,
start_date=None,
end_date=None,
hashes=None,
in_buildcache=any,
origin=None,
):
"""Run a query on the database."""
query_spec: Optional[Union[str, "spack.spec.Spec"]] = None,
*,
predicate_fn: Optional[SelectType] = None,
installed: Union[bool, InstallStatus, List[InstallStatus]] = True,
explicit: Optional[bool] = None,
start_date: Optional[datetime.datetime] = None,
end_date: Optional[datetime.datetime] = None,
hashes: Optional[Iterable[str]] = None,
in_buildcache: Optional[bool] = None,
origin: Optional[str] = None,
) -> List["spack.spec.Spec"]:

# TODO: Specs are a lot like queries. Should there be a
# TODO: wildcard spec object, and should specs have attributes
# TODO: like installed and known that can be queried? Or are
# TODO: these really special cases that only belong here?
# Restrict the set of records over which we iterate first
matching_hashes = self._data
if hashes is not None:
matching_hashes = {h: self._data[h] for h in hashes if h in self._data}

if query_spec is not any:
if not isinstance(query_spec, spack.spec.Spec):
query_spec = spack.spec.Spec(query_spec)
if isinstance(query_spec, str):
query_spec = spack.spec.Spec(query_spec)

# Just look up concrete specs with hashes; no fancy search.
if query_spec.concrete:
# TODO: handling of hashes restriction is not particularly elegant.
hash_key = query_spec.dag_hash()
if hash_key in self._data and (not hashes or hash_key in hashes):
return [self._data[hash_key].spec]
else:
return []
if query_spec is not None and query_spec.concrete:
hash_key = query_spec.dag_hash()
if hash_key not in matching_hashes:
return []
matching_hashes = {hash_key: matching_hashes[hash_key]}

# Abstract specs require more work -- currently we test
# against everything.
results = []
start_date = start_date or datetime.datetime.min
end_date = end_date or datetime.datetime.max

# save specs whose name doesn't match for last, to avoid a virtual check
deferred = []

for key, rec in self._data.items():
if hashes is not None and rec.spec.dag_hash() not in hashes:
continue

for rec in matching_hashes.values():
if origin and not (origin == rec.origin):
continue

if not rec.install_type_matches(installed):
continue

if in_buildcache is not any and rec.in_buildcache != in_buildcache:
if in_buildcache is not None and rec.in_buildcache != in_buildcache:
continue

if explicit is not any and rec.explicit != explicit:
if explicit is not None and rec.explicit != explicit:
continue

if known is not any and known(rec.spec.name):
if predicate_fn is not None and not predicate_fn(rec):
continue

if start_date or end_date:
@@ -1588,7 +1532,7 @@ def _query(
if not (start_date < inst_date < end_date):
continue

if query_spec is any:
if query_spec is None or query_spec.concrete:
results.append(rec.spec)
continue

@@ -1606,36 +1550,118 @@ def _query(
# If we did find something, the query spec can't be virtual b/c we matched an actual
# package installation, so skip the virtual check entirely. If we *didn't* find anything,
# check all the deferred specs *if* the query is virtual.
if not results and query_spec is not any and deferred and query_spec.virtual:
if not results and query_spec is not None and deferred and query_spec.virtual:
results = [spec for spec in deferred if spec.satisfies(query_spec)]

return results

if _query.__doc__ is None:
_query.__doc__ = ""
_query.__doc__ += _QUERY_DOCSTRING
def query_local(
self,
query_spec: Optional[Union[str, "spack.spec.Spec"]] = None,
*,
predicate_fn: Optional[SelectType] = None,
installed: Union[bool, InstallStatus, List[InstallStatus]] = True,
explicit: Optional[bool] = None,
start_date: Optional[datetime.datetime] = None,
end_date: Optional[datetime.datetime] = None,
hashes: Optional[List[str]] = None,
in_buildcache: Optional[bool] = None,
origin: Optional[str] = None,
) -> List["spack.spec.Spec"]:
"""Queries the local Spack database.

def query_local(self, *args, **kwargs):
"""Query only the local Spack database.
This function doesn't guarantee any sorting of the returned data for performance reasons,
since comparing specs for __lt__ may be an expensive operation.

This function doesn't guarantee any sorting of the returned
data for performance reasons, since comparing specs for __lt__
may be an expensive operation.
Args:
query_spec: if query_spec is ``None``, match all specs in the database.
If it is a spec, return all specs matching ``spec.satisfies(query_spec)``.

predicate_fn: optional predicate taking an InstallRecord as argument, and returning
whether that record is selected for the query. It can be used to craft criteria
that need some data for selection not provided by the Database itself.

installed: if ``True``, includes only installed specs in the search. If ``False`` only
missing specs, and if ``any``, all specs in database. If an InstallStatus or
iterable of InstallStatus, returns specs whose install status matches at least
one of the InstallStatus.

explicit: a spec that was installed following a specific user request is marked as
explicit. If instead it was pulled-in as a dependency of a user requested spec
it's considered implicit.

start_date: if set considers only specs installed from the starting date.

end_date: if set considers only specs installed until the ending date.

in_buildcache: specs that are marked in this database as part of an associated binary
cache are ``in_buildcache``. All other specs are not. This field is used for
querying mirror indices. By default, it does not check this status.

hashes: list of hashes used to restrict the search

origin: origin of the spec
"""
with self.read_transaction():
return self._query(*args, **kwargs)
return self._query(
query_spec,
predicate_fn=predicate_fn,
installed=installed,
explicit=explicit,
start_date=start_date,
end_date=end_date,
hashes=hashes,
in_buildcache=in_buildcache,
origin=origin,
)
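
A sketch of the new predicate_fn hook; this assumes the local database handle from spack.store, and that InstallRecord exposes installation_time (data the flag arguments don't cover is exactly what predicate_fn is for):

import time
import spack.store

db = spack.store.STORE.db  # assumed handle to the local database
cutoff = time.time() - 86400
# Keep only records installed in the last day.
recent = db.query_local(predicate_fn=lambda rec: (rec.installation_time or 0) > cutoff)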

if query_local.__doc__ is None:
query_local.__doc__ = ""
query_local.__doc__ += _QUERY_DOCSTRING
def query(
self,
query_spec: Optional[Union[str, "spack.spec.Spec"]] = None,
*,
predicate_fn: Optional[SelectType] = None,
installed: Union[bool, InstallStatus, List[InstallStatus]] = True,
explicit: Optional[bool] = None,
start_date: Optional[datetime.datetime] = None,
end_date: Optional[datetime.datetime] = None,
in_buildcache: Optional[bool] = None,
hashes: Optional[List[str]] = None,
origin: Optional[str] = None,
install_tree: str = "all",
):
"""Queries the Spack database including all upstream databases.

def query(self, *args, **kwargs):
"""Query the Spack database including all upstream databases.
Args:
query_spec: if query_spec is ``None``, match all specs in the database.
If it is a spec, return all specs matching ``spec.satisfies(query_spec)``.

Additional Arguments:
install_tree (str): query 'all' (default), 'local', 'upstream', or upstream path
predicate_fn: optional predicate taking an InstallRecord as argument, and returning
whether that record is selected for the query. It can be used to craft criteria
that need some data for selection not provided by the Database itself.

installed: if ``True``, includes only installed specs in the search. If ``False`` only
missing specs, and if ``any``, all specs in database. If an InstallStatus or
iterable of InstallStatus, returns specs whose install status matches at least
one of the InstallStatus.

explicit: a spec that was installed following a specific user request is marked as
explicit. If instead it was pulled-in as a dependency of a user requested spec
it's considered implicit.

start_date: if set considers only specs installed from the starting date.

end_date: if set considers only specs installed until the ending date.

in_buildcache: specs that are marked in this database as part of an associated binary
cache are ``in_buildcache``. All other specs are not. This field is used for
querying mirror indices. By default, it does not check this status.

hashes: list of hashes used to restrict the search

install_tree: query 'all' (default), 'local', 'upstream', or upstream path

origin: origin of the spec
"""
install_tree = kwargs.pop("install_tree", "all")
valid_trees = ["all", "upstream", "local", self.root] + [u.root for u in self.upstream_dbs]
if install_tree not in valid_trees:
msg = "Invalid install_tree argument to Database.query()\n"
@@ -1651,28 +1677,54 @@ def query(self, *args, **kwargs):
# queries for upstream DBs need to *not* lock - we may not
# have permissions to do this and the upstream DBs won't know about
# us anyway (so e.g. they should never uninstall specs)
upstream_results.extend(upstream_db._query(*args, **kwargs) or [])
upstream_results.extend(
upstream_db._query(
query_spec,
predicate_fn=predicate_fn,
installed=installed,
explicit=explicit,
start_date=start_date,
end_date=end_date,
hashes=hashes,
in_buildcache=in_buildcache,
origin=origin,
)
or []
)

local_results = []
local_results: Set["spack.spec.Spec"] = set()
if install_tree in ("all", "local") or self.root == install_tree:
local_results = set(self.query_local(*args, **kwargs))
local_results = set(
self.query_local(
query_spec,
predicate_fn=predicate_fn,
installed=installed,
explicit=explicit,
start_date=start_date,
end_date=end_date,
hashes=hashes,
in_buildcache=in_buildcache,
origin=origin,
)
)

results = list(local_results) + list(x for x in upstream_results if x not in local_results)

return sorted(results)

if query.__doc__ is None:
query.__doc__ = ""
query.__doc__ += _QUERY_DOCSTRING

def query_one(self, query_spec, known=any, installed=True):
def query_one(
self,
query_spec: Optional[Union[str, "spack.spec.Spec"]],
predicate_fn: Optional[SelectType] = None,
installed: Union[bool, InstallStatus, List[InstallStatus]] = True,
) -> Optional["spack.spec.Spec"]:
"""Query for exactly one spec that matches the query spec.

Raises an assertion error if more than one spec matches the
query. Returns None if no installed package matches.
Returns None if no installed package matches.

Raises:
AssertionError: if more than one spec matches the query.
"""
concrete_specs = self.query(query_spec, known=known, installed=installed)
concrete_specs = self.query(query_spec, predicate_fn=predicate_fn, installed=installed)
assert len(concrete_specs) <= 1
return concrete_specs[0] if concrete_specs else None
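
Typical call sites for the updated API (the spec strings are arbitrary examples; assumes a configured Spack session):

import spack.store

db = spack.store.STORE.db  # assumed handle
spec = db.query_one("zlib@1.3")  # Spec or None; asserts on ambiguity
local_only = db.query("zlib", install_tree="local")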

@@ -1719,24 +1771,6 @@ def root(key, record):
if id(rec.spec) not in needed and rec.installed
]

def update_explicit(self, spec, explicit):
"""
Update the spec's explicit state in the database.

Args:
spec (spack.spec.Spec): the spec whose install record is being updated
explicit (bool): ``True`` if the package was requested explicitly
by the user, ``False`` if it was pulled in as a dependency of
an explicit package.
"""
rec = self.get_record(spec)
if explicit != rec.explicit:
with self.write_transaction():
message = "{s.name}@{s.version} : marking the package {0}"
status = "explicit" if explicit else "implicit"
tty.debug(message.format(status, s=spec))
rec.explicit = explicit


class NoUpstreamVisitor:
"""Gives edges to upstream specs, but does not follow edges from upstream specs."""

@@ -11,6 +11,7 @@
import os.path
import re
import sys
import traceback
import warnings
from typing import Dict, Iterable, List, Optional, Set, Tuple, Type

@@ -18,6 +19,7 @@
import llnl.util.lang
import llnl.util.tty

import spack.error
import spack.spec
import spack.util.elf as elf_utils
import spack.util.environment
@@ -66,6 +68,21 @@ def file_identifier(path):
return s.st_dev, s.st_ino


def dedupe_paths(paths: List[str]) -> List[str]:
"""Deduplicate paths based on inode and device number. In case the list contains first a
symlink and then the directory it points to, the symlink is replaced with the directory path.
This ensures that we pick for example ``/usr/bin`` over ``/bin`` if the latter is a symlink to
the former."""
seen: Dict[Tuple[int, int], str] = {}
for path in paths:
identifier = file_identifier(path)
if identifier not in seen:
seen[identifier] = path
elif not os.path.islink(path):
seen[identifier] = path
return list(seen.values())
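
A quick sanity check of the behavior above; on distros where /bin is a symlink to /usr/bin, the real directory wins regardless of input order:

print(dedupe_paths(["/bin", "/usr/bin"]))  # -> ["/usr/bin"]
print(dedupe_paths(["/usr/bin", "/bin"]))  # -> ["/usr/bin"] as well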


def executables_in_path(path_hints: List[str]) -> Dict[str, str]:
"""Get the paths of all executables available from the current PATH.

@@ -82,8 +99,7 @@ def executables_in_path(path_hints: List[str]) -> Dict[str, str]:
"""
search_paths = llnl.util.filesystem.search_paths_for_executables(*path_hints)
# Make sure we don't doubly list /usr/lib and /lib etc
search_paths = list(llnl.util.lang.dedupe(search_paths, key=file_identifier))
return path_to_dict(search_paths)
return path_to_dict(dedupe_paths(search_paths))


def accept_elf(path, host_compat):
@@ -144,7 +160,7 @@ def libraries_in_ld_and_system_library_path(
search_paths = list(filter(os.path.isdir, search_paths))

# Make sure we don't doubly list /usr/lib and /lib etc
search_paths = list(llnl.util.lang.dedupe(search_paths, key=file_identifier))
search_paths = dedupe_paths(search_paths)

try:
host_compat = elf_utils.get_elf_compat(sys.executable)
@@ -260,8 +276,12 @@ def detect_specs(
)
except Exception as e:
specs = []
if spack.error.SHOW_BACKTRACE:
details = traceback.format_exc()
else:
details = f"[{e.__class__.__name__}: {e}]"
warnings.warn(
f'error detecting "{pkg.name}" from prefix {candidate_path} [{str(e)}]'
f'error detecting "{pkg.name}" from prefix {candidate_path}: {details}'
)

if not specs:
@@ -435,9 +455,9 @@ def by_path(
llnl.util.tty.debug(
f"[EXTERNAL DETECTION] Skipping {pkg_name}: timeout reached"
)
except Exception as e:
except Exception:
llnl.util.tty.debug(
f"[EXTERNAL DETECTION] Skipping {pkg_name}: exception occurred {e}"
f"[EXTERNAL DETECTION] Skipping {pkg_name}: {traceback.format_exc()}"
)

return result

@@ -10,6 +10,7 @@
import llnl.util.lang

import spack.error
import spack.repo
import spack.spec

#: Names of possible directives. This list is mostly populated using the @directive decorator.
@@ -63,7 +64,7 @@ def __init__(cls, name, bases, attr_dict):
# The instance is being initialized: if it is a package we must ensure
# that the directives are called to set it up.

if "spack.pkg" in cls.__module__:
if cls.__module__.startswith(spack.repo.ROOT_PYTHON_NAMESPACE):
# Ensure the presence of the dictionaries associated with the directives.
# All dictionaries are defaultdicts that create lists for missing keys.
for d in DirectiveMeta._directive_dict_names:

@@ -473,6 +473,7 @@
active_environment,
all_environment_names,
all_environments,
as_env_dir,
create,
create_in_dir,
deactivate,
@@ -480,6 +481,7 @@
default_view_name,
display_specs,
environment_dir_from_name,
environment_from_name_or_dir,
exists,
initialize_environment_dir,
installed_specs,
@@ -507,6 +509,7 @@
"active_environment",
"all_environment_names",
"all_environments",
"as_env_dir",
"create",
"create_in_dir",
"deactivate",
@@ -514,6 +517,7 @@
"default_view_name",
"display_specs",
"environment_dir_from_name",
"environment_from_name_or_dir",
"exists",
"initialize_environment_dir",
"installed_specs",

@@ -9,11 +9,13 @@

import os
import re
import shlex
from enum import Enum
from typing import List, Optional

import spack.deptypes as dt
import spack.environment.environment as ev
import spack.paths
import spack.spec
import spack.traverse as traverse

@@ -226,6 +228,7 @@ def to_dict(self):
"install_deps_target": self._target("install-deps"),
"any_hash_target": self._target("%"),
"jobserver_support": self.jobserver_support,
"spack_script": shlex.quote(spack.paths.spack_script),
"adjacency_list": self.make_adjacency_list,
"phony_convenience_targets": " ".join(self.phony_convenience_targets),
"pkg_ids_variable": self.pkg_identifier_variable,

@@ -11,22 +11,19 @@
import re
import shutil
import stat
import sys
import time
import urllib.parse
import urllib.request
import warnings
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import llnl.util.filesystem as fs
import llnl.util.tty as tty
import llnl.util.tty.color as clr
from llnl.util.link_tree import ConflictingSpecsError
from llnl.util.symlink import readlink, symlink
from llnl.util.symlink import islink, readlink, symlink

import spack
import spack.caches
import spack.compilers
import spack.concretize
import spack.config
import spack.deptypes as dt
@@ -45,7 +42,6 @@
import spack.util.environment
import spack.util.hash
import spack.util.lock as lk
import spack.util.parallel
import spack.util.path
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
@@ -57,6 +53,8 @@
from spack.spec_list import SpecList
from spack.util.path import substitute_path_variables

SpecPair = spack.concretize.SpecPair

#: environment variable used to indicate the active environment
spack_env_var = "SPACK_ENV"

@@ -277,6 +275,22 @@ def is_env_dir(path):
return os.path.isdir(path) and os.path.exists(os.path.join(path, manifest_name))


def as_env_dir(name_or_dir):
"""Translate an environment name or directory to the environment directory"""
if is_env_dir(name_or_dir):
return name_or_dir
else:
validate_env_name(name_or_dir)
if not exists(name_or_dir):
raise SpackEnvironmentError("no such environment '%s'" % name_or_dir)
return root(name_or_dir)


def environment_from_name_or_dir(name_or_dir):
"""Get an environment with the supplied name or directory."""
return Environment(as_env_dir(name_or_dir))
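
Usage sketch for the new helpers (the environment name is hypothetical; assumes an active Spack session):

import spack.environment as ev

env = ev.environment_from_name_or_dir("myenv")  # by name...
same = ev.environment_from_name_or_dir(env.path)  # ...or by directory, via as_env_dir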


def read(name):
"""Get an environment with the supplied name."""
validate_env_name(name)
@@ -654,7 +668,7 @@ def from_dict(base_path, d):

@property
def _current_root(self):
if not os.path.islink(self.root):
if not islink(self.root):
return None

root = readlink(self.root)
@@ -1159,6 +1173,8 @@ def clear(self, re_read=False):
# things that cannot be recreated from file
self.new_specs = [] # write packages for these on write()

self.manifest.clear()

@property
def active(self):
"""True if this environment is currently active."""
@@ -1492,7 +1508,7 @@ def deconcretize(self, spec: spack.spec.Spec, concrete: bool = True):

def _get_specs_to_concretize(
self,
) -> Tuple[Set[spack.spec.Spec], Set[spack.spec.Spec], List[spack.spec.Spec]]:
) -> Tuple[List[spack.spec.Spec], List[spack.spec.Spec], List[SpecPair]]:
"""Compute specs to concretize for unify:true and unify:when_possible.

This includes new user specs and any already concretized specs.
@@ -1502,23 +1518,20 @@ def _get_specs_to_concretize(

"""
# Exit early if the set of concretized specs is the set of user specs
new_user_specs = set(self.user_specs) - set(self.concretized_user_specs)
kept_user_specs = set(self.user_specs) & set(self.concretized_user_specs)
new_user_specs = list(set(self.user_specs) - set(self.concretized_user_specs))
kept_user_specs = list(set(self.user_specs) & set(self.concretized_user_specs))
kept_user_specs += self.included_user_specs
if not new_user_specs:
return new_user_specs, kept_user_specs, []

concrete_specs_to_keep = [
concrete
specs_to_concretize = [(s, None) for s in new_user_specs] + [
(abstract, concrete)
for abstract, concrete in self.concretized_specs()
if abstract in kept_user_specs
]

specs_to_concretize = list(new_user_specs) + concrete_specs_to_keep
return new_user_specs, kept_user_specs, specs_to_concretize

def _concretize_together_where_possible(
self, tests: bool = False
) -> List[Tuple[spack.spec.Spec, spack.spec.Spec]]:
def _concretize_together_where_possible(self, tests: bool = False) -> Sequence[SpecPair]:
# Avoid cyclic dependency
import spack.solver.asp

@@ -1527,36 +1540,26 @@ def _concretize_together_where_possible(
if not new_user_specs:
return []

old_concrete_to_abstract = {
concrete: abstract for (abstract, concrete) in self.concretized_specs()
}

self.concretized_user_specs = []
self.concretized_order = []
self.specs_by_hash = {}

result_by_user_spec = {}
solver = spack.solver.asp.Solver()
allow_deprecated = spack.config.get("config:deprecated", False)
for result in solver.solve_in_rounds(
specs_to_concretize, tests=tests, allow_deprecated=allow_deprecated
):
result_by_user_spec.update(result.specs_by_input)
ret = []
result = spack.concretize.concretize_together_when_possible(
specs_to_concretize, tests=tests
)
for abstract, concrete in result:
# Only add to the environment if it's from this environment (not included in)
if abstract in self.user_specs:
self._add_concrete_spec(abstract, concrete)

result = []
for abstract, concrete in sorted(result_by_user_spec.items()):
# If the "abstract" spec is a concrete spec from the previous concretization
# translate it back to an abstract spec. Otherwise, keep the abstract spec
abstract = old_concrete_to_abstract.get(abstract, abstract)
# Return only the new specs
if abstract in new_user_specs:
result.append((abstract, concrete))
self._add_concrete_spec(abstract, concrete)
ret.append((abstract, concrete))

return result
return ret

def _concretize_together(
self, tests: bool = False
) -> List[Tuple[spack.spec.Spec, spack.spec.Spec]]:
def _concretize_together(self, tests: bool = False) -> Sequence[SpecPair]:
"""Concretization strategy that concretizes all the specs
in the same DAG.
"""
@@ -1570,8 +1573,8 @@ def _concretize_together(
self.specs_by_hash = {}

try:
concrete_specs: List[spack.spec.Spec] = spack.concretize.concretize_specs_together(
*specs_to_concretize, tests=tests
concretized_specs = spack.concretize.concretize_together(
specs_to_concretize, tests=tests
)
except spack.error.UnsatisfiableSpecError as e:
# "Enhance" the error message for multiple root specs, suggest a less strict
@@ -1589,14 +1592,13 @@ def _concretize_together(
)
raise

# set() | set() does not preserve ordering, even though sets are ordered
ordered_user_specs = list(new_user_specs) + list(kept_user_specs)
concretized_specs = [x for x in zip(ordered_user_specs, concrete_specs)]
for abstract, concrete in concretized_specs:
self._add_concrete_spec(abstract, concrete)
# Don't add if it's just included
if abstract in self.user_specs:
self._add_concrete_spec(abstract, concrete)

# zip truncates the longer list, which is exactly what we want here
return list(zip(new_user_specs, concrete_specs))
# Return the portion of the return value that is new
return concretized_specs[: len(new_user_specs)]

def _concretize_separately(self, tests=False):
"""Concretization strategy that concretizes separately one
@@ -1618,71 +1620,16 @@ def _concretize_separately(self, tests=False):
concrete = old_specs_by_hash[h]
self._add_concrete_spec(s, concrete, new=False)

# Concretize any new user specs that we haven't concretized yet
args, root_specs, i = [], [], 0
for uspec in self.user_specs:
if uspec not in old_concretized_user_specs:
root_specs.append(uspec)
args.append((i, str(uspec), tests))
i += 1
to_concretize = [
(root, None) for root in self.user_specs if root not in old_concretized_user_specs
]
concretized_specs = spack.concretize.concretize_separately(to_concretize, tests=tests)

# Ensure we don't try to bootstrap clingo in parallel
with spack.bootstrap.ensure_bootstrap_configuration():
spack.bootstrap.ensure_clingo_importable_or_raise()

# Ensure all the indexes have been built or updated, since
# otherwise the processes in the pool may timeout on waiting
# for a write lock. We do this indirectly by retrieving the
# provider index, which should in turn trigger the update of
# all the indexes if there's any need for that.
_ = spack.repo.PATH.provider_index

# Ensure we have compilers in compilers.yaml so that processes
# don't try to write the config file in parallel
_ = spack.compilers.all_compilers_config(spack.config.CONFIG)

# Early return if there is nothing to do
if len(args) == 0:
return []

# Solve the environment in parallel on Linux
start = time.time()
num_procs = min(len(args), spack.config.determine_number_of_jobs(parallel=True))

# TODO: support parallel concretization on macOS and Windows
msg = "Starting concretization"
if sys.platform not in ("darwin", "win32") and num_procs > 1:
msg += f" pool with {num_procs} processes"
tty.msg(msg)

batch = []
for j, (i, concrete, duration) in enumerate(
spack.util.parallel.imap_unordered(
_concretize_task,
args,
processes=num_procs,
debug=tty.is_debug(),
maxtaskperchild=1,
)
):
batch.append((i, concrete))
percentage = (j + 1) / len(args) * 100
tty.verbose(
f"{duration:6.1f}s [{percentage:3.0f}%] {concrete.cformat('{hash:7}')} "
f"{root_specs[i].colored_str}"
)
sys.stdout.flush()

# Add specs in original order
batch.sort(key=lambda x: x[0])
by_hash = {} # for attaching information on test dependencies
for root, (_, concrete) in zip(root_specs, batch):
self._add_concrete_spec(root, concrete)
by_hash = {}
for abstract, concrete in concretized_specs:
self._add_concrete_spec(abstract, concrete)
by_hash[concrete.dag_hash()] = concrete

finish = time.time()
tty.msg(f"Environment concretized in {finish - start:.2f} seconds")

# Unify the specs objects, so we get correct references to all parents
self._read_lockfile_dict(self._to_lockfile_dict())

@@ -1702,11 +1649,7 @@ def _concretize_separately(self, tests=False):
test_dependency.copy(), depflag=dt.TEST, virtuals=current_edge.virtuals
)

results = [
(abstract, self.specs_by_hash[h])
for abstract, h in zip(self.concretized_user_specs, self.concretized_order)
]
return results
return concretized_specs

@property
def default_view(self):
@@ -1954,17 +1897,16 @@ def install_specs(self, specs: Optional[List[Spec]] = None, **install_args):
specs = specs if specs is not None else roots

# Extend the set of specs to overwrite with modified dev specs and their parents
overwrite: Set[str] = set()
overwrite.update(install_args.get("overwrite", []), self._dev_specs_that_need_overwrite())
install_args["overwrite"] = overwrite
install_args["overwrite"] = {
*install_args.get("overwrite", ()),
*self._dev_specs_that_need_overwrite(),
}

explicit: Set[str] = set()
explicit.update(
install_args.get("explicit", []),
(s.dag_hash() for s in specs),
(s.dag_hash() for s in roots),
)
install_args["explicit"] = explicit
# Only environment roots are marked explicit
install_args["explicit"] = {
*install_args.get("explicit", ()),
*(s.dag_hash() for s in roots),
}

PackageInstaller([spec.package for spec in specs], **install_args).install()

@@ -2163,6 +2105,13 @@ def _concrete_specs_dict(self):
# Assumes no legacy formats, since this was just created.
spec_dict[ht.dag_hash.name] = s.dag_hash()
concrete_specs[s.dag_hash()] = spec_dict

if s.build_spec is not s:
for d in s.build_spec.traverse():
build_spec_dict = d.node_dict_with_hashes(hash=ht.dag_hash)
build_spec_dict[ht.dag_hash.name] = d.dag_hash()
concrete_specs[d.dag_hash()] = build_spec_dict

return concrete_specs

def _concrete_roots_dict(self):
@@ -2322,7 +2271,7 @@ def filter_specs(self, reader, json_specs_by_hash, order_concretized):
specs_by_hash[lockfile_key] = spec

# Second pass: For each spec, get its dependencies from the node dict
# and add them to the spec
# and add them to the spec, including build specs
for lockfile_key, node_dict in json_specs_by_hash.items():
name, data = reader.name_and_data(node_dict)
for _, dep_hash, deptypes, _, virtuals in reader.dependencies_from_node_dict(data):
@@ -2330,6 +2279,10 @@ def filter_specs(self, reader, json_specs_by_hash, order_concretized):
specs_by_hash[dep_hash], depflag=dt.canonicalize(deptypes), virtuals=virtuals
)

if "build_spec" in node_dict:
_, bhash, _ = reader.extract_build_spec_info_from_node_dict(node_dict)
specs_by_hash[lockfile_key]._build_spec = specs_by_hash[bhash]

# Traverse the root specs one at a time in the order they appear.
# The first time we see each DAG hash, that's the one we want to
# keep. This is only required as long as we support older lockfile
@@ -2503,14 +2456,6 @@ def display_specs(specs):
print(tree_string)


def _concretize_task(packed_arguments) -> Tuple[int, Spec, float]:
index, spec_str, tests = packed_arguments
with tty.SuppressOutput(msg_enabled=False):
start = time.time()
spec = Spec(spec_str).concretized(tests=tests)
return index, spec, time.time() - start


def make_repo_path(root):
"""Make a RepoPath from the repo subdirectories in an environment."""
path = spack.repo.RepoPath(cache=spack.caches.MISC_CACHE)
@@ -2789,6 +2734,11 @@ def remove_user_spec(self, user_spec: str) -> None:
raise SpackEnvironmentError(msg) from e
self.changed = True

def clear(self) -> None:
"""Clear all user specs from the list of root specs"""
self.configuration["specs"] = []
self.changed = True

def override_user_spec(self, user_spec: str, idx: int) -> None:
"""Overrides the user spec at index idx with the one passed as input.


@@ -48,8 +48,6 @@ def activate_header(env, shell, prompt=None, view: Optional[str] = None):
cmds += 'set "SPACK_ENV=%s"\n' % env.path
if view:
cmds += 'set "SPACK_ENV_VIEW=%s"\n' % view
# TODO: despacktivate
# TODO: prompt
elif shell == "pwsh":
cmds += "$Env:SPACK_ENV='%s'\n" % env.path
if view:

@@ -12,6 +12,9 @@

#: this is module-scoped because it needs to be set very early
debug = 0

#: whether to show a backtrace when an error is printed, enabled with --backtrace.
SHOW_BACKTRACE = False

class SpackError(Exception):
"""This is the superclass for all Spack errors.

@@ -33,6 +33,7 @@
from llnl.util.tty.color import colorize

import spack.config
import spack.directory_layout
import spack.paths
import spack.projections
import spack.relocate
@@ -50,7 +51,7 @@
_projections_path = ".spack/projections.yaml"


LinkCallbackType = Callable[[str, str, "FilesystemView", Optional["spack.spec.Spec"]], None]
LinkCallbackType = Callable[[str, str, "FilesystemView", Optional[spack.spec.Spec]], None]


def view_symlink(src: str, dst: str, *args, **kwargs) -> None:
@@ -62,7 +63,7 @@ def view_hardlink(src: str, dst: str, *args, **kwargs) -> None:


def view_copy(
src: str, dst: str, view: "FilesystemView", spec: Optional["spack.spec.Spec"] = None
src: str, dst: str, view: "FilesystemView", spec: Optional[spack.spec.Spec] = None
) -> None:
"""
Copy a file from src to dst.
@@ -100,10 +101,12 @@ def view_copy(

spack.relocate.relocate_text(files=[dst], prefixes=prefix_to_projection)

try:
os.chown(dst, src_stat.st_uid, src_stat.st_gid)
except OSError:
tty.debug(f"Can't change the permissions for {dst}")
# The os module on Windows does not have a chown function.
if sys.platform != "win32":
try:
os.chown(dst, src_stat.st_uid, src_stat.st_gid)
except OSError:
tty.debug(f"Can't change the permissions for {dst}")


#: supported string values for `link_type` in an env, mapped to canonical values
@@ -158,7 +161,7 @@ class FilesystemView:
def __init__(
self,
root: str,
layout: "spack.directory_layout.DirectoryLayout",
layout: spack.directory_layout.DirectoryLayout,
*,
projections: Optional[Dict] = None,
ignore_conflicts: bool = False,
@@ -180,7 +183,10 @@ def __init__(

# Setup link function to include view
self.link_type = link_type
self.link = ft.partial(function_for_link_type(link_type), view=self)
self._link = function_for_link_type(link_type)

def link(self, src: str, dst: str, spec: Optional[spack.spec.Spec] = None) -> None:
self._link(src, dst, self, spec)

def add_specs(self, *specs, **kwargs):
"""
@@ -281,7 +287,7 @@ class YamlFilesystemView(FilesystemView):
def __init__(
self,
root: str,
layout: "spack.directory_layout.DirectoryLayout",
layout: spack.directory_layout.DirectoryLayout,
*,
projections: Optional[Dict] = None,
ignore_conflicts: bool = False,

@@ -21,43 +21,40 @@
features.
"""
import importlib

from llnl.util.lang import ensure_last, list_modules

import spack.paths
import types
from typing import List, Optional


class _HookRunner:
#: Stores all hooks on first call, shared among
#: all HookRunner objects
_hooks = None
#: Order in which hooks are executed
HOOK_ORDER = [
"spack.hooks.module_file_generation",
"spack.hooks.licensing",
"spack.hooks.sbang",
"spack.hooks.windows_runtime_linkage",
"spack.hooks.drop_redundant_rpaths",
"spack.hooks.absolutify_elf_sonames",
"spack.hooks.permissions_setters",
# after all mutations to the install prefix, write metadata
"spack.hooks.write_install_manifest",
# after all metadata is written
"spack.hooks.autopush",
]

#: Contains all hook modules after first call, shared among all HookRunner objects
_hooks: Optional[List[types.ModuleType]] = None

def __init__(self, hook_name):
self.hook_name = hook_name

@classmethod
def _populate_hooks(cls):
# Lazily populate the list of hooks
cls._hooks = []

relative_names = list(list_modules(spack.paths.hooks_path))

# Ensure that write_install_manifest comes last
ensure_last(relative_names, "absolutify_elf_sonames", "write_install_manifest")

for name in relative_names:
module_name = __name__ + "." + name
module_obj = importlib.import_module(module_name)
cls._hooks.append((module_name, module_obj))

@property
def hooks(self):
def hooks(self) -> List[types.ModuleType]:
if not self._hooks:
self._populate_hooks()
self._hooks = [importlib.import_module(module_name) for module_name in self.HOOK_ORDER]
return self._hooks

def __call__(self, *args, **kwargs):
for _, module in self.hooks:
for module in self.hooks:
if hasattr(module, self.hook_name):
hook = getattr(module, self.hook_name)
if hasattr(hook, "__call__"):
|
||||
|
||||
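This rewrite trades filesystem discovery (`list_modules` plus an `ensure_last` ordering constraint) for an explicit `HOOK_ORDER` list whose modules are imported lazily and cached on first use. A minimal sketch of that pattern; stdlib modules stand in for hook modules so the example runs as-is:

```python
import importlib
import types
from typing import List, Optional


class HookRunner:
    """Lazily import a fixed, ordered list of hook modules on first use (a sketch)."""

    HOOK_ORDER = ["json", "math"]  # stand-ins for real hook module names
    _hooks: Optional[List[types.ModuleType]] = None  # cache shared by all instances

    def __init__(self, hook_name: str):
        self.hook_name = hook_name

    @property
    def hooks(self) -> List[types.ModuleType]:
        if self._hooks is None:
            # Import once, in the declared order; later instances reuse the cache.
            type(self)._hooks = [importlib.import_module(m) for m in self.HOOK_ORDER]
        return self._hooks

    def __call__(self, *args, **kwargs):
        # Run the hook in every module that defines it, in HOOK_ORDER.
        for module in self.hooks:
            hook = getattr(module, self.hook_name, None)
            if callable(hook):
                hook(*args, **kwargs)


HookRunner("dumps")({"ok": True})  # json.dumps exists; math has no "dumps" and is skipped
```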
@@ -2,8 +2,7 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module encapsulates package installation functionality.
"""This module encapsulates package installation functionality.

The PackageInstaller coordinates concurrent builds of packages for the same
Spack instance by leveraging the dependency DAG and file system locks. It
@@ -17,16 +16,18 @@
File system locks enable coordination such that no two processes attempt to
build the same or a failed dependency package.

Failures to install dependency packages result in removal of their dependents'
build tasks from the current process. A failure file is also written (and
locked) so that other processes can detect the failure and adjust their build
tasks accordingly.
If a dependency package fails to install, its dependents' tasks will be
removed from the installing process's queue. A failure file is also written
and locked. Other processes use this file to detect the failure and dequeue
its dependents.

This module supports the coordination of local and distributed concurrent
installations of packages in a Spack instance.

"""

import copy
import enum
import glob
import heapq
import io
@@ -42,6 +43,7 @@
import llnl.util.filesystem as fs
import llnl.util.lock as lk
import llnl.util.tty as tty
from llnl.string import ordinal
from llnl.util.lang import pretty_seconds
from llnl.util.tty.color import colorize
from llnl.util.tty.log import log_output
@@ -57,6 +59,7 @@
import spack.package_base
import spack.package_prefs as prefs
import spack.repo
import spack.rewiring
import spack.spec
import spack.store
import spack.util.executable
@@ -70,25 +73,32 @@
#: were added (see https://docs.python.org/2/library/heapq.html).
_counter = itertools.count(0)

#: Build status indicating task has been added.
STATUS_ADDED = "queued"

#: Build status indicating the spec failed to install
STATUS_FAILED = "failed"
class BuildStatus(enum.Enum):
    """Different build (task) states."""

#: Build status indicating the spec is being installed (possibly by another
#: process)
STATUS_INSTALLING = "installing"
    #: Build status indicating task has been added/queued.
    QUEUED = enum.auto()

#: Build status indicating the spec was successfully installed
STATUS_INSTALLED = "installed"
    #: Build status indicating the spec failed to install
    FAILED = enum.auto()

#: Build status indicating the task has been popped from the queue
STATUS_DEQUEUED = "dequeued"
    #: Build status indicating the spec is being installed (possibly by another
    #: process)
    INSTALLING = enum.auto()

#: Build status indicating task has been removed (to maintain priority
#: queue invariants).
STATUS_REMOVED = "removed"
    #: Build status indicating the spec was successfully installed
    INSTALLED = enum.auto()

    #: Build status indicating the task has been popped from the queue
    DEQUEUED = enum.auto()

    #: Build status indicating task has been removed (to maintain priority
    #: queue invariants).
    REMOVED = enum.auto()

    def __str__(self):
        return f"{self.name.lower()}"
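The old module-level string constants become a proper enum whose `__str__` lowercases the member name, so existing log messages keep reading "queued", "failed", and so on. A minimal sketch of that behavior:

```python
import enum


class BuildStatus(enum.Enum):
    """Task states; str() yields the lowercase member name for log messages."""

    QUEUED = enum.auto()
    FAILED = enum.auto()

    def __str__(self) -> str:
        return self.name.lower()


# str() interpolation now matches the old string constants:
assert str(BuildStatus.QUEUED) == "queued"
assert f"status: {BuildStatus.FAILED}" == "status: failed"
```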
def _write_timer_json(pkg, timer, cache):
@@ -101,13 +111,22 @@ def _write_timer_json(pkg, timer, cache):
        return


class InstallAction:
class ExecuteResult(enum.Enum):
    # Task succeeded
    SUCCESS = enum.auto()
    # Task failed
    FAILED = enum.auto()
    # Task is missing build spec and will be requeued
    MISSING_BUILD_SPEC = enum.auto()


class InstallAction(enum.Enum):
    #: Don't perform an install
    NONE = 0
    NONE = enum.auto()
    #: Do a standard install
    INSTALL = 1
    INSTALL = enum.auto()
    #: Do an overwrite install
    OVERWRITE = 2
    OVERWRITE = enum.auto()


class InstallStatus:
@@ -393,7 +412,7 @@ def _process_external_package(pkg: "spack.package_base.PackageBase", explicit: b
        tty.debug(f"{pre} already registered in DB")
        record = spack.store.STORE.db.get_record(spec)
        if explicit and not record.explicit:
            spack.store.STORE.db.update_explicit(spec, explicit)
            spack.store.STORE.db.mark(spec, "explicit", True)

    except KeyError:
        # If not, register it and generate the module file.
@@ -431,7 +450,7 @@ def _process_binary_cache_tarball(
    """
    with timer.measure("fetch"):
        download_result = binary_distribution.download_tarball(
            pkg.spec, unsigned, mirrors_for_spec
            pkg.spec.build_spec, unsigned, mirrors_for_spec
        )

        if download_result is None:
@@ -442,6 +461,11 @@ def _process_binary_cache_tarball(
    with timer.measure("install"), spack.util.path.filter_padding():
        binary_distribution.extract_tarball(pkg.spec, download_result, force=False, timer=timer)

        if pkg.spec.spliced:  # overwrite old metadata with new
            spack.store.STORE.layout.write_spec(
                pkg.spec, spack.store.STORE.layout.spec_file_path(pkg.spec)
            )

        if hasattr(pkg, "_post_buildcache_install_hook"):
            pkg._post_buildcache_install_hook()

@@ -677,7 +701,7 @@ def log(pkg: "spack.package_base.PackageBase") -> None:
def package_id(spec: "spack.spec.Spec") -> str:
    """A "unique" package identifier for installation purposes

    The identifier is used to track build tasks, locks, install, and
    The identifier is used to track tasks, locks, install, and
    failure statuses.

    The identifier needs to distinguish between combinations of compilers
@@ -736,14 +760,14 @@ def __init__(self, pkg: "spack.package_base.PackageBase", install_args: dict):
        )

    def __repr__(self) -> str:
        """Returns a formal representation of the build request."""
        """Return a formal representation of the build request."""
        rep = f"{self.__class__.__name__}("
        for attr, value in self.__dict__.items():
            rep += f"{attr}={value.__repr__()}, "
        return f"{rep.strip(', ')})"

    def __str__(self) -> str:
        """Returns a printable version of the build request."""
        """Return a printable version of the build request."""
        return f"package={self.pkg.name}, install_args={self.install_args}"

    def _add_default_args(self) -> None:
@@ -840,37 +864,42 @@ def traverse_dependencies(self, spec=None, visited=None) -> Iterator["spack.spec
            yield dep


class BuildTask:
    """Class for representing the build task for a package."""
class Task:
    """Base class for representing a task for a package."""

    def __init__(
        self,
        pkg: "spack.package_base.PackageBase",
        request: Optional[BuildRequest],
        compiler: bool,
        start: float,
        attempts: int,
        status: str,
        installed: Set[str],
        request: BuildRequest,
        *,
        compiler: bool = False,
        start: float = 0.0,
        attempts: int = 0,
        status: BuildStatus = BuildStatus.QUEUED,
        installed: Set[str] = set(),
    ):
        """
        Instantiate a build task for a package.
        Instantiate a task for a package.

        Args:
            pkg: the package to be built and installed
            request: the associated install request where ``None`` can be
                used to indicate the package was explicitly requested by the user
            compiler: whether task is for a bootstrap compiler
            request: the associated install request
            start: the initial start time for the package, in seconds
            attempts: the number of attempts to install the package
            attempts: the number of attempts to install the package, which
                should be 0 when the task is initially instantiated
            status: the installation status
            installed: the identifiers of packages that have
            installed: the (string) identifiers of packages that have
                been installed so far

        Raises:
            ``InstallError`` if the build status is incompatible with the task
            ``TypeError`` if provided an argument of the wrong type
            ``ValueError`` if provided an argument with the wrong value or state
        """

        # Ensure dealing with a package that has a concrete spec
        if not isinstance(pkg, spack.package_base.PackageBase):
            raise ValueError(f"{str(pkg)} must be a package")
            raise TypeError(f"{str(pkg)} must be a package")

        self.pkg = pkg
        if not self.pkg.spec.concrete:
@@ -881,26 +910,34 @@ def __init__(

        # The explicit build request associated with the package
        if not isinstance(request, BuildRequest):
            raise ValueError(f"{str(pkg)} must have a build request")

            raise TypeError(f"{request} is not a valid build request")
        self.request = request

        # Initialize the status to an active state. The status is used to
        # ensure priority queue invariants when tasks are "removed" from the
        # queue.
        if status == STATUS_REMOVED:
            raise spack.error.InstallError(
                f"Cannot create a build task for {self.pkg_id} with status '{status}'", pkg=pkg
            )
        if not isinstance(status, BuildStatus):
            raise TypeError(f"{status} is not a valid build status")

        # The initial build task cannot have status "removed".
        if attempts == 0 and status == BuildStatus.REMOVED:
            raise spack.error.InstallError(
                f"Cannot create a task for {self.pkg_id} with status '{status}'", pkg=pkg
            )
        self.status = status

        # Package is associated with a bootstrap compiler
        self.compiler = compiler
        # cache the PID, which is used for distributed build messages in self.execute
        self.pid = os.getpid()

        # The initial start time for processing the spec
        self.start = start

        if not isinstance(installed, set):
            raise TypeError(
                f"BuildTask constructor requires 'installed' be a 'set', "
                f"not '{installed.__class__.__name__}'."
            )

        # Set of dependents, which needs to include the requesting package
        # to support tracking of parallel, multi-spec, environment installs.
        self.dependents = set(get_dependent_ids(self.pkg.spec))
@@ -921,16 +958,22 @@ def __init__(
        )

        # List of uninstalled dependencies, which is used to establish
        # the priority of the build task.
        #
        # the priority of the task.
        self.uninstalled_deps = set(
            pkg_id for pkg_id in self.dependencies if pkg_id not in installed
        )

        # Ensure key sequence-related properties are updated accordingly.
        self.attempts = 0
        self.attempts = attempts
        self._update()

    def execute(self, install_status: InstallStatus) -> ExecuteResult:
        """Execute the work of this task.

        The ``install_status`` is an ``InstallStatus`` object used to format progress reporting for
        this task in the context of the full ``BuildRequest``."""
        raise NotImplementedError

    def __eq__(self, other):
        return self.key == other.key

@@ -950,14 +993,14 @@ def __ne__(self, other):
        return self.key != other.key

    def __repr__(self) -> str:
        """Returns a formal representation of the build task."""
        """Returns a formal representation of the task."""
        rep = f"{self.__class__.__name__}("
        for attr, value in self.__dict__.items():
            rep += f"{attr}={value.__repr__()}, "
        return f"{rep.strip(', ')})"

    def __str__(self) -> str:
        """Returns a printable version of the build task."""
        """Returns a printable version of the task."""
        dependencies = f"#dependencies={len(self.dependencies)}"
        return "priority={0}, status={1}, start={2}, {3}".format(
            self.priority, self.status, self.start, dependencies
@@ -974,8 +1017,7 @@ def _update(self) -> None:

    def add_dependent(self, pkg_id: str) -> None:
        """
        Ensure the dependent package id is in the task's list so it will be
        properly updated when this package is installed.
        Ensure the package is in this task's ``dependents`` list.

        Args:
            pkg_id: package identifier of the dependent package
@@ -984,6 +1026,20 @@ def add_dependent(self, pkg_id: str) -> None:
            tty.debug(f"Adding {pkg_id} as a dependent of {self.pkg_id}")
            self.dependents.add(pkg_id)

    def add_dependency(self, pkg_id, installed=False):
        """
        Ensure the package is in this task's ``dependencies`` list.

        Args:
            pkg_id (str): package identifier of the dependency package
            installed (bool): install status of the dependency package
        """
        if pkg_id != self.pkg_id and pkg_id not in self.dependencies:
            tty.debug(f"Adding {pkg_id} as a dependency of {self.pkg_id}")
            self.dependencies.add(pkg_id)
            if not installed:
                self.uninstalled_deps.add(pkg_id)

    def flag_installed(self, installed: List[str]) -> None:
        """
        Ensure the dependency is not considered to still be uninstalled.
@@ -1000,6 +1056,39 @@ def flag_installed(self, installed: List[str]) -> None:
                level=2,
            )

    def _setup_install_dir(self, pkg: "spack.package_base.PackageBase") -> None:
        """
        Create and ensure proper access controls for the install directory.
        Write a small metadata file with the current spack environment.

        Args:
            pkg: the package to be built and installed
        """
        # Move to a module level method.
        if not os.path.exists(pkg.spec.prefix):
            path = spack.util.path.debug_padded_filter(pkg.spec.prefix)
            tty.debug(f"Creating the installation directory {path}")
            spack.store.STORE.layout.create_install_directory(pkg.spec)
        else:
            # Set the proper group for the prefix
            group = prefs.get_package_group(pkg.spec)
            if group:
                fs.chgrp(pkg.spec.prefix, group)

            # Set the proper permissions.
            # This has to be done after group because changing groups blows
            # away the sticky group bit on the directory
            mode = os.stat(pkg.spec.prefix).st_mode
            perms = prefs.get_package_dir_permissions(pkg.spec)
            if mode != perms:
                os.chmod(pkg.spec.prefix, perms)

            # Ensure the metadata path exists as well
            fs.mkdirp(spack.store.STORE.layout.metadata_path(pkg.spec), mode=perms)

        # Always write host environment - we assume this can change
        spack.store.STORE.layout.write_host_environment(pkg.spec)

    @property
    def explicit(self) -> bool:
        return self.pkg.spec.dag_hash() in self.request.install_args.get("explicit", [])
@@ -1030,7 +1119,7 @@ def key(self) -> Tuple[int, int]:
        """The key is the tuple (# uninstalled dependencies, sequence)."""
        return (self.priority, self.sequence)

    def next_attempt(self, installed) -> "BuildTask":
    def next_attempt(self, installed) -> "Task":
        """Create a new, updated task for the next installation attempt."""
        task = copy.copy(self)
        task._update()
@@ -1044,6 +1133,100 @@ def priority(self):
        return len(self.uninstalled_deps)
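Tasks are ordered by the `(priority, sequence)` tuple: priority is the number of uninstalled dependencies, and the monotonically increasing sequence number breaks ties in insertion order, per the heapq documentation the comments cite. A small sketch of that ordering, with hypothetical package names:

```python
import heapq
import itertools

_counter = itertools.count(0)  # tie-breaker so equal priorities pop FIFO

pq = []
for name, uninstalled_deps in [("zlib", 0), ("cmake", 0), ("hdf5", 2)]:
    # Key = (# uninstalled dependencies, insertion sequence)
    heapq.heappush(pq, ((uninstalled_deps, next(_counter)), name))

# Dependency-free tasks come out first, in the order they were queued.
assert [heapq.heappop(pq)[1] for _ in range(3)] == ["zlib", "cmake", "hdf5"]
```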
class BuildTask(Task):
    """Class for representing a build task for a package."""

    def execute(self, install_status):
        """
        Perform the installation of the requested spec and/or dependency
        represented by the build task.
        """
        install_args = self.request.install_args
        tests = install_args.get("tests")
        unsigned = install_args.get("unsigned")

        pkg, pkg_id = self.pkg, self.pkg_id

        tty.msg(install_msg(pkg_id, self.pid, install_status))
        self.start = self.start or time.time()
        self.status = BuildStatus.INSTALLING

        # Use the binary cache if requested
        if self.use_cache:
            if _install_from_cache(pkg, self.explicit, unsigned):
                return ExecuteResult.SUCCESS
            elif self.cache_only:
                raise spack.error.InstallError(
                    "No binary found when cache-only was specified", pkg=pkg
                )
            else:
                tty.msg(f"No binary for {pkg_id} found: installing from source")

        pkg.run_tests = tests is True or tests and pkg.name in tests

        # hook that allows tests to inspect the Package before installation
        # see unit_test_check() docs.
        if not pkg.unit_test_check():
            return ExecuteResult.FAILED

        try:
            # Create stage object now and let it be serialized for the child process. That
            # way monkeypatch in tests works correctly.
            pkg.stage

            self._setup_install_dir(pkg)

            # Create a child process to do the actual installation.
            # Preserve verbosity settings across installs.
            spack.package_base.PackageBase._verbose = spack.build_environment.start_build_process(
                pkg, build_process, install_args
            )

            # Note: PARENT of the build process adds the new package to
            # the database, so that we don't need to re-read from file.
            spack.store.STORE.db.add(pkg.spec, explicit=self.explicit)
        except spack.error.StopPhase as e:
            # A StopPhase exception means that do_install was asked to
            # stop early from clients, and is not an error at this point
            pid = f"{self.pid}: " if tty.show_pid() else ""
            tty.debug(f"{pid}{str(e)}")
            tty.debug(f"Package stage directory: {pkg.stage.source_path}")
        return ExecuteResult.SUCCESS


class RewireTask(Task):
    """Class for representing a rewire task for a package."""

    def execute(self, install_status):
        """Execute rewire task

        Rewire tasks are executed either by rewiring self.pkg.spec.build_spec, which is already
        installed, or by downloading and rewiring a binary for it.

        If the build spec is neither installed nor available as a binary, return
        ExecuteResult.MISSING_BUILD_SPEC. This prompts the Installer to requeue the task with a
        dependency on the BuildTask to install self.pkg.spec.build_spec.
        """
        oldstatus = self.status
        self.status = BuildStatus.INSTALLING
        tty.msg(install_msg(self.pkg_id, self.pid, install_status))
        self.start = self.start or time.time()
        if not self.pkg.spec.build_spec.installed:
            try:
                install_args = self.request.install_args
                unsigned = install_args.get("unsigned")
                _process_binary_cache_tarball(self.pkg, explicit=self.explicit, unsigned=unsigned)
                _print_installed_pkg(self.pkg.prefix)
                return ExecuteResult.SUCCESS
            except BaseException as e:
                tty.error(f"Failed to rewire {self.pkg.spec} from binary. {e}")
                self.status = oldstatus
                return ExecuteResult.MISSING_BUILD_SPEC
        spack.rewiring.rewire_node(self.pkg.spec, self.explicit)
        _print_installed_pkg(self.pkg.prefix)
        return ExecuteResult.SUCCESS


class PackageInstaller:
    """
    Class for managing the install process for a Spack instance based on a bottom-up DAG approach.
@@ -1137,11 +1320,11 @@ def __init__(
        # List of build requests
        self.build_requests = [BuildRequest(pkg, install_args) for pkg in packages]

        # Priority queue of build tasks
        self.build_pq: List[Tuple[Tuple[int, int], BuildTask]] = []
        # Priority queue of tasks
        self.build_pq: List[Tuple[Tuple[int, int], Task]] = []

        # Mapping of unique package ids to build task
        self.build_tasks: Dict[str, BuildTask] = {}
        # Mapping of unique package ids to task
        self.build_tasks: Dict[str, Task] = {}

        # Cache of package locks for failed packages, keyed on package's ids
        self.failed: Dict[str, Optional[lk.Lock]] = {}
@@ -1162,6 +1345,9 @@ def __init__(
        # fast then that option applies to all build requests.
        self.fail_fast = False

        # Initializing all_dependencies to empty. This will be set later in _init_queue.
        self.all_dependencies: Dict[str, Set[str]] = {}

    def __repr__(self) -> str:
        """Returns a formal representation of the package installer."""
        rep = f"{self.__class__.__name__}("
@@ -1180,23 +1366,19 @@ def __str__(self) -> str:
    def _add_init_task(
        self,
        pkg: "spack.package_base.PackageBase",
        request: Optional[BuildRequest],
        is_compiler: bool,
        request: BuildRequest,
        all_deps: Dict[str, Set[str]],
    ) -> None:
        """
        Creates and queus the initial build task for the package.
        Creates and queues the initial task for the package.

        Args:
            pkg: the package to be built and installed
            request (BuildRequest or None): the associated install request
                where ``None`` can be used to indicate the package was
                explicitly requested by the user
            is_compiler (bool): whether task is for a bootstrap compiler
            all_deps (defaultdict(set)): dictionary of all dependencies and
                associated dependents
            request: the associated install request
            all_deps: dictionary of all dependencies and associated dependents
        """
        task = BuildTask(pkg, request, is_compiler, 0, 0, STATUS_ADDED, self.installed)
        cls = RewireTask if pkg.spec.spliced else BuildTask
        task = cls(pkg, request=request, status=BuildStatus.QUEUED, installed=self.installed)
        for dep_id in task.dependencies:
            all_deps[dep_id].add(package_id(pkg.spec))

@@ -1270,7 +1452,7 @@ def _check_deps_status(self, request: BuildRequest) -> None:
            else:
                lock.release_read()

    def _prepare_for_install(self, task: BuildTask) -> None:
    def _prepare_for_install(self, task: Task) -> None:
        """
        Check the database and leftover installation directories/files and
        prepare for a new install attempt for an uninstalled package.
@@ -1278,7 +1460,7 @@ def _prepare_for_install(self, task: BuildTask) -> None:
        and ensuring the database is up-to-date.

        Args:
            task (BuildTask): the build task whose associated package is
            task: the task whose associated package is
                being checked
        """
        install_args = task.request.install_args
@@ -1325,11 +1507,11 @@ def _prepare_for_install(self, task: BuildTask) -> None:
            self._update_installed(task)

            # Only update the explicit entry once for the explicit package
            if task.explicit:
                spack.store.STORE.db.update_explicit(task.pkg.spec, True)
            if task.explicit and not rec.explicit:
                spack.store.STORE.db.mark(task.pkg.spec, "explicit", True)

    def _cleanup_all_tasks(self) -> None:
        """Cleanup all build tasks to include releasing their locks."""
        """Cleanup all tasks to include releasing their locks."""
        for pkg_id in self.locks:
            self._release_lock(pkg_id)

@@ -1361,7 +1543,7 @@ def _cleanup_failed(self, pkg_id: str) -> None:

    def _cleanup_task(self, pkg: "spack.package_base.PackageBase") -> None:
        """
        Cleanup the build task for the spec
        Cleanup the task for the spec

        Args:
            pkg: the package being installed
@@ -1433,7 +1615,7 @@ def _ensure_locked(

        if lock_type == "read":
            # Wait until the other process finishes if there are no more
            # build tasks with priority 0 (i.e., with no uninstalled
            # tasks with priority 0 (i.e., with no uninstalled
            # dependencies).
            no_p0 = len(self.build_tasks) == 0 or not self._next_is_pri0()
            timeout = None if no_p0 else 3.0
@@ -1485,6 +1667,33 @@ def _ensure_locked(
        self.locks[pkg_id] = (lock_type, lock)
        return self.locks[pkg_id]

    def _requeue_with_build_spec_tasks(self, task):
        """Requeue the task and its missing build spec dependencies"""
        # Full install of the build_spec is necessary because it didn't already exist somewhere
        spec = task.pkg.spec
        for dep in spec.build_spec.traverse():
            dep_pkg = dep.package

            dep_id = package_id(dep)
            if dep_id not in self.build_tasks:
                self._add_init_task(dep_pkg, task.request, self.all_dependencies)

            # Clear any persistent failure markings _unless_ they are
            # associated with another process in this parallel build
            # of the spec.
            spack.store.STORE.failure_tracker.clear(dep, force=False)

        # Queue the build spec.
        build_pkg_id = package_id(spec.build_spec)
        build_spec_task = self.build_tasks[build_pkg_id]
        spec_pkg_id = package_id(spec)
        spec_task = task.next_attempt(self.installed)
        spec_task.status = BuildStatus.QUEUED
        # Convey a build spec as a dependency of a deployed spec.
        build_spec_task.add_dependent(spec_pkg_id)
        spec_task.add_dependency(build_pkg_id)
        self._push_task(spec_task)

    def _add_tasks(self, request: BuildRequest, all_deps):
        """Add tasks to the priority queue for the given build request.

@@ -1514,7 +1723,7 @@ def _add_tasks(self, request: BuildRequest, all_deps):

            dep_id = package_id(dep)
            if dep_id not in self.build_tasks:
                self._add_init_task(dep_pkg, request, False, all_deps)
                self._add_init_task(dep_pkg, request, all_deps=all_deps)

            # Clear any persistent failure markings _unless_ they are
            # associated with another process in this parallel build
@@ -1532,80 +1741,29 @@ def _add_tasks(self, request: BuildRequest, all_deps):
        self._check_deps_status(request)

        # Now add the package itself, if appropriate
        self._add_init_task(request.pkg, request, False, all_deps)
        self._add_init_task(request.pkg, request, all_deps=all_deps)

        # Ensure if one request is to fail fast then all requests will.
        fail_fast = bool(request.install_args.get("fail_fast"))
        self.fail_fast = self.fail_fast or fail_fast

    def _install_task(self, task: BuildTask, install_status: InstallStatus) -> None:
    def _install_task(self, task: Task, install_status: InstallStatus) -> None:
        """
        Perform the installation of the requested spec and/or dependency
        represented by the build task.
        represented by the task.

        Args:
            task: the installation build task for a package
            task: the installation task for a package
            install_status: the installation status for the package"""

        explicit = task.explicit
        install_args = task.request.install_args
        cache_only = task.cache_only
        use_cache = task.use_cache
        tests = install_args.get("tests", False)
        assert isinstance(tests, (bool, list))  # make mypy happy.
        unsigned: Optional[bool] = install_args.get("unsigned")

        pkg, pkg_id = task.pkg, task.pkg_id

        tty.msg(install_msg(pkg_id, self.pid, install_status))
        task.start = task.start or time.time()
        task.status = STATUS_INSTALLING

        # Use the binary cache if requested
        if use_cache:
            if _install_from_cache(pkg, explicit, unsigned):
                self._update_installed(task)
                return
            elif cache_only:
                raise spack.error.InstallError(
                    "No binary found when cache-only was specified", pkg=pkg
                )
            else:
                tty.msg(f"No binary for {pkg_id} found: installing from source")

        pkg.run_tests = tests if isinstance(tests, bool) else pkg.name in tests

        # hook that allows tests to inspect the Package before installation
        # see unit_test_check() docs.
        if not pkg.unit_test_check():
            return

        try:
            self._setup_install_dir(pkg)

            # Create stage object now and let it be serialized for the child process. That
            # way monkeypatch in tests works correctly.
            pkg.stage

            # Create a child process to do the actual installation.
            # Preserve verbosity settings across installs.
            spack.package_base.PackageBase._verbose = spack.build_environment.start_build_process(
                pkg, build_process, install_args
            )
            # Note: PARENT of the build process adds the new package to
            # the database, so that we don't need to re-read from file.
            spack.store.STORE.db.add(pkg.spec, explicit=explicit)

        except spack.error.StopPhase as e:
            # A StopPhase exception means that the installer was asked to stop early from clients,
            # and is not an error at this point
            pid = f"{self.pid}: " if tty.show_pid() else ""
            tty.debug(f"{pid}{str(e)}")
            tty.debug(f"Package stage directory: {pkg.stage.source_path}")
        rc = task.execute(install_status)
        if rc == ExecuteResult.MISSING_BUILD_SPEC:
            self._requeue_with_build_spec_tasks(task)
        else:  # if rc == ExecuteResult.SUCCESS or rc == ExecuteResult.FAILED
            self._update_installed(task)

    def _next_is_pri0(self) -> bool:
        """
        Determine if the next build task has priority 0
        Determine if the next task has priority 0

        Return:
            True if it does, False otherwise
@@ -1615,31 +1773,31 @@ def _next_is_pri0(self) -> bool:
        task = self.build_pq[0][1]
        return task.priority == 0

    def _pop_task(self) -> Optional[BuildTask]:
    def _pop_task(self) -> Optional[Task]:
        """
        Remove and return the lowest priority build task.
        Remove and return the lowest priority task.

        Source: Variant of function at docs.python.org/2/library/heapq.html
        """
        while self.build_pq:
            task = heapq.heappop(self.build_pq)[1]
            if task.status != STATUS_REMOVED:
            if task.status != BuildStatus.REMOVED:
                del self.build_tasks[task.pkg_id]
                task.status = STATUS_DEQUEUED
                task.status = BuildStatus.DEQUEUED
                return task
        return None
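`_pop_task` keeps following the heapq priority-queue recipe its docstring cites: entries are never deleted from the heap directly; they are marked removed and skipped when popped. A minimal sketch of that lazy-deletion pattern:

```python
import heapq
import itertools

heap = []            # entries are [priority, sequence, name]
entry_finder = {}    # name -> entry
counter = itertools.count()
REMOVED = "<removed>"


def push(name, priority):
    entry = [priority, next(counter), name]
    entry_finder[name] = entry
    heapq.heappush(heap, entry)


def remove(name):
    # Mark as removed instead of re-heapifying; pop() skips these later.
    entry_finder.pop(name)[2] = REMOVED


def pop():
    while heap:
        priority, _, name = heapq.heappop(heap)
        if name is not REMOVED:
            del entry_finder[name]
            return name
    return None


push("zlib", 0)
push("hdf5", 1)
remove("zlib")
assert pop() == "hdf5"
```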
    def _push_task(self, task: BuildTask) -> None:
    def _push_task(self, task: Task) -> None:
        """
        Push (or queue) the specified build task for the package.
        Push (or queue) the specified task for the package.

        Source: Customization of "add_task" function at
        docs.python.org/2/library/heapq.html

        Args:
            task: the installation build task for a package
            task: the installation task for a package
        """
        msg = "{0} a build task for {1} with status '{2}'"
        msg = "{0} a task for {1} with status '{2}'"
        skip = "Skipping requeue of task for {0}: {1}"

        # Ensure do not (re-)queue installed or failed packages whose status
@@ -1652,9 +1810,11 @@ def _push_task(self, task: BuildTask) -> None:
            tty.debug(skip.format(task.pkg_id, "failed"))
            return

        # Remove any associated build task since its sequence will change
        # Remove any associated task since its sequence will change
        self._remove_task(task.pkg_id)
        desc = "Queueing" if task.attempts == 0 else "Requeueing"
        desc = (
            "Queueing" if task.attempts == 1 else f"Requeueing ({ordinal(task.attempts)} attempt)"
        )
        tty.debug(msg.format(desc, task.pkg_id, task.status))

        # Now add the new task to the queue with a new sequence number to
@@ -1685,9 +1845,9 @@ def _release_lock(self, pkg_id: str) -> None:
        except Exception as exc:
            tty.warn(err.format(exc.__class__.__name__, ltype, pkg_id, str(exc)))

    def _remove_task(self, pkg_id: str) -> Optional[BuildTask]:
    def _remove_task(self, pkg_id: str) -> Optional[Task]:
        """
        Mark the existing package build task as being removed and return it.
        Mark the existing package task as being removed and return it.
        Raises KeyError if not found.

        Source: Variant of function at docs.python.org/2/library/heapq.html
@@ -1696,71 +1856,39 @@ def _remove_task(self, pkg_id: str) -> Optional[BuildTask]:
            pkg_id: identifier for the package to be removed
        """
        if pkg_id in self.build_tasks:
            tty.debug(f"Removing build task for {pkg_id} from list")
            tty.debug(f"Removing task for {pkg_id} from list")
            task = self.build_tasks.pop(pkg_id)
            task.status = STATUS_REMOVED
            task.status = BuildStatus.REMOVED
            return task
        else:
            return None

    def _requeue_task(self, task: BuildTask, install_status: InstallStatus) -> None:
    def _requeue_task(self, task: Task, install_status: InstallStatus) -> None:
        """
        Requeues a task that appears to be in progress by another process.

        Args:
            task (BuildTask): the installation build task for a package
            task (Task): the installation task for a package
        """
        if task.status not in [STATUS_INSTALLED, STATUS_INSTALLING]:
        if task.status not in [BuildStatus.INSTALLED, BuildStatus.INSTALLING]:
            tty.debug(
                f"{install_msg(task.pkg_id, self.pid, install_status)} "
                "in progress by another process"
            )

        new_task = task.next_attempt(self.installed)
        new_task.status = STATUS_INSTALLING
        new_task.status = BuildStatus.INSTALLING
        self._push_task(new_task)

    def _setup_install_dir(self, pkg: "spack.package_base.PackageBase") -> None:
        """
        Create and ensure proper access controls for the install directory.
        Write a small metadata file with the current spack environment.

        Args:
            pkg: the package to be built and installed
        """
        if not os.path.exists(pkg.spec.prefix):
            path = spack.util.path.debug_padded_filter(pkg.spec.prefix)
            tty.debug(f"Creating the installation directory {path}")
            spack.store.STORE.layout.create_install_directory(pkg.spec)
        else:
            # Set the proper group for the prefix
            group = prefs.get_package_group(pkg.spec)
            if group:
                fs.chgrp(pkg.spec.prefix, group)

            # Set the proper permissions.
            # This has to be done after group because changing groups blows
            # away the sticky group bit on the directory
            mode = os.stat(pkg.spec.prefix).st_mode
            perms = prefs.get_package_dir_permissions(pkg.spec)
            if mode != perms:
                os.chmod(pkg.spec.prefix, perms)

            # Ensure the metadata path exists as well
            fs.mkdirp(spack.store.STORE.layout.metadata_path(pkg.spec), mode=perms)

        # Always write host environment - we assume this can change
        spack.store.STORE.layout.write_host_environment(pkg.spec)

    def _update_failed(
        self, task: BuildTask, mark: bool = False, exc: Optional[BaseException] = None
        self, task: Task, mark: bool = False, exc: Optional[BaseException] = None
    ) -> None:
        """
        Update the task and transitive dependents as failed; optionally mark
        externally as failed; and remove associated build tasks.
        externally as failed; and remove associated tasks.

        Args:
            task: the build task for the failed package
            task: the task for the failed package
            mark: ``True`` if the package and its dependencies are to
                be marked as "failed", otherwise, ``False``
            exc: optional exception if associated with the failure
@@ -1772,34 +1900,34 @@ def _update_failed(
            self.failed[pkg_id] = spack.store.STORE.failure_tracker.mark(task.pkg.spec)
        else:
            self.failed[pkg_id] = None
        task.status = STATUS_FAILED
        task.status = BuildStatus.FAILED

        for dep_id in task.dependents:
            if dep_id in self.build_tasks:
                tty.warn(f"Skipping build of {dep_id} since {pkg_id} failed")
                # Ensure the dependent's uninstalled dependents are
                # up-to-date and their build tasks removed.
                # up-to-date and their tasks removed.
                dep_task = self.build_tasks[dep_id]
                self._update_failed(dep_task, mark)
                self._remove_task(dep_id)
            else:
                tty.debug(f"No build task for {dep_id} to skip since {pkg_id} failed")
                tty.debug(f"No task for {dep_id} to skip since {pkg_id} failed")

    def _update_installed(self, task: BuildTask) -> None:
    def _update_installed(self, task: Task) -> None:
        """
        Mark the task as installed and ensure dependent build tasks are aware.
        Mark the task as installed and ensure dependent tasks are aware.

        Args:
            task (BuildTask): the build task for the installed package
            task: the task for the installed package
        """
        task.status = STATUS_INSTALLED
        task.status = BuildStatus.INSTALLED
        self._flag_installed(task.pkg, task.dependents)

    def _flag_installed(
        self, pkg: "spack.package_base.PackageBase", dependent_ids: Optional[Set[str]] = None
    ) -> None:
        """
        Flag the package as installed and ensure known by all build tasks of
        Flag the package as installed and ensure known by all tasks of
        known dependents.

        Args:
@@ -1827,7 +1955,7 @@ def _flag_installed(
                dep_task = self.build_tasks[dep_id]
                self._push_task(dep_task.next_attempt(self.installed))
            else:
                tty.debug(f"{dep_id} has no build task to update for {pkg_id}'s success")
                tty.debug(f"{dep_id} has no task to update for {pkg_id}'s success")

    def _init_queue(self) -> None:
        """Initialize the build queue from the list of build requests."""
@@ -1846,8 +1974,9 @@ def _init_queue(self) -> None:
            task = self.build_tasks[dep_id]
            for dependent_id in dependents.difference(task.dependents):
                task.add_dependent(dependent_id)
        self.all_dependencies = all_dependencies

    def _install_action(self, task: BuildTask) -> int:
    def _install_action(self, task: Task) -> InstallAction:
        """
        Determine whether the installation should be overwritten (if it already
        exists) or skipped (if it has been handled by another process).
@@ -1995,7 +2124,6 @@ def install(self) -> None:
                    self._update_installed(task)
                    path = spack.util.path.debug_padded_filter(pkg.prefix)
                    _print_installed_pkg(path)

                else:
                    # At this point we've failed to get a write or a read
                    # lock, which means another process has taken a write
@@ -2035,8 +2163,6 @@ def install(self) -> None:
                    # wrapper -- silence mypy
                    OverwriteInstall(self, spack.store.STORE.db, task, install_status).install()  # type: ignore[arg-type] # noqa: E501

                    self._update_installed(task)

                # If we installed then we should keep the prefix
                stop_before_phase = getattr(pkg, "stop_before_phase", None)
                last_phase = getattr(pkg, "last_phase", None)
@@ -2080,7 +2206,9 @@ def install(self) -> None:
                )
                # Terminate if requested to do so on the first failure.
                if self.fail_fast:
                    raise spack.error.InstallError(f"{fail_fast_err}: {str(exc)}", pkg=pkg)
                    raise spack.error.InstallError(
                        f"{fail_fast_err}: {str(exc)}", pkg=pkg
                    ) from exc

                # Terminate when a single build request has failed, or summarize errors later.
                if task.is_build_request:
@@ -2096,7 +2224,8 @@ def install(self) -> None:

            # Perform basic task cleanup for the installed spec to
            # include downgrading the write to a read lock
            self._cleanup_task(pkg)
            if pkg.spec.installed:
                self._cleanup_task(pkg)

        # Cleanup, which includes releasing all of the read locks
        self._cleanup_all_tasks()
@@ -2365,6 +2494,15 @@ def build_process(pkg: "spack.package_base.PackageBase", install_args: dict) ->

def deprecate(spec: "spack.spec.Spec", deprecator: "spack.spec.Spec", link_fn) -> None:
    """Deprecate this package in favor of deprecator spec"""
    # Here we assume we don't deprecate across different stores, and that same hash
    # means same binary artifacts
    if spec.dag_hash() == deprecator.dag_hash():
        return

    # We can't really have control over external specs, and cannot link anything in their place
    if spec.external:
        return

    # Install deprecator if it isn't installed already
    if not spack.store.STORE.db.query(deprecator):
        PackageInstaller([deprecator.package], explicit=True).install()
@@ -2395,7 +2533,7 @@ def __init__(
        self,
        installer: PackageInstaller,
        database: spack.database.Database,
        task: BuildTask,
        task: Task,
        install_status: InstallStatus,
    ):
        self.installer = installer
@@ -102,9 +102,6 @@

spack_ld_library_path = os.environ.get("LD_LIBRARY_PATH", "")

#: Whether to print backtraces on error
SHOW_BACKTRACE = False


def add_all_commands(parser):
    """Add all spack subcommands to the parser."""
@@ -492,6 +489,7 @@ def make_argument_parser(**kwargs):
        help="add stacktraces to all printed statements",
    )
    parser.add_argument(
        "-t",
        "--backtrace",
        action="store_true",
        default="SPACK_BACKTRACE" in os.environ,
@@ -527,8 +525,7 @@ def setup_main_options(args):

    if args.debug or args.backtrace:
        spack.error.debug = True
        global SHOW_BACKTRACE
        SHOW_BACKTRACE = True
        spack.error.SHOW_BACKTRACE = True

    if args.debug:
        spack.util.debug.register_interrupt_handler()
@@ -914,13 +911,6 @@ def _main(argv=None):
    # Make spack load / env activate work on macOS
    restore_macos_dyld_vars()

    # make spack.config aware of any command line configuration scopes
    if args.config_scopes:
        spack.config.COMMAND_LINE_SCOPES = args.config_scopes

    # ensure options on spack command come before everything
    setup_main_options(args)

    # activate an environment if one was specified on the command line
    env_format_error = None
    if not args.no_env:
@@ -934,6 +924,12 @@ def _main(argv=None):
            e.print_context()
            env_format_error = e

    # Push scopes from the command line last
    if args.config_scopes:
        spack.config._add_command_line_scopes(spack.config.CONFIG, args.config_scopes)
    spack.config.CONFIG.push_scope(spack.config.InternalConfigScope("command_line"))
    setup_main_options(args)

    # ------------------------------------------------------------------------
    # Things that require configuration should go below here
    # ------------------------------------------------------------------------
@@ -1021,19 +1017,19 @@ def main(argv=None):
        e.die()  # gracefully die on any SpackErrors

    except KeyboardInterrupt:
        if spack.config.get("config:debug") or SHOW_BACKTRACE:
        if spack.config.get("config:debug") or spack.error.SHOW_BACKTRACE:
            raise
        sys.stderr.write("\n")
        tty.error("Keyboard interrupt.")
        return signal.SIGINT.value

    except SystemExit as e:
        if spack.config.get("config:debug") or SHOW_BACKTRACE:
        if spack.config.get("config:debug") or spack.error.SHOW_BACKTRACE:
            traceback.print_exc()
        return e.code

    except Exception as e:
        if spack.config.get("config:debug") or SHOW_BACKTRACE:
        if spack.config.get("config:debug") or spack.error.SHOW_BACKTRACE:
            raise
        tty.error(e)
        return 3
@@ -29,7 +29,6 @@
import spack.config
import spack.error
import spack.fetch_strategy
import spack.mirror
import spack.oci.image
import spack.repo
import spack.spec
@@ -89,9 +88,8 @@ def from_url(url: str):
    """Create an anonymous mirror by URL. This method validates the URL."""
    if not urllib.parse.urlparse(url).scheme in supported_url_schemes:
        raise ValueError(
            '"{}" is not a valid mirror URL. Scheme must be once of {}.'.format(
                url, ", ".join(supported_url_schemes)
            )
            f'"{url}" is not a valid mirror URL. '
            f"Scheme must be one of {supported_url_schemes}."
        )
    return Mirror(url)
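The rewritten message (which also fixes the old "must be once of" typo) interpolates the collection of supported schemes directly. A minimal sketch of the same validation, with a hypothetical scheme list:

```python
import urllib.parse

supported_url_schemes = ("file", "http", "https", "ftp", "s3", "gs", "oci")  # hypothetical


def validate_mirror_url(url: str) -> str:
    # urlparse("s3://bucket/path").scheme == "s3"
    if urllib.parse.urlparse(url).scheme not in supported_url_schemes:
        raise ValueError(
            f'"{url}" is not a valid mirror URL. '
            f"Scheme must be one of {supported_url_schemes}."
        )
    return url


validate_mirror_url("s3://my-bucket/mirror")  # ok; raises for e.g. "mailto:foo@bar"
```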
@@ -757,9 +755,9 @@ def create_mirror_from_package_object(pkg_obj, mirror_cache, mirror_stats):

def require_mirror_name(mirror_name):
    """Find a mirror by name and raise if it does not exist"""
    mirror = spack.mirror.MirrorCollection().get(mirror_name)
    mirror = MirrorCollection().get(mirror_name)
    if not mirror:
        raise ValueError('no mirror named "{0}"'.format(mirror_name))
        raise ValueError(f'no mirror named "{mirror_name}"')
    return mirror
@@ -527,7 +527,8 @@ def use_name(self):
        parts = name.split("/")
        name = os.path.join(*parts)
        # Add optional suffixes based on constraints
        path_elements = [name] + self.conf.suffixes
        path_elements = [name]
        path_elements.extend(map(self.spec.format, self.conf.suffixes))
        return "-".join(path_elements)

    @property
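The change runs each configured suffix through `Spec.format` before joining, so suffixes may contain spec format tokens rather than only literal strings. A rough sketch of the effect; the stand-in formatter, suffix strings, and token names below are illustrative, not taken from the diff:

```python
# Hypothetical stand-in for spack.spec.Spec.format: substitute {tokens}
def format_spec(template: str, **attrs: str) -> str:
    return template.format(**attrs)


name = "openmpi/4.1.5"
suffixes = ["{compiler_name}", "debug"]  # literal strings pass through unchanged

path_elements = [name]
path_elements.extend(format_spec(s, compiler_name="gcc") for s in suffixes)
assert "-".join(path_elements) == "openmpi/4.1.5-gcc-debug"
```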
@@ -32,7 +32,6 @@
from llnl.util.lang import classproperty, memoized
from llnl.util.link_tree import LinkTree

import spack.build_environment
import spack.builder
import spack.compilers
import spack.config
@@ -50,7 +49,6 @@
import spack.store
import spack.url
import spack.util.environment
import spack.util.executable
import spack.util.path
import spack.util.web
from spack.error import InstallError, NoURLError, PackageError
@@ -1855,13 +1853,22 @@ def _has_make_target(self, target):
        #
        # BSD Make:
        # make: don't know how to make test. Stop
        #
        # Note: "Stop." is not printed when running a Make jobserver (spack env depfile) that runs
        # with `make -k/--keep-going`
        missing_target_msgs = [
            "No rule to make target `{0}'. Stop.",
            "No rule to make target '{0}'. Stop.",
            "don't know how to make {0}. Stop",
            "No rule to make target `{0}'.",
            "No rule to make target '{0}'.",
            "don't know how to make {0}.",
        ]

        kwargs = {"fail_on_error": False, "output": os.devnull, "error": str}
        kwargs = {
            "fail_on_error": False,
            "output": os.devnull,
            "error": str,
            # Remove MAKEFLAGS to avoid inherited flags from Make jobserver (spack env depfile)
            "extra_env": {"MAKEFLAGS": ""},
        }

        stderr = make("-n", target, **kwargs)
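`_has_make_target` probes for a target with a dry run (`make -n`) and matches the error messages GNU and BSD make print for missing targets; the hunk drops the `Stop.` suffix (absent under `make -k/--keep-going`) and clears `MAKEFLAGS` so a parent jobserver's flags do not leak into the probe. A standalone sketch of the same probe, assuming `make` is on `PATH` (not Spack's executable wrapper):

```python
import os
import subprocess


def has_make_target(target: str, cwd: str = ".") -> bool:
    """Dry-run make and look for the 'missing target' messages (a sketch)."""
    missing = [
        f"No rule to make target `{target}'.",  # GNU make (backquote variant)
        f"No rule to make target '{target}'.",
        f"don't know how to make {target}.",    # BSD make
    ]
    env = dict(os.environ, MAKEFLAGS="")  # drop inherited jobserver flags
    proc = subprocess.run(
        ["make", "-n", target], cwd=cwd, env=env,
        stdout=subprocess.DEVNULL, stderr=subprocess.PIPE, text=True,
    )
    return not any(msg in proc.stderr for msg in missing)
```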
@@ -205,23 +205,33 @@ def macho_find_paths(orig_rpaths, deps, idpath, old_layout_root, prefix_to_prefi
        paths_to_paths dictionary which maps all of the old paths to new paths
    """
    paths_to_paths = dict()
    # Sort from longest path to shortest, to ensure we try /foo/bar/baz before /foo/bar
    prefix_iteration_order = sorted(prefix_to_prefix, key=len, reverse=True)
    for orig_rpath in orig_rpaths:
        if orig_rpath.startswith(old_layout_root):
            for old_prefix, new_prefix in prefix_to_prefix.items():
            for old_prefix in prefix_iteration_order:
                new_prefix = prefix_to_prefix[old_prefix]
                if orig_rpath.startswith(old_prefix):
                    new_rpath = re.sub(re.escape(old_prefix), new_prefix, orig_rpath)
                    paths_to_paths[orig_rpath] = new_rpath
                    break
        else:
            paths_to_paths[orig_rpath] = orig_rpath

    if idpath:
        for old_prefix, new_prefix in prefix_to_prefix.items():
        for old_prefix in prefix_iteration_order:
            new_prefix = prefix_to_prefix[old_prefix]
            if idpath.startswith(old_prefix):
                paths_to_paths[idpath] = re.sub(re.escape(old_prefix), new_prefix, idpath)
                break

    for dep in deps:
        for old_prefix, new_prefix in prefix_to_prefix.items():
        for old_prefix in prefix_iteration_order:
            new_prefix = prefix_to_prefix[old_prefix]
            if dep.startswith(old_prefix):
                paths_to_paths[dep] = re.sub(re.escape(old_prefix), new_prefix, dep)
                break

        if dep.startswith("@"):
            paths_to_paths[dep] = dep
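Iterating prefixes from longest to shortest matters whenever one old prefix is a parent of another: a path under `/old/foo/bar` must be matched before the enclosing `/old/foo`, or the rewrite picks the wrong replacement. A small sketch of the difference, with made-up paths:

```python
prefix_to_prefix = {"/old/foo": "/new/foo", "/old/foo/bar": "/elsewhere/bar"}
path = "/old/foo/bar/lib/libx.dylib"

# Plain dict order could try /old/foo first and yield /new/foo/bar/lib/libx.dylib.
longest_first = sorted(prefix_to_prefix, key=len, reverse=True)
for old in longest_first:
    if path.startswith(old):
        print(path.replace(old, prefix_to_prefix[old], 1))
        # -> /elsewhere/bar/lib/libx.dylib
        break
```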
@@ -270,40 +280,14 @@ def modify_macho_object(cur_path, rpaths, deps, idpath, paths_to_paths):
        install_name_tool = executable.Executable("install_name_tool")
        install_name_tool(*args)

    return


def modify_object_macholib(cur_path, paths_to_paths):
    """
    This function is used when installing Mach-O buildcaches on Linux by
    rewriting mach-o loader commands for dependency library paths of
    mach-o binaries and the id path for mach-o libraries.
    Rewriting of rpaths is handled by replace_prefix_bin.
    Inputs
    mach-o binary to be modified
    dictionary mapping paths in old install layout to new install layout
    """

    dll = macholib.MachO.MachO(cur_path)
    dll.rewriteLoadCommands(paths_to_paths.get)

    try:
        f = open(dll.filename, "rb+")
        for header in dll.headers:
            f.seek(0)
            dll.write(f)
        f.seek(0, 2)
        f.flush()
        f.close()
    except Exception:
        pass

    return


def macholib_get_paths(cur_path):
    """Get rpaths, dependent libraries, and library id of mach-o objects."""
    headers = macholib.MachO.MachO(cur_path).headers
    headers = []
    try:
        headers = macholib.MachO.MachO(cur_path).headers
    except ValueError:
        pass
    if not headers:
        tty.warn("Failed to read Mach-O headers: {0}".format(cur_path))
    commands = []
@@ -415,10 +399,7 @@ def relocate_macho_binaries(
            # normalized paths
            rel_to_orig = macho_make_paths_normal(orig_path_name, rpaths, deps, idpath)
            # replace the relativized paths with normalized paths
            if sys.platform == "darwin":
                modify_macho_object(path_name, rpaths, deps, idpath, rel_to_orig)
            else:
                modify_object_macholib(path_name, rel_to_orig)
            modify_macho_object(path_name, rpaths, deps, idpath, rel_to_orig)
            # get the normalized paths in the mach-o binary
            rpaths, deps, idpath = macholib_get_paths(path_name)
            # get the mapping of paths in old prefix to path in new prefix
@@ -426,10 +407,7 @@ def relocate_macho_binaries(
                rpaths, deps, idpath, old_layout_root, prefix_to_prefix
            )
            # replace the old paths with new paths
            if sys.platform == "darwin":
                modify_macho_object(path_name, rpaths, deps, idpath, paths_to_paths)
            else:
                modify_object_macholib(path_name, paths_to_paths)
            modify_macho_object(path_name, rpaths, deps, idpath, paths_to_paths)
            # get the new normalized path in the mach-o binary
            rpaths, deps, idpath = macholib_get_paths(path_name)
            # get the mapping of paths to relative paths in the new prefix
@@ -437,10 +415,7 @@ def relocate_macho_binaries(
                path_name, new_layout_root, rpaths, deps, idpath
            )
            # replace the new paths with relativized paths in the new prefix
            if sys.platform == "darwin":
                modify_macho_object(path_name, rpaths, deps, idpath, paths_to_paths)
            else:
                modify_object_macholib(path_name, paths_to_paths)
            modify_macho_object(path_name, rpaths, deps, idpath, paths_to_paths)
        else:
            # get the paths in the old prefix
            rpaths, deps, idpath = macholib_get_paths(path_name)
@@ -449,10 +424,7 @@ def relocate_macho_binaries(
                rpaths, deps, idpath, old_layout_root, prefix_to_prefix
            )
            # replace the old paths with new paths
            if sys.platform == "darwin":
                modify_macho_object(path_name, rpaths, deps, idpath, paths_to_paths)
            else:
                modify_object_macholib(path_name, paths_to_paths)
            modify_macho_object(path_name, rpaths, deps, idpath, paths_to_paths)


def _transform_rpaths(orig_rpaths, orig_root, new_prefixes):
@@ -39,9 +39,9 @@
|
||||
import spack.error
|
||||
import spack.patch
|
||||
import spack.provider_index
|
||||
import spack.repo
|
||||
import spack.spec
|
||||
import spack.tag
|
||||
import spack.util.file_cache
|
||||
import spack.util.git
|
||||
import spack.util.naming as nm
|
||||
import spack.util.path
|
||||
@@ -216,9 +216,9 @@ def compute_loader(self, fullname):
|
||||
def packages_path():
|
||||
"""Get the test repo if it is active, otherwise the builtin repo."""
|
||||
try:
|
||||
return spack.repo.PATH.get_repo("builtin.mock").packages_path
|
||||
except spack.repo.UnknownNamespaceError:
|
||||
return spack.repo.PATH.get_repo("builtin").packages_path
|
||||
return PATH.get_repo("builtin.mock").packages_path
|
||||
except UnknownNamespaceError:
|
||||
return PATH.get_repo("builtin").packages_path
|
||||
|
||||
|
||||
class GitExe:
|
||||
@@ -314,7 +314,7 @@ def add_package_to_git_stage(packages):
|
||||
git = GitExe()
|
||||
|
||||
for pkg_name in packages:
|
||||
filename = spack.repo.PATH.filename_for_package_name(pkg_name)
|
||||
filename = PATH.filename_for_package_name(pkg_name)
|
||||
if not os.path.isfile(filename):
|
||||
tty.die("No such package: %s. Path does not exist:" % pkg_name, filename)
|
||||
|
||||
@@ -590,7 +590,7 @@ def __init__(
|
||||
self,
|
||||
package_checker: FastPackageChecker,
|
||||
namespace: str,
|
||||
cache: "spack.caches.FileCacheType",
|
||||
cache: spack.util.file_cache.FileCache,
|
||||
):
|
||||
self.checker = package_checker
|
||||
self.packages_path = self.checker.packages_path
|
||||
@@ -683,7 +683,7 @@ class RepoPath:
|
||||
def __init__(
|
||||
self,
|
||||
*repos: Union[str, "Repo"],
|
||||
cache: Optional["spack.caches.FileCacheType"],
|
||||
cache: Optional[spack.util.file_cache.FileCache],
|
||||
overrides: Optional[Dict[str, Any]] = None,
|
||||
) -> None:
|
||||
self.repos: List[Repo] = []
|
||||
@@ -965,7 +965,7 @@ def __init__(
         self,
         root: str,
         *,
-        cache: "spack.caches.FileCacheType",
+        cache: spack.util.file_cache.FileCache,
         overrides: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Instantiate a package repository from a filesystem path.
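With the typing change above, building a repository object takes a concrete spack.util.file_cache.FileCache instead of the old spack.caches.FileCacheType alias. A hedged usage sketch; the FileCache constructor signature and both paths are assumptions, not taken from this diff:

import spack.repo
import spack.util.file_cache

# Assumed constructor: a FileCache rooted at a scratch directory.
cache = spack.util.file_cache.FileCache("/tmp/demo-misc-cache")
# Keyword-only `cache`, matching the signature in the hunk above;
# the repository path is a placeholder.
repo = spack.repo.Repo("/path/to/a/package/repo", cache=cache)
print(repo.packages_path)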
@@ -1440,9 +1440,7 @@ def _path(configuration=None):
     return create(configuration=configuration)


-def create(
-    configuration: Union["spack.config.Configuration", llnl.util.lang.Singleton]
-) -> RepoPath:
+def create(configuration: spack.config.Configuration) -> RepoPath:
     """Create a RepoPath from a configuration object.

     Args:
@@ -1465,7 +1463,7 @@ def create(


 #: Singleton repo path instance
-PATH: Union[RepoPath, llnl.util.lang.Singleton] = llnl.util.lang.Singleton(_path)
+PATH: RepoPath = llnl.util.lang.Singleton(_path)  # type: ignore

 # Add the finder to sys.meta_path
 REPOS_FINDER = ReposFinder()
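Annotating PATH as RepoPath while binding a lazy Singleton is a deliberate trade: call sites type-check against the real interface, and the trailing `# type: ignore` acknowledges that the bound object is actually a proxy. A minimal stand-in showing how such a proxy can defer construction until first attribute access; this is an assumption about llnl.util.lang.Singleton's behavior, not its actual code:

# Lazy singleton proxy: the factory runs on first attribute access,
# after which the proxy forwards everything to the cached instance.
class LazySingleton:
    def __init__(self, factory):
        self._factory = factory
        self._instance = None

    def __getattr__(self, name):
        if self._instance is None:
            self._instance = self._factory()
        return getattr(self._instance, name)

class RepoPathDemo:  # hypothetical stand-in for RepoPath
    repos = ["builtin"]

PATH = LazySingleton(RepoPathDemo)  # nothing constructed yet
print(PATH.repos)                   # first access triggers construction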
@@ -1585,7 +1583,7 @@ def __init__(self, name, repo=None):
             long_msg = "Use 'spack create' to create a new package."

             if not repo:
-                repo = spack.repo.PATH
+                repo = PATH

             # We need to compare the base package name
             pkg_name = name.rsplit(".", 1)[-1]
@@ -12,6 +12,7 @@
 from llnl.util.symlink import readlink, symlink

 import spack.binary_distribution as bindist
+import spack.deptypes as dt
 import spack.error
 import spack.hooks
 import spack.platforms
@@ -52,6 +53,7 @@ def rewire_node(spec, explicit):
     its subgraph. Binaries, text, and links are all changed in accordance with
     the splice. The resulting package is then 'installed.'"""
     tempdir = tempfile.mkdtemp()
+
     # copy anything installed to a temporary directory
     shutil.copytree(spec.build_spec.prefix, os.path.join(tempdir, spec.dag_hash()))
@@ -59,8 +61,21 @@ def rewire_node(spec, explicit):
     # compute prefix-to-prefix for every node from the build spec to the spliced
     # spec
     prefix_to_prefix = OrderedDict({spec.build_spec.prefix: spec.prefix})
-    for build_dep in spec.build_spec.traverse(root=False):
-        prefix_to_prefix[build_dep.prefix] = spec[build_dep.name].prefix
+    build_spec_ids = set(id(s) for s in spec.build_spec.traverse(deptype=dt.ALL & ~dt.BUILD))
+    for s in bindist.deps_to_relocate(spec):
+        analog = s
+        if id(s) not in build_spec_ids:
+            analogs = [
+                d
+                for d in spec.build_spec.traverse(deptype=dt.ALL & ~dt.BUILD)
+                if s._splice_match(d, self_root=spec, other_root=spec.build_spec)
+            ]
+            if analogs:
+                # Prefer same-name analogs and prefer higher versions.
+                # This matches the preferences in Spec.splice, so we will find the same node.
+                analog = max(analogs, key=lambda a: (a.name == s.name, a.version))
+
+        prefix_to_prefix[analog.prefix] = s.prefix

     manifest = bindist.get_buildfile_manifest(spec.build_spec)
     platform = spack.platforms.by_name(spec.platform)
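The max() call above relies on lexicographic tuple comparison: the boolean name match dominates, and the version only breaks ties among same-name analogs. A toy illustration with hypothetical (name, version) tuples; note that Spack compares version objects, not strings:

# Python compares key tuples element by element, so a True name match
# outranks any version, and the version decides among equal names.
candidates = [
    ("zlib-ng", "2.1.6"),
    ("zlib", "1.2.13"),
    ("zlib", "1.3.1"),
]
wanted = "zlib"
best = max(candidates, key=lambda c: (c[0] == wanted, c[1]))
print(best)  # ('zlib', '1.3.1'): same-name analogs win, then the highest version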
@@ -11,8 +11,6 @@

 from llnl.util.lang import union_dicts

-import spack.schema.gitlab_ci
-
 # Schema for script fields
 # List of lists and/or strings
 # This is similar to what is allowed in
@@ -47,7 +45,7 @@
     "tags": {"type": "array", "items": {"type": "string"}},
     "variables": {
         "type": "object",
-        "patternProperties": {r"[\w\d\-_\.]+": {"type": "string"}},
+        "patternProperties": {r"[\w\d\-_\.]+": {"type": ["string", "number"]}},
     },
     "before_script": script_schema,
     "script": script_schema,
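Widening the "variables" values to ["string", "number"] matters because an unquoted YAML scalar such as 16 is loaded as a number, which the old schema rejected. A hedged illustration of that difference using PyYAML and jsonschema:

# An unquoted numeric CI variable parses as an int, which
# {"type": "string"} rejects and {"type": ["string", "number"]} accepts.
import jsonschema
import yaml

variables = yaml.safe_load("SPACK_BUILD_JOBS: 16")  # {'SPACK_BUILD_JOBS': 16}
old = {"type": "object", "patternProperties": {r"[\w\d\-_\.]+": {"type": "string"}}}
new = {"type": "object", "patternProperties": {r"[\w\d\-_\.]+": {"type": ["string", "number"]}}}

print(jsonschema.Draft7Validator(old).is_valid(variables))  # False
print(jsonschema.Draft7Validator(new).is_valid(variables))  # True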
@@ -77,58 +75,54 @@
     },
 }

-named_attributes_schema = {
-    "oneOf": [
-        {
-            "type": "object",
-            "additionalProperties": False,
-            "properties": {"noop-job": attributes_schema, "noop-job-remove": attributes_schema},
-        },
-        {
-            "type": "object",
-            "additionalProperties": False,
-            "properties": {"build-job": attributes_schema, "build-job-remove": attributes_schema},
-        },
-        {
-            "type": "object",
-            "additionalProperties": False,
-            "properties": {"copy-job": attributes_schema, "copy-job-remove": attributes_schema},
-        },
-        {
-            "type": "object",
-            "additionalProperties": False,
-            "properties": {
-                "reindex-job": attributes_schema,
-                "reindex-job-remove": attributes_schema,
-            },
-        },
-        {
-            "type": "object",
-            "additionalProperties": False,
-            "properties": {
-                "signing-job": attributes_schema,
-                "signing-job-remove": attributes_schema,
-            },
-        },
-        {
-            "type": "object",
-            "additionalProperties": False,
-            "properties": {
-                "cleanup-job": attributes_schema,
-                "cleanup-job-remove": attributes_schema,
-            },
-        },
-        {
-            "type": "object",
-            "additionalProperties": False,
-            "properties": {"any-job": attributes_schema, "any-job-remove": attributes_schema},
-        },
-    ]
-}
+dynamic_mapping_schema = {
+    "type": "object",
+    "additionalProperties": False,
+    "required": ["dynamic-mapping"],
+    "properties": {
+        "dynamic-mapping": {
+            "type": "object",
+            "additionalProperties": False,
+            "required": ["endpoint"],
+            "properties": {
+                "name": {"type": "string"},
+                # "endpoint" cannot have an http patternProperties constraint, as it is
+                # a required field; the constraint is applied in code
+                "endpoint": {"type": "string"},
+                "timeout": {"type": "integer", "minimum": 0},
+                "verify_ssl": {"type": "boolean", "default": False},
+                "header": {"type": "object", "additionalProperties": False},
+                "allow": {"type": "array", "items": {"type": "string"}},
+                "require": {"type": "array", "items": {"type": "string"}},
+                "ignore": {"type": "array", "items": {"type": "string"}},
+            },
+        }
+    },
+}
+
+
+def job_schema(name: str):
+    return {
+        "type": "object",
+        "additionalProperties": False,
+        "properties": {f"{name}-job": attributes_schema, f"{name}-job-remove": attributes_schema},
+    }
+

 pipeline_gen_schema = {
     "type": "array",
-    "items": {"oneOf": [submapping_schema, named_attributes_schema]},
+    "items": {
+        "oneOf": [
+            submapping_schema,
+            dynamic_mapping_schema,
+            job_schema("any"),
+            job_schema("build"),
+            job_schema("cleanup"),
+            job_schema("copy"),
+            job_schema("noop"),
+            job_schema("reindex"),
+            job_schema("signing"),
+        ]
+    },
 }

 core_shared_properties = union_dicts(
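The job_schema factory above replaces seven nearly identical oneOf branches with one parameterized generator. A sketch of how a generated schema validates, assuming the jsonschema package and stubbing attributes_schema down to a permissive object:

import jsonschema

attributes_schema = {"type": "object"}  # stand-in for the full job-attributes schema

def job_schema(name: str):
    return {
        "type": "object",
        "additionalProperties": False,
        "properties": {f"{name}-job": attributes_schema, f"{name}-job-remove": attributes_schema},
    }

entry = {"build-job": {"tags": ["large"], "variables": {"SPACK_VERBOSE": "1"}}}
jsonschema.validate(entry, job_schema("build"))        # passes
try:
    jsonschema.validate({"deploy-job": {}}, job_schema("build"))
except jsonschema.ValidationError as e:
    print("rejected:", e.message)                      # additionalProperties is False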
@@ -141,39 +135,8 @@
     }
 )

-# TODO: Remove in Spack 0.23
-ci_properties = {
-    "anyOf": [
-        {
-            "type": "object",
-            "additionalProperties": False,
-            # "required": ["mappings"],
-            "properties": union_dicts(
-                core_shared_properties, {"enable-artifacts-buildcache": {"type": "boolean"}}
-            ),
-        },
-        {
-            "type": "object",
-            "additionalProperties": False,
-            # "required": ["mappings"],
-            "properties": union_dicts(
-                core_shared_properties, {"temporary-storage-url-prefix": {"type": "string"}}
-            ),
-        },
-    ]
-}
-
 #: Properties for inclusion in other schemas
-properties: Dict[str, Any] = {
-    "ci": {
-        "oneOf": [
-            # TODO: Replace with core-shared-properties in Spack 0.23
-            ci_properties,
-            # Allow legacy format under `ci` for `config update ci`
-            spack.schema.gitlab_ci.gitlab_ci_properties,
-        ]
-    }
-}
+properties: Dict[str, Any] = {"ci": core_shared_properties}

 #: Full schema with metadata
 schema = {
@@ -183,21 +146,3 @@
     "additionalProperties": False,
     "properties": properties,
 }
-
-
-def update(data):
-    import llnl.util.tty as tty
-
-    import spack.ci
-    import spack.environment as ev
-
-    # Warn if deprecated section is still in the environment
-    ci_env = ev.active_environment()
-    if ci_env:
-        env_config = ci_env.manifest[ev.TOP_LEVEL_KEY]
-        if "gitlab-ci" in env_config:
-            tty.die("Error: `gitlab-ci` section detected with `ci`, these are not compatible")
-
-    # Detect if the ci section is using the new pipeline-gen
-    # If it is, assume it has already been converted
-    return spack.ci.translate_deprecated_config(data)
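With the legacy anyOf branch and the update() hook removed, a `ci` section now validates directly against core_shared_properties. A hedged sketch of a minimal modern section, written as the parsed YAML dict; the tag and image values are placeholders, and only the pipeline-gen key is assumed from the schema assembled above:

# Minimal modern `ci` section as a parsed dict (values illustrative).
ci_config = {
    "pipeline-gen": [
        {"build-job": {"tags": ["medium"], "image": "ghcr.io/example/builder:latest"}}
    ]
}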
@@ -61,7 +61,10 @@
                     "target": {"type": "string"},
                     "alias": {"anyOf": [{"type": "string"}, {"type": "null"}]},
                     "modules": {
-                        "anyOf": [{"type": "string"}, {"type": "null"}, {"type": "array"}]
+                        "anyOf": [
+                            {"type": "null"},
+                            {"type": "array", "items": {"type": "string"}},
+                        ]
                     },
                     "implicit_rpaths": implicit_rpaths,
                     "environment": spack.schema.environment.definition,
@@ -33,8 +33,14 @@
             "properties": {
                 "type": {
                     "type": "string",
-                    "enum": ["local", "buildcache", "external"],
+                    "enum": [
+                        "local",
+                        "buildcache",
+                        "external",
+                        "environment",
+                    ],
                 },
+                "path": {"type": "string"},
                 "include": LIST_OF_SPECS,
                 "exclude": LIST_OF_SPECS,
             },
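The new "environment" member, together with the added "path" property, lets concretization reuse draw candidates from an existing environment. A hedged sketch of the configuration this enables, written as the parsed YAML dict; the path and spec are placeholders, and the nesting under reuse/from follows the surrounding schema, which this diff only shows in part:

# Assumed shape: these source entries are the items of `reuse:from`.
concretizer_config = {
    "reuse": {
        "from": [
            {"type": "environment", "path": "/path/to/some/env", "include": ["zlib"]}
        ]
    }
}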
@@ -55,6 +61,26 @@
         "unify": {
             "oneOf": [{"type": "boolean"}, {"type": "string", "enum": ["when_possible"]}]
         },
+        "splice": {
+            "type": "object",
+            "additionalProperties": False,
+            "properties": {
+                "explicit": {
+                    "type": "array",
+                    "default": [],
+                    "items": {
+                        "type": "object",
+                        "required": ["target", "replacement"],
+                        "additionalProperties": False,
+                        "properties": {
+                            "target": {"type": "string"},
+                            "replacement": {"type": "string"},
+                            "transitive": {"type": "boolean", "default": False},
+                        },
+                    },
+                }
+            },
+        },
         "duplicates": {
             "type": "object",
             "properties": {
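The splice schema added above is self-contained enough to check directly. A sketch validating a sample entry with the jsonschema package; the spec strings are illustrative only:

import jsonschema

splice_schema = {
    "type": "object",
    "additionalProperties": False,
    "properties": {
        "explicit": {
            "type": "array",
            "default": [],
            "items": {
                "type": "object",
                "required": ["target", "replacement"],
                "additionalProperties": False,
                "properties": {
                    "target": {"type": "string"},
                    "replacement": {"type": "string"},
                    "transitive": {"type": "boolean", "default": False},
                },
            },
        }
    },
}

config = {
    "explicit": [
        {"target": "mpich", "replacement": "mpich@4.2.1/abcdef", "transitive": False}
    ]
}
jsonschema.validate(config, splice_schema)  # passes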
@@ -12,7 +12,6 @@

 from llnl.util.lang import union_dicts

-import spack.schema.gitlab_ci  # DEPRECATED
 import spack.schema.merged

 from .spec_list import spec_list_schema
@@ -20,21 +19,21 @@
 #: Top level key in a manifest file
 TOP_LEVEL_KEY = "spack"

+include_concrete = {"type": "array", "default": [], "items": {"type": "string"}}
+
 properties: Dict[str, Any] = {
     "spack": {
         "type": "object",
         "default": {},
         "additionalProperties": False,
         "properties": union_dicts(
-            # Include deprecated "gitlab-ci" section
-            spack.schema.gitlab_ci.properties,
             # merged configuration scope schemas
             spack.schema.merged.properties,
             # extra environment schema properties
             {
                 "include": {"type": "array", "default": [], "items": {"type": "string"}},
                 "specs": spec_list_schema,
-                "include_concrete": {"type": "array", "default": [], "items": {"type": "string"}},
+                "include_concrete": include_concrete,
             },
         ),
     }
@@ -58,15 +57,6 @@ def update(data):
     Returns:
         True if data was changed, False otherwise
     """
-
-    import spack.ci
-
-    if "gitlab-ci" in data:
-        data["ci"] = data.pop("gitlab-ci")
-
-    if "ci" in data:
-        return spack.ci.translate_deprecated_config(data["ci"])
-
     # There are not currently any deprecated attributes in this section
     # that have not been removed
     return False
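The removed branch was the last trace of the automatic gitlab-ci to ci rename in this schema's update hook. For reference, the migration it performed was a key rename followed by the deprecated-config translation; a minimal recap of just the rename, mirroring the removed lines:

def migrate(data):
    # Rename the legacy key in place, as the removed update() code did.
    if "gitlab-ci" in data:
        data["ci"] = data.pop("gitlab-ci")
    return data

print(migrate({"gitlab-ci": {"mappings": []}}))  # {'ci': {'mappings': []}}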
@@ -1,125 +0,0 @@
-# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
-# Spack Project Developers. See the top-level COPYRIGHT file for details.
-#
-# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
-"""Schema for gitlab-ci.yaml configuration file.
-
-.. literalinclude:: ../spack/schema/gitlab_ci.py
-   :lines: 15-
-"""
-from typing import Any, Dict
-
-from llnl.util.lang import union_dicts
-
-image_schema = {
-    "oneOf": [
-        {"type": "string"},
-        {
-            "type": "object",
-            "properties": {
-                "name": {"type": "string"},
-                "entrypoint": {"type": "array", "items": {"type": "string"}},
-            },
-        },
-    ]
-}
-
-runner_attributes_schema_items = {
-    "image": image_schema,
-    "tags": {"type": "array", "items": {"type": "string"}},
-    "variables": {"type": "object", "patternProperties": {r"[\w\d\-_\.]+": {"type": "string"}}},
-    "before_script": {"type": "array", "items": {"type": "string"}},
-    "script": {"type": "array", "items": {"type": "string"}},
-    "after_script": {"type": "array", "items": {"type": "string"}},
-}
-
-runner_selector_schema = {
-    "type": "object",
-    "additionalProperties": True,
-    "required": ["tags"],
-    "properties": runner_attributes_schema_items,
-}
-
-remove_attributes_schema = {
-    "type": "object",
-    "additionalProperties": False,
-    "required": ["tags"],
-    "properties": {"tags": {"type": "array", "items": {"type": "string"}}},
-}
-
-
-core_shared_properties = union_dicts(
-    runner_attributes_schema_items,
-    {
-        "bootstrap": {
-            "type": "array",
-            "items": {
-                "anyOf": [
-                    {"type": "string"},
-                    {
-                        "type": "object",
-                        "additionalProperties": False,
-                        "required": ["name"],
-                        "properties": {
-                            "name": {"type": "string"},
-                            "compiler-agnostic": {"type": "boolean", "default": False},
-                        },
-                    },
-                ]
-            },
-        },
-        "match_behavior": {"type": "string", "enum": ["first", "merge"], "default": "first"},
-        "mappings": {
-            "type": "array",
-            "items": {
-                "type": "object",
-                "additionalProperties": False,
-                "required": ["match"],
-                "properties": {
-                    "match": {"type": "array", "items": {"type": "string"}},
-                    "remove-attributes": remove_attributes_schema,
-                    "runner-attributes": runner_selector_schema,
-                },
-            },
-        },
-        "service-job-attributes": runner_selector_schema,
-        "signing-job-attributes": runner_selector_schema,
-        "rebuild-index": {"type": "boolean"},
-        "broken-specs-url": {"type": "string"},
-        "broken-tests-packages": {"type": "array", "items": {"type": "string"}},
-    },
-)
-
-gitlab_ci_properties = {
-    "anyOf": [
-        {
-            "type": "object",
-            "additionalProperties": False,
-            "required": ["mappings"],
-            "properties": union_dicts(
-                core_shared_properties, {"enable-artifacts-buildcache": {"type": "boolean"}}
-            ),
-        },
-        {
-            "type": "object",
-            "additionalProperties": False,
-            "required": ["mappings"],
-            "properties": union_dicts(
-                core_shared_properties, {"temporary-storage-url-prefix": {"type": "string"}}
-            ),
-        },
-    ]
-}
-
-#: Properties for inclusion in other schemas
-properties: Dict[str, Any] = {"gitlab-ci": gitlab_ci_properties}
-
-#: Full schema with metadata
-schema = {
-    "$schema": "http://json-schema.org/draft-07/schema#",
-    "title": "Spack gitlab-ci configuration file schema",
-    "type": "object",
-    "additionalProperties": False,
-    "properties": properties,
-}
Some files were not shown because too many files have changed in this diff.