Compare commits
701 commits (packages/t...revert-473)
@@ -5,7 +5,7 @@ coverage:
   status:
     project:
       default:
-        threshold: 0.2%
+        threshold: 2.0%

 ignore:
   - lib/spack/spack/test/.*
.github/workflows/audit.yaml (6 changes, vendored)

@@ -28,8 +28,8 @@ jobs:
       run:
         shell: ${{ matrix.system.shell }}
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: ${{inputs.python_version}}
      - name: Install Python packages
@@ -66,7 +66,7 @@ jobs:
          ./share/spack/qa/validate_last_exit.ps1
          spack -d audit externals
          ./share/spack/qa/validate_last_exit.ps1
-      - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
+      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
        if: ${{ inputs.with_coverage == 'true' && runner.os != 'Windows' }}
        with:
          name: coverage-audits-${{ matrix.system.os }}
.github/workflows/bin/bootstrap-test.sh (2 changes, vendored)

@@ -1,7 +1,7 @@
 #!/bin/bash
 set -e
 source share/spack/setup-env.sh
-$PYTHON bin/spack bootstrap disable github-actions-v0.4
+$PYTHON bin/spack bootstrap disable github-actions-v0.5
 $PYTHON bin/spack bootstrap disable spack-install
 $PYTHON bin/spack $SPACK_FLAGS solve zlib
 tree $BOOTSTRAP/store
.github/workflows/bootstrap.yml (133 changes, vendored)

@@ -37,14 +37,14 @@ jobs:
             make patch unzip which xz python3 python3-devel tree \
             cmake bison
       - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
      - name: Bootstrap clingo
        run: |
          source share/spack/setup-env.sh
+          spack bootstrap disable github-actions-v0.6
          spack bootstrap disable github-actions-v0.5
-          spack bootstrap disable github-actions-v0.4
          spack external find cmake bison
          spack -d solve zlib
          tree ~/.spack/bootstrap/store/
@@ -53,33 +53,27 @@
    runs-on: ${{ matrix.runner }}
    strategy:
      matrix:
-        runner: ['macos-13', 'macos-14', "ubuntu-latest", "windows-latest"]
+        runner: ['macos-13', 'macos-14', "ubuntu-latest"]
    steps:
      - name: Setup macOS
-        if: ${{ matrix.runner != 'ubuntu-latest' && matrix.runner != 'windows-latest' }}
+        if: ${{ matrix.runner != 'ubuntu-latest' }}
        run: |
          brew install cmake bison tree
      - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: "3.12"
      - name: Bootstrap clingo
-        env:
-          SETUP_SCRIPT_EXT: ${{ matrix.runner == 'windows-latest' && 'ps1' || 'sh' }}
-          SETUP_SCRIPT_SOURCE: ${{ matrix.runner == 'windows-latest' && './' || 'source ' }}
-          USER_SCOPE_PARENT_DIR: ${{ matrix.runner == 'windows-latest' && '$env:userprofile' || '$HOME' }}
-          VALIDATE_LAST_EXIT: ${{ matrix.runner == 'windows-latest' && './share/spack/qa/validate_last_exit.ps1' || '' }}
        run: |
-          ${{ env.SETUP_SCRIPT_SOURCE }}share/spack/setup-env.${{ env.SETUP_SCRIPT_EXT }}
+          source share/spack/setup-env.sh
+          spack bootstrap disable github-actions-v0.6
          spack bootstrap disable github-actions-v0.5
-          spack bootstrap disable github-actions-v0.4
          spack external find --not-buildable cmake bison
          spack -d solve zlib
-          ${{ env.VALIDATE_LAST_EXIT }}
-          tree ${{ env.USER_SCOPE_PARENT_DIR }}/.spack/bootstrap/store/
+          tree $HOME/.spack/bootstrap/store/

  gnupg-sources:
    runs-on: ${{ matrix.runner }}
@@ -89,22 +83,22 @@
    steps:
      - name: Setup macOS
        if: ${{ matrix.runner != 'ubuntu-latest' }}
+        run: brew install tree gawk
+      - name: Remove system executables
        run: |
-          brew install tree gawk
-          sudo rm -rf $(command -v gpg gpg2)
-      - name: Setup Ubuntu
-        if: ${{ matrix.runner == 'ubuntu-latest' }}
-        run: sudo rm -rf $(command -v gpg gpg2 patchelf)
+          while [ -n "$(command -v gpg gpg2 patchelf)" ]; do
+            sudo rm $(command -v gpg gpg2 patchelf)
+          done
      - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
      - name: Bootstrap GnuPG
        run: |
          source share/spack/setup-env.sh
          spack solve zlib
+          spack bootstrap disable github-actions-v0.6
          spack bootstrap disable github-actions-v0.5
-          spack bootstrap disable github-actions-v0.4
          spack -d gpg list
          tree ~/.spack/bootstrap/store/

@@ -112,28 +106,21 @@
    runs-on: ${{ matrix.runner }}
    strategy:
      matrix:
-        runner: ['macos-13', 'macos-14', "ubuntu-latest", "windows-latest"]
+        runner: ['macos-13', 'macos-14', "ubuntu-latest"]
    steps:
      - name: Setup macOS
-        if: ${{ matrix.runner != 'ubuntu-latest' && matrix.runner != 'windows-latest'}}
+        if: ${{ matrix.runner != 'ubuntu-latest' }}
+        run: brew install tree
+      - name: Remove system executables
        run: |
-          brew install tree
-          # Remove GnuPG since we want to bootstrap it
-          sudo rm -rf /usr/local/bin/gpg
-      - name: Setup Ubuntu
-        if: ${{ matrix.runner == 'ubuntu-latest' }}
-        run: |
-          sudo rm -rf $(which gpg) $(which gpg2) $(which patchelf)
-      - name: Setup Windows
-        if: ${{ matrix.runner == 'windows-latest' }}
-        run: |
-          Remove-Item -Path (Get-Command gpg).Path
-          Remove-Item -Path (Get-Command file).Path
+          while [ -n "$(command -v gpg gpg2 patchelf)" ]; do
+            sudo rm $(command -v gpg gpg2 patchelf)
+          done
      - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: |
            3.8
@@ -141,24 +128,16 @@
            3.10
            3.11
            3.12
+            3.13
      - name: Set bootstrap sources
-        env:
-          SETUP_SCRIPT_EXT: ${{ matrix.runner == 'windows-latest' && 'ps1' || 'sh' }}
-          SETUP_SCRIPT_SOURCE: ${{ matrix.runner == 'windows-latest' && './' || 'source ' }}
        run: |
-          ${{ env.SETUP_SCRIPT_SOURCE }}share/spack/setup-env.${{ env.SETUP_SCRIPT_EXT }}
-          spack bootstrap disable github-actions-v0.4
-      - name: Disable from source bootstrap
-        if: ${{ matrix.runner != 'windows-latest' }}
-        run: |
+          source share/spack/setup-env.sh
          spack bootstrap disable github-actions-v0.5
          spack bootstrap disable spack-install
      - name: Bootstrap clingo
-        # No binary clingo on Windows yet
-        if: ${{ matrix.runner != 'windows-latest' }}
        run: |
          set -e
-          for ver in '3.8' '3.9' '3.10' '3.11' '3.12' ; do
+          for ver in '3.8' '3.9' '3.10' '3.11' '3.12' '3.13'; do
            not_found=1
            ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
            if [[ -d "$ver_dir" ]] ; then
@@ -178,24 +157,48 @@
            fi
          done
      - name: Bootstrap GnuPG
-        env:
-          SETUP_SCRIPT_EXT: ${{ matrix.runner == 'windows-latest' && 'ps1' || 'sh' }}
-          SETUP_SCRIPT_SOURCE: ${{ matrix.runner == 'windows-latest' && './' || 'source ' }}
-          USER_SCOPE_PARENT_DIR: ${{ matrix.runner == 'windows-latest' && '$env:userprofile' || '$HOME' }}
-          VALIDATE_LAST_EXIT: ${{ matrix.runner == 'windows-latest' && './share/spack/qa/validate_last_exit.ps1' || '' }}
        run: |
-          ${{ env.SETUP_SCRIPT_SOURCE }}share/spack/setup-env.${{ env.SETUP_SCRIPT_EXT }}
+          source share/spack/setup-env.sh
          spack -d gpg list
-          ${{ env.VALIDATE_LAST_EXIT }}
-          tree ${{ env.USER_SCOPE_PARENT_DIR }}/.spack/bootstrap/store/
+          tree $HOME/.spack/bootstrap/store/
      - name: Bootstrap File
-        env:
-          SETUP_SCRIPT_EXT: ${{ matrix.runner == 'windows-latest' && 'ps1' || 'sh' }}
-          SETUP_SCRIPT_SOURCE: ${{ matrix.runner == 'windows-latest' && './' || 'source ' }}
-          USER_SCOPE_PARENT_DIR: ${{ matrix.runner == 'windows-latest' && '$env:userprofile' || '$HOME' }}
-          VALIDATE_LAST_EXIT: ${{ matrix.runner == 'windows-latest' && './share/spack/qa/validate_last_exit.ps1' || '' }}
        run: |
-          ${{ env.SETUP_SCRIPT_SOURCE }}share/spack/setup-env.${{ env.SETUP_SCRIPT_EXT }}
+          source share/spack/setup-env.sh
          spack -d python share/spack/qa/bootstrap-file.py
-          ${{ env.VALIDATE_LAST_EXIT }}
-          tree ${{ env.USER_SCOPE_PARENT_DIR }}/.spack/bootstrap/store/
+          tree $HOME/.spack/bootstrap/store/
+
+  windows:
+    runs-on: "windows-latest"
+    steps:
+      - name: Checkout
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+        with:
+          fetch-depth: 0
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
+        with:
+          python-version: "3.12"
+      - name: Setup Windows
+        run: |
+          Remove-Item -Path (Get-Command gpg).Path
+          Remove-Item -Path (Get-Command file).Path
+      - name: Bootstrap clingo
+        run: |
+          ./share/spack/setup-env.ps1
+          spack bootstrap disable github-actions-v0.6
+          spack bootstrap disable github-actions-v0.5
+          spack external find --not-buildable cmake bison
+          spack -d solve zlib
+          ./share/spack/qa/validate_last_exit.ps1
+          tree $env:userprofile/.spack/bootstrap/store/
+      - name: Bootstrap GnuPG
+        run: |
+          ./share/spack/setup-env.ps1
+          spack -d gpg list
+          ./share/spack/qa/validate_last_exit.ps1
+          tree $env:userprofile/.spack/bootstrap/store/
+      - name: Bootstrap File
+        run: |
+          ./share/spack/setup-env.ps1
+          spack -d python share/spack/qa/bootstrap-file.py
+          ./share/spack/qa/validate_last_exit.ps1
+          tree $env:userprofile/.spack/bootstrap/store/
.github/workflows/build-containers.yml (10 changes, vendored)

@@ -55,7 +55,7 @@ jobs:
     if: github.repository == 'spack/spack'
     steps:
       - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683

      - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
        id: docker_meta
@@ -87,7 +87,7 @@ jobs:
          fi

      - name: Upload Dockerfile
-        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
        with:
          name: dockerfiles_${{ matrix.dockerfile[0] }}
          path: dockerfiles
@@ -96,7 +96,7 @@ jobs:
        uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf

      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db
+        uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
@@ -113,7 +113,7 @@ jobs:
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build & Deploy ${{ matrix.dockerfile[0] }}
-        uses: docker/build-push-action@32945a339266b759abcbdc89316275140b0fc960
+        uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75
        with:
          context: dockerfiles/${{ matrix.dockerfile[0] }}
          platforms: ${{ matrix.dockerfile[1] }}
@@ -126,7 +126,7 @@ jobs:
    needs: deploy-images
    steps:
      - name: Merge Artifacts
-        uses: actions/upload-artifact/merge@50769540e7f4bd5e21e526ee35c689e35e0d6874
+        uses: actions/upload-artifact/merge@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
        with:
          name: dockerfiles
          pattern: dockerfiles_*
.github/workflows/ci.yaml (2 changes, vendored)

@@ -24,7 +24,7 @@ jobs:
       core: ${{ steps.filter.outputs.core }}
       packages: ${{ steps.filter.outputs.packages }}
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        if: ${{ github.event_name == 'push' }}
        with:
          fetch-depth: 0
.github/workflows/coverage.yml (6 changes, vendored)

@@ -8,8 +8,8 @@ jobs:
   upload:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: '3.11'
          cache: 'pip'
@@ -29,6 +29,6 @@ jobs:
      - run: coverage xml

      - name: "Upload coverage report to CodeCov"
-        uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673
+        uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238
        with:
          verbose: true
.github/workflows/nightly-win-builds.yml (4 changes, vendored)

@@ -14,10 +14,10 @@ jobs:
   build-paraview-deps:
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: 3.9
      - name: Install Python packages
@@ -1,7 +1,7 @@
-black==24.8.0
+black==24.10.0
 clingo==5.7.1
 flake8==7.1.1
 isort==5.13.2
 mypy==1.8.0
-types-six==1.16.21.20240513
+types-six==1.16.21.20241009
 vermin==1.6.0
.github/workflows/unit_tests.yaml (53 changes, vendored)

@@ -40,10 +40,10 @@ jobs:
           on_develop: false

     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install System packages
@@ -80,7 +80,7 @@ jobs:
          UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
        run: |
          share/spack/qa/run-unit-tests
-      - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
+      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
        with:
          name: coverage-${{ matrix.os }}-python${{ matrix.python-version }}
          path: coverage
@@ -89,10 +89,10 @@ jobs:
  shell:
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: '3.11'
      - name: Install System packages
@@ -113,7 +113,7 @@ jobs:
          COVERAGE: true
        run: |
          share/spack/qa/run-shell-tests
-      - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
+      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
        with:
          name: coverage-shell
          path: coverage
@@ -130,7 +130,7 @@ jobs:
        dnf install -y \
            bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
            make patch tcl unzip which xz
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
      - name: Setup repo and non-root user
        run: |
          git --version
@@ -149,32 +149,33 @@ jobs:
  clingo-cffi:
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
-          python-version: '3.11'
+          python-version: '3.13'
      - name: Install System packages
        run: |
          sudo apt-get -y update
-          sudo apt-get -y install coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build kcov
+          sudo apt-get -y install coreutils gfortran graphviz gnupg2
      - name: Install Python packages
        run: |
-          pip install --upgrade pip setuptools pytest coverage[toml] pytest-cov clingo pytest-xdist
+          pip install --upgrade pip setuptools pytest coverage[toml] pytest-cov clingo
          pip install --upgrade flake8 "isort>=4.3.5" "mypy>=0.900" "click" "black"
      - name: Setup git configuration
        run: |
          # Need this for the git tests to succeed.
          git --version
          . .github/workflows/bin/setup_git.sh
      - name: Run unit tests (full suite with coverage)
        env:
          COVERAGE: true
          COVERAGE_FILE: coverage/.coverage-clingo-cffi
        run: |
-          share/spack/qa/run-unit-tests
-      - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
+          . share/spack/setup-env.sh
+          spack bootstrap disable spack-install
+          spack bootstrap disable github-actions-v0.5
+          spack bootstrap disable github-actions-v0.6
+          spack bootstrap status
+          spack solve zlib
+          spack unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml lib/spack/spack/test/concretize.py
+      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
        with:
          name: coverage-clingo-cffi
          path: coverage
@@ -187,10 +188,10 @@ jobs:
        os: [macos-13, macos-14]
        python-version: ["3.11"]
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install Python packages
@@ -199,7 +200,7 @@ jobs:
          pip install --upgrade pytest coverage[toml] pytest-xdist pytest-cov
      - name: Setup Homebrew packages
        run: |
-          brew install dash fish gcc gnupg2 kcov
+          brew install dash fish gcc gnupg kcov
      - name: Run unit tests
        env:
          SPACK_TEST_PARALLEL: 4
@@ -212,7 +213,7 @@ jobs:
          $(which spack) solve zlib
          common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
          $(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
-      - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
+      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
        with:
          name: coverage-${{ matrix.os }}-python${{ matrix.python-version }}
          path: coverage
@@ -225,10 +226,10 @@ jobs:
        powershell Invoke-Expression -Command "./share/spack/qa/windows_test_setup.ps1"; {0}
    runs-on: windows-latest
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: 3.9
      - name: Install Python packages
@@ -243,7 +244,7 @@ jobs:
        run: |
          spack unit-test -x --verbose --cov --cov-config=pyproject.toml
          ./share/spack/qa/validate_last_exit.ps1
-      - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
+      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
        with:
          name: coverage-windows
          path: coverage
.github/workflows/valid-style.yml (22 changes, vendored)

@@ -18,8 +18,8 @@ jobs:
   validate:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: '3.11'
          cache: 'pip'
@@ -35,10 +35,10 @@ jobs:
  style:
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
-      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
        with:
          python-version: '3.11'
          cache: 'pip'
@@ -70,7 +70,7 @@ jobs:
        dnf install -y \
            bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
            make patch tcl unzip which xz
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
      - name: Setup repo and non-root user
        run: |
          git --version
@@ -85,7 +85,7 @@ jobs:
          source share/spack/setup-env.sh
          spack debug report
          spack -d bootstrap now --dev
-          spack style -t black
+          spack -d style -t black
          spack unit-test -V
  import-check:
    runs-on: ubuntu-latest
@@ -98,14 +98,14 @@ jobs:
      # PR: use the base of the PR as the old commit
      - name: Checkout PR base commit
        if: github.event_name == 'pull_request'
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          ref: ${{ github.event.pull_request.base.sha }}
          path: old
      # not a PR: use the previous commit as the old commit
      - name: Checkout previous commit
        if: github.event_name != 'pull_request'
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 2
          path: old
@@ -114,14 +114,14 @@ jobs:
        run: git -C old reset --hard HEAD^

      - name: Checkout new commit
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          path: new
      - name: Install circular import checker
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          repository: haampie/circular-import-fighter
-          ref: 555519c6fd5564fd2eb844e7b87e84f4d12602e2
+          ref: 9f60f51bc7134e0be73f27623f1b0357d1718427
          path: circular-import-fighter
      - name: Install dependencies
        working-directory: circular-import-fighter
@@ -14,3 +14,26 @@ sphinx:
 python:
   install:
     - requirements: lib/spack/docs/requirements.txt
+
+search:
+  ranking:
+    spack.html: -10
+    spack.*.html: -10
+    llnl.html: -10
+    llnl.*.html: -10
+    _modules/*: -10
+    command_index.html: -9
+    basic_usage.html: 5
+    configuration.html: 5
+    config_yaml.html: 5
+    packages_yaml.html: 5
+    build_settings.html: 5
+    environments.html: 5
+    containers.html: 5
+    mirrors.html: 5
+    module_file_support.html: 5
+    repositories.html: 5
+    binary_caches.html: 5
+    chain.html: 5
+    pipelines.html: 5
+    packaging_guide.html: 5
CHANGELOG.md (71 changes)

@@ -1,3 +1,64 @@
+# v0.22.2 (2024-09-21)
+
+## Bugfixes
+- Forward compatibility with Spack 0.23 packages with language dependencies (#45205, #45191)
+- Forward compatibility with `urllib` from Python 3.12.6+ (#46453, #46483)
+- Bump vendored `archspec` for better aarch64 support (#45721, #46445)
+- Support macOS Sequoia (#45018, #45127)
+- Fix regression in `{variants.X}` and `{variants.X.value}` format strings (#46206)
+- Ensure shell escaping of environment variable values in load and activate commands (#42780)
+- Fix an issue where `spec[pkg]` considers specs outside the current DAG (#45090)
+- Do not halt concretization on unknown variants in externals (#45326)
+- Improve validation of `develop` config section (#46485)
+- Explicitly disable `ccache` if turned off in config, to avoid cache pollution (#45275)
+- Improve backwards compatibility in `include_concrete` (#45766)
+- Fix issue where package tags were sometimes repeated (#45160)
+- Make `setup-env.sh` "sourced only" by dropping execution bits (#45641)
+- Make certain source/binary fetch errors recoverable instead of a hard error (#45683)
+- Remove debug statements in package hash computation (#45235)
+- Remove redundant clingo warnings (#45269)
+- Remove hard-coded layout version (#45645)
+- Do not initialize previous store state in `use_store` (#45268)
+- Docs improvements (#46475)
+
+## Package updates
+- `chapel` major update (#42197, #44931, #45304)
+
+# v0.22.1 (2024-07-04)
+
+## Bugfixes
+- Fix reuse of externals on Linux (#44316)
+- Ensure parent gcc-runtime version >= child (#44834, #44870)
+- Ensure the latest gcc-runtime is rpath'ed when multiple exist among link deps (#44219)
+- Improve version detection of glibc (#44154)
+- Improve heuristics for solver (#44893, #44976, #45023)
+- Make strong preferences override reuse (#44373)
+- Reduce verbosity when C compiler is missing (#44182)
+- Make missing ccache executable an error when required (#44740)
+- Make every environment view containing `python` a `venv` (#44382)
+- Fix external detection for compilers with os but no target (#44156)
+- Fix version optimization for roots (#44272)
+- Handle common implementations of pagination of tags in OCI build caches (#43136)
+- Apply fetched patches to develop specs (#44950)
+- Avoid Windows wrappers for filesystem utilities on non-Windows (#44126)
+- Fix issue with long filenames in build caches on Windows (#43851)
+- Fix formatting issue in `spack audit` (#45045)
+- CI fixes (#44582, #43965, #43967, #44279, #44213)
+
+## Package updates
+- protobuf: fix 3.4:3.21 patch checksum (#44443)
+- protobuf: update hash for patch needed when="@3.4:3.21" (#44210)
+- git: bump v2.39 to 2.45; deprecate unsafe versions (#44248)
+- gcc: use -rpath {rpath_dir} not -rpath={rpath dir} (#44315)
+- Remove mesa18 and libosmesa (#44264)
+- Enforce consistency of `gl` providers (#44307)
+- Require libiconv for iconv (#44335, #45026).
+  Notice that glibc/musl also provide iconv, but are not guaranteed to be
+  complete. Set `packages:iconv:require:[glibc]` to restore the old behavior.
+- py-matplotlib: qualify when to do a post install (#44191)
+- rust: fix v1.78.0 instructions (#44127)
+- suite-sparse: improve setting of the `libs` property (#44214)
+- netlib-lapack: provide blas and lapack together (#44981)
+
 # v0.22.0 (2024-05-12)

@@ -319,6 +380,16 @@
 * 344 committers to packages
 * 45 committers to core

+# v0.21.3 (2024-10-02)
+
+## Bugfixes
+- Forward compatibility with Spack 0.23 packages with language dependencies (#45205, #45191)
+- Forward compatibility with `urllib` from Python 3.12.6+ (#46453, #46483)
+- Bump `archspec` to 0.2.5-dev for better aarch64 and Windows support (#42854, #44005,
+  #45721, #46445)
+- Support macOS Sequoia (#45018, #45127, #43862)
+- CI and test maintenance (#42909, #42728, #46711, #41943, #43363)
+
 # v0.21.2 (2024-03-01)

 ## Bugfixes
@@ -1,71 +1,11 @@
 @ECHO OFF
 setlocal EnableDelayedExpansion
 :: (c) 2021 Lawrence Livermore National Laboratory
 :: To use this file independently of Spack's installer, execute this script in its directory, or add the
 :: associated bin directory to your PATH. Invoke to launch Spack Shell.
 ::
 :: source_dir/spack/bin/spack_cmd.bat
 ::
-pushd %~dp0..
-set SPACK_ROOT=%CD%
-pushd %CD%\..
-set spackinstdir=%CD%
-popd
-
-
-:: Check if Python is on the PATH
-if not defined python_pf_ver (
-    (for /f "delims=" %%F in ('where python.exe') do (
-        set "python_pf_ver=%%F"
-        goto :found_python
-    ) ) 2> NUL
-)
-:found_python
-if not defined python_pf_ver (
-    :: If not, look for Python from the Spack installer
-    :get_builtin
-    (for /f "tokens=*" %%g in ('dir /b /a:d "!spackinstdir!\Python*"') do (
-        set "python_ver=%%g")) 2> NUL
-
-    if not defined python_ver (
-        echo Python was not found on your system.
-        echo Please install Python or add Python to your PATH.
-    ) else (
-        set "py_path=!spackinstdir!\!python_ver!"
-        set "py_exe=!py_path!\python.exe"
-    )
-    goto :exitpoint
-) else (
-    :: Python is already on the path
-    set "py_exe=!python_pf_ver!"
-    (for /F "tokens=* USEBACKQ" %%F in (
-        `"!py_exe!" --version`) do (set "output=%%F")) 2>NUL
-    if not "!output:Microsoft Store=!"=="!output!" goto :get_builtin
-    goto :exitpoint
-)
-:exitpoint
-
-set "PATH=%SPACK_ROOT%\bin\;%PATH%"
-if defined py_path (
-    set "PATH=%py_path%;%PATH%"
-)
-
-if defined py_exe (
-    "%py_exe%" "%SPACK_ROOT%\bin\haspywin.py"
-)
-
-set "EDITOR=notepad"
-
-DOSKEY spacktivate=spack env activate $*
-
-@echo **********************************************************************
-@echo ** Spack Package Manager
-@echo **********************************************************************
-
-IF "%1"=="" GOTO CONTINUE
-set
-GOTO:EOF
-
-:continue
-set PROMPT=[spack] %PROMPT%
-%comspec% /k
+call "%~dp0..\share\spack\setup-env.bat"
+pushd %SPACK_ROOT%
+%comspec% /K
@@ -9,15 +9,15 @@ bootstrap:
   # may not be able to bootstrap all the software that Spack needs,
   # depending on its type.
   sources:
-  - name: 'github-actions-v0.5'
+  - name: github-actions-v0.6
+    metadata: $spack/share/spack/bootstrap/github-actions-v0.6
+  - name: github-actions-v0.5
     metadata: $spack/share/spack/bootstrap/github-actions-v0.5
   - name: 'github-actions-v0.4'
     metadata: $spack/share/spack/bootstrap/github-actions-v0.4
-  - name: 'spack-install'
+  - name: spack-install
     metadata: $spack/share/spack/bootstrap/spack-install
   trusted:
     # By default we trust bootstrapping from sources and from binaries
     # produced on Github via the workflow
+    github-actions-v0.6: true
     github-actions-v0.5: true
     github-actions-v0.4: true
     spack-install: true
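The same trust switches can also be flipped per user instead of editing the defaults shown above. A minimal sketch, assuming the usual user scope location (`~/.spack/bootstrap.yaml`); the choice of which source to distrust is illustrative only:

```yaml
# Hypothetical user-scope bootstrap.yaml: distrust from-source bootstrapping
# while keeping the pre-built binary sources from GitHub Actions enabled.
bootstrap:
  trusted:
    spack-install: false
```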
@@ -42,8 +42,8 @@ concretizer:
   # "minimal": allows the duplication of 'build-tools' nodes only (e.g. py-setuptools, cmake etc.)
   # "full" (experimental): allows separation of the entire build-tool stack (e.g. the entire "cmake" subDAG)
   strategy: minimal
-  # Option to specify compatiblity between operating systems for reuse of compilers and packages
-  # Specified as a key: [list] where the key is the os that is being targeted, and the list contains the OS's
-  # it can reuse. Note this is a directional compatibility so mutual compatibility between two OS's
+  # Option to specify compatibility between operating systems for reuse of compilers and packages
+  # Specified as a key: [list] where the key is the os that is being targeted, and the list contains the OS's
+  # it can reuse. Note this is a directional compatibility so mutual compatibility between two OS's
   # requires two entries i.e. os_compatible: {sonoma: [monterey], monterey: [sonoma]}
   os_compatible: {}
@@ -40,9 +40,9 @@ packages:
     jpeg: [libjpeg-turbo, libjpeg]
     lapack: [openblas, amdlibflame]
     libc: [glibc, musl]
-    libgfortran: [ gcc-runtime ]
+    libgfortran: [gcc-runtime]
     libglx: [mesa+glx]
-    libifcore: [ intel-oneapi-runtime ]
+    libifcore: [intel-oneapi-runtime]
     libllvm: [llvm]
     lua-lang: [lua, lua-luajit-openresty, lua-luajit]
     luajit: [lua-luajit-openresty, lua-luajit]
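Each virtual in the lists above maps to an ordered list of preferred providers, which a site scope may override. A minimal sketch (the preference order here is an arbitrary example, not a recommendation):

```yaml
# Hypothetical site-scope packages.yaml: prefer AMD's BLAS/LAPACK providers.
packages:
  all:
    providers:
      blas: [amdblis, openblas]
      lapack: [amdlibflame, openblas]
```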
@@ -1359,6 +1359,10 @@ For example, for the ``stackstart`` variant:
    mpileaks stackstart==4 # variant will be propagated to dependencies
    mpileaks stackstart=4 # only mpileaks will have this variant value

+Spack also allows variants to be propagated from a package that does
+not have that variant.
+
 ^^^^^^^^^^^^^^
 Compiler Flags
 ^^^^^^^^^^^^^^
@@ -166,3 +166,74 @@ while `py-numpy` still needs an older version:

 Up to Spack v0.20 ``duplicates:strategy:none`` was the default (and only) behavior. From Spack v0.21 the
 default behavior is ``duplicates:strategy:minimal``.
+
+--------
+Splicing
+--------
+
+The ``splice`` key covers config attributes for splicing specs in the solver.
+
+"Splicing" is a method for replacing a dependency with another spec
+that provides the same package or virtual. There are two types of
+splices, referring to different behaviors for shared dependencies
+between the root spec and the new spec replacing a dependency:
+"transitive" and "intransitive". A "transitive" splice is one that
+resolves all conflicts by taking the dependency from the new node. An
+"intransitive" splice is one that resolves all conflicts by taking the
+dependency from the original root. From a theory perspective, hybrid
+splices are possible but are not modeled by Spack.
+
+All spliced specs retain a ``build_spec`` attribute that points to the
+original Spec before any splice occurred. The ``build_spec`` for a
+non-spliced spec is itself.
+
+The figure below shows examples of transitive and intransitive splices:
+
+.. figure:: images/splices.png
+   :align: center
+
+The concretizer can be configured to explicitly splice particular
+replacements for a target spec. Splicing will allow the user to make
+use of generically built public binary caches, while swapping in
+highly optimized local builds for performance critical components
+and/or components that interact closely with the specific hardware
+details of the system. The most prominent candidate for splicing is
+MPI providers. MPI packages have relatively well-understood ABI
+characteristics, and most High Performance Computing facilities deploy
+highly optimized MPI packages tailored to their particular
+hardware. The following config block configures Spack to replace
+whatever MPI provider each spec was concretized to use with the
+particular package of ``mpich`` with the hash that begins ``abcdef``.
+
+.. code-block:: yaml
+
+   concretizer:
+     splice:
+       explicit:
+       - target: mpi
+         replacement: mpich/abcdef
+         transitive: false
+
+.. warning::
+
+   When configuring an explicit splice, you as the user take on the
+   responsibility for ensuring ABI compatibility between the specs
+   matched by the target and the replacement you provide. If they are
+   not compatible, Spack will not warn you and your application will
+   fail to run.
+
+The ``target`` field of an explicit splice can be any abstract
+spec. The ``replacement`` field must be a spec that includes the hash
+of a concrete spec, and the replacement must either be the same
+package as the target, provide the virtual that is the target, or
+provide a virtual that the target provides. The ``transitive`` field
+is optional -- by default, splices will be transitive.
+
+.. note::
+
+   With explicit splices configured, it is possible for Spack to
+   concretize to a spec that does not satisfy the input. For example,
+   with the config above ``hdf5 ^mvapich2`` will concretize to use
+   ``mpich/abcdef`` instead of ``mvapich2`` as the MPI provider. Spack
+   will warn the user in this case, but will not fail the
+   concretization.
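As a usage note on the optional ``transitive`` field documented above, a second hedged sketch (the package name and hash prefix here are made up):

```yaml
# Hypothetical splice entry: with "transitive" omitted it defaults to true,
# so conflicts among shared dependencies resolve toward the replacement.
concretizer:
  splice:
    explicit:
    - target: mpi
      replacement: openmpi/fedcba
```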
||||
|
@@ -130,14 +130,19 @@ before or after a particular phase. For example, in ``perl``, we see:
|
||||
|
||||
@run_after("install")
|
||||
def install_cpanm(self):
|
||||
spec = self.spec
|
||||
|
||||
if spec.satisfies("+cpanm"):
|
||||
with working_dir(join_path("cpanm", "cpanm")):
|
||||
perl = spec["perl"].command
|
||||
perl("Makefile.PL")
|
||||
make()
|
||||
make("install")
|
||||
spec = self.spec
|
||||
maker = make
|
||||
cpan_dir = join_path("cpanm", "cpanm")
|
||||
if sys.platform == "win32":
|
||||
maker = nmake
|
||||
cpan_dir = join_path(self.stage.source_path, cpan_dir)
|
||||
cpan_dir = windows_sfn(cpan_dir)
|
||||
if "+cpanm" in spec:
|
||||
with working_dir(cpan_dir):
|
||||
perl = spec["perl"].command
|
||||
perl("Makefile.PL")
|
||||
maker()
|
||||
maker("install")
|
||||
|
||||
This extra step automatically installs ``cpanm`` in addition to the
|
||||
base Perl installation.
|
||||
@@ -176,8 +181,14 @@ In the ``perl`` package, we can see:
|
||||
|
||||
@run_after("build")
|
||||
@on_package_attributes(run_tests=True)
|
||||
def test(self):
|
||||
make("test")
|
||||
def build_test(self):
|
||||
if sys.platform == "win32":
|
||||
win32_dir = os.path.join(self.stage.source_path, "win32")
|
||||
win32_dir = windows_sfn(win32_dir)
|
||||
with working_dir(win32_dir):
|
||||
nmake("test", ignore_quotes=True)
|
||||
else:
|
||||
make("test")
|
||||
|
||||
As you can guess, this runs ``make test`` *after* building the package,
|
||||
if and only if testing is requested. Again, this is not specific to
|
||||
|
@@ -49,14 +49,14 @@ following phases:

#. ``install`` - install the package

Package developers often add unit tests that can be invoked with
-``scons test`` or ``scons check``. Spack provides a ``test`` method
+``scons test`` or ``scons check``. Spack provides a ``build_test`` method
to handle this. Since we don't know which one the package developer
-chose, the ``test`` method does nothing by default, but can be easily
+chose, the ``build_test`` method does nothing by default, but can be easily
overridden like so:

.. code-block:: python

-   def test(self):
+   def build_test(self):
       scons("check")

@@ -214,12 +214,15 @@ def setup(sphinx):

    # Spack classes that intersphinx is unable to resolve
    ("py:class", "spack.version.StandardVersion"),
    ("py:class", "spack.spec.DependencySpec"),
    ("py:class", "spack.spec.ArchSpec"),
    ("py:class", "spack.spec.InstallStatus"),
    ("py:class", "spack.spec.SpecfileReaderBase"),
    ("py:class", "spack.install_test.Pb"),
    ("py:class", "spack.filesystem_view.SimpleFilesystemView"),
    ("py:class", "spack.traverse.EdgeAndDepth"),
    ("py:class", "archspec.cpu.microarchitecture.Microarchitecture"),
    # TypeVar that is not handled correctly
    ("py:class", "llnl.util.lang.T"),
]

# The reST default role (used for this markup: `text`) to use for all documents.

@@ -281,7 +281,7 @@ When spack queries for configuration parameters, it searches in

higher-precedence scopes first. So, settings in a higher-precedence file
can override those with the same key in a lower-precedence one. For
list-valued settings, Spack *prepends* higher-precedence settings to
-lower-precedence settings. Completely ignoring higher-level configuration
+lower-precedence settings. Completely ignoring lower-precedence configuration
options is supported with the ``::`` notation for keys (see
:ref:`config-overrides` below).

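As a hedged illustration of the ``::`` notation (the package and provider
names are examples only), a higher-precedence scope can replace, rather
than merge with, an entire section:

.. code-block:: yaml

   packages::
     all:
       providers:
         mpi: [mvapich2]
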
@@ -511,6 +511,7 @@ Spack understands over a dozen special variables. These are:

* ``$target_family``. The target family for the current host, as
  detected by ArchSpec. E.g. ``x86_64`` or ``aarch64``.
* ``$date``: the current date in the format YYYY-MM-DD
+* ``$spack_short_version``: the Spack version truncated to the first components.


Note that, as with shell variables, you can write these as ``$varname``

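An illustrative sketch of these variables used in configuration (the
paths are examples, not defaults):

.. code-block:: yaml

   config:
     install_tree:
       root: ~/spack-installs/$spack_short_version
     misc_cache: ~/.spack/cache-$date
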
@@ -184,7 +184,7 @@ Style Tests

Spack uses `Flake8 <http://flake8.pycqa.org/en/latest/>`_ to test for
`PEP 8 <https://www.python.org/dev/peps/pep-0008/>`_ conformance and
-`mypy <https://mypy.readthedocs.io/en/stable/>` for type checking. PEP 8 is
+`mypy <https://mypy.readthedocs.io/en/stable/>`_ for type checking. PEP 8 is
a series of style guides for Python that provide suggestions for everything
from variable naming to indentation. In order to limit the number of PRs that
were mostly style changes, we decided to enforce PEP 8 conformance. Your PR

@@ -333,13 +333,9 @@ inserting them at different places in the spack code base. Whenever a hook

type triggers by way of a function call, we find all the hooks of that type,
and run them.

-Spack defines hooks by way of a module at ``lib/spack/spack/hooks`` where we can define
-types of hooks in the ``__init__.py``, and then python files in that folder
-can use hook functions. The files are automatically parsed, so if you write
-a new file for some integration (e.g., ``lib/spack/spack/hooks/myintegration.py``)
-you can then write hook functions in that file that will be automatically detected,
-and run whenever your hook is called. This section will cover the basic kind
-of hooks, and how to write them.
+Spack defines hooks by way of a module in the ``lib/spack/spack/hooks`` directory.
+This module has to be registered in ``__init__.py`` so that Spack is aware of it.
+This section will cover the basic kind of hooks, and how to write them.

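As a rough, hedged sketch (not part of this change), a new hook module
could look like the following; ``post_install`` is one of the hook types
Spack defines, but verify the exact signature against
``lib/spack/spack/hooks/__init__.py``:

.. code-block:: python

   # lib/spack/spack/hooks/myintegration.py -- hypothetical integration
   import llnl.util.tty as tty


   def post_install(spec, explicit=None):
       """Log every successful install; assumes this hook name is registered."""
       tty.msg(f"myintegration: installed {spec.name}@{spec.version}")
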
^^^^^^^^^^^^^^
Types of Hooks
@@ -712,27 +708,27 @@ Release branches

^^^^^^^^^^^^^^^^

There are currently two types of Spack releases: :ref:`major releases
-<major-releases>` (``0.17.0``, ``0.18.0``, etc.) and :ref:`point releases
-<point-releases>` (``0.17.1``, ``0.17.2``, ``0.17.3``, etc.). Here is a
+<major-releases>` (``0.21.0``, ``0.22.0``, etc.) and :ref:`patch releases
+<patch-releases>` (``0.22.1``, ``0.22.2``, ``0.22.3``, etc.). Here is a
diagram of how Spack release branches work::

-   o    branch: develop  (latest version, v0.19.0.dev0)
+   o    branch: develop  (latest version, v0.23.0.dev0)
    |
    o
-   | o   branch: releases/v0.18, tag: v0.18.1
+   | o   branch: releases/v0.22, tag: v0.22.1
    o |
-   | o   tag: v0.18.0
+   | o   tag: v0.22.0
    o |
    | o
    |/
    o
    |
    o
-   | o   branch: releases/v0.17, tag: v0.17.2
+   | o   branch: releases/v0.21, tag: v0.21.2
    o |
-   | o   tag: v0.17.1
+   | o   tag: v0.21.1
    o |
-   | o   tag: v0.17.0
+   | o   tag: v0.21.0
    o |
    | o
    |/

@@ -743,8 +739,8 @@ requests target ``develop``. The ``develop`` branch will report that its

version is that of the next **major** release with a ``.dev0`` suffix.

Each Spack release series also has a corresponding branch, e.g.
-``releases/v0.18`` has ``0.18.x`` versions of Spack, and
-``releases/v0.17`` has ``0.17.x`` versions. A major release is the first
+``releases/v0.22`` has ``v0.22.x`` versions of Spack, and
+``releases/v0.21`` has ``v0.21.x`` versions. A major release is the first
tagged version on a release branch. Minor releases are back-ported from
develop onto release branches. This is typically done by cherry-picking
bugfix commits off of ``develop``.
@@ -774,27 +770,40 @@ for more details.

Scheduling work for releases
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-We schedule work for releases by creating `GitHub projects
-<https://github.com/spack/spack/projects>`_. At any time, there may be
-several open release projects. For example, below are two releases (from
-some past version of the page linked above):
+We schedule work for **major releases** through `milestones
+<https://github.com/spack/spack/milestones>`_ and `GitHub Projects
+<https://github.com/spack/spack/projects>`_, while **patch releases** use `labels
+<https://github.com/spack/spack/labels>`_.

-.. image:: images/projects.png
+There is only one milestone open at a time. Its name corresponds to the next major version, for
+example ``v0.23``. Important issues and pull requests should be assigned to this milestone by
+core developers, so that they are not forgotten at the time of release. The milestone is closed
+when the release is made, and a new milestone is created for the next major release.

-This image shows one release in progress for ``0.15.1`` and another for
-``0.16.0``. Each of these releases has a project board containing issues
-and pull requests. GitHub shows a status bar with completed work in
-green, work in progress in purple, and work not started yet in gray, so
-it's fairly easy to see progress.
+Bug reports in GitHub issues are automatically labelled ``bug`` and ``triage``. Spack developers
+assign one of the labels ``impact-low``, ``impact-medium`` or ``impact-high``. This will make the
+issue appear in the `Triaged bugs <https://github.com/orgs/spack/projects/6>`_ project board.
+Important issues should be assigned to the next milestone as well, so they appear at the top of
+the project board.

-Spack's project boards are not firm commitments so we move work between
-releases frequently. If we need to make a release and some tasks are not
-yet done, we will simply move them to the next minor or major release, rather
-than delaying the release to complete them.
+Spack's milestones are not firm commitments so we move work between releases frequently. If we
+need to make a release and some tasks are not yet done, we will simply move them to the next major
+release milestone, rather than delaying the release to complete them.

-For more on using GitHub project boards, see `GitHub's documentation
-<https://docs.github.com/en/github/managing-your-work-on-github/about-project-boards>`_.
+^^^^^^^^^^^^^^^^^^^^^
+Backporting bug fixes
+^^^^^^^^^^^^^^^^^^^^^
+
+When a bug is fixed in the ``develop`` branch, it is often necessary to backport the fix to one
+(or more) of the ``releases/vX.Y`` branches. Only the release manager is responsible for doing
+backports, but Spack maintainers are responsible for labelling pull requests (and issues if no bug
+fix is available yet) with ``vX.Y.Z`` labels. The label should correspond to the next patch version
+that the bug fix should be backported to.
+
+Backports are done publicly by the release manager using a pull request named ``Backports vX.Y.Z``.
+This pull request is opened from the ``backports/vX.Y.Z`` branch, targets the ``releases/vX.Y``
+branch and contains a (growing) list of cherry-picked commits from the ``develop`` branch.
+Typically there are one or two backport pull requests open at any given time.

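A sketch of that cherry-picking workflow (the branch name and commit SHA
are illustrative only):

.. code-block:: console

   $ git checkout backports/v0.22.2
   $ git cherry-pick 1a2b3c4d    # squash-merge commit of a PR labelled v0.22.2
   $ git push origin backports/v0.22.2
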
.. _major-releases:
@@ -802,25 +811,21 @@ For more on using GitHub project boards, see `GitHub's documentation

Making major releases
^^^^^^^^^^^^^^^^^^^^^

-Assuming a project board has already been created and all required work
-completed, the steps to make the major release are:
+Assuming all required work from the milestone is completed, the steps to make the major release
+are:

-#. Create two new project boards:
-
-   * One for the next major release
-   * One for the next point release
+#. `Create a new milestone <https://github.com/spack/spack/milestones>`_ for the next major
+   release.

-#. Move any optional tasks that are not done to one of the new project boards.
-
-   In general, small bugfixes should go to the next point release. Major
-   features, refactors, and changes that could affect concretization should
-   go in the next major release.
+#. `Create a new label <https://github.com/spack/spack/labels>`_ for the next patch release.
+
+#. Move any optional tasks that are not done to the next milestone.

#. Create a branch for the release, based on ``develop``:

   .. code-block:: console

-      $ git checkout -b releases/v0.15 develop
+      $ git checkout -b releases/v0.23 develop

   For a version ``vX.Y.Z``, the branch's name should be
   ``releases/vX.Y``. That is, you should create a ``releases/vX.Y``

@@ -856,8 +861,8 @@ completed, the steps to make the major release are:

   Create a pull request targeting the ``develop`` branch, bumping the major
   version in ``lib/spack/spack/__init__.py`` with a ``dev0`` release segment.
-   For instance when you have just released ``v0.15.0``, set the version
-   to ``(0, 16, 0, 'dev0')`` on ``develop``.
+   For instance when you have just released ``v0.23.0``, set the version
+   to ``(0, 24, 0, 'dev0')`` on ``develop``.

#. Follow the steps in :ref:`publishing-releases`.

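A sketch of what that version bump looks like (check the variable name
against the current ``__init__.py``):

.. code-block:: python

   # lib/spack/spack/__init__.py
   spack_version_info = (0, 24, 0, "dev0")
   spack_version = ".".join(str(v) for v in spack_version_info)
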
@@ -866,82 +871,52 @@ completed, the steps to make the major release are:

#. Follow the steps in :ref:`announcing-releases`.


-.. _point-releases:
+.. _patch-releases:

^^^^^^^^^^^^^^^^^^^^^
-Making point releases
+Making patch releases
^^^^^^^^^^^^^^^^^^^^^

-Assuming a project board has already been created and all required work
-completed, the steps to make the point release are:
+To make the patch release process both efficient and transparent, we use a *backports pull request*
+which contains cherry-picked commits from the ``develop`` branch. The majority of the work is to
+cherry-pick the bug fixes, which ideally should be done as soon as they land on ``develop``:
+this ensures cherry-picking happens in order, and makes conflicts easier to resolve since the
+changes are fresh in the mind of the developer.

-#. Create a new project board for the next point release.
+The backports pull request is always titled ``Backports vX.Y.Z`` and is labelled ``backports``. It
+is opened from a branch named ``backports/vX.Y.Z`` and targets the ``releases/vX.Y`` branch.

-#. Move any optional tasks that are not done to the next project board.
+Whenever a pull request labelled ``vX.Y.Z`` is merged, cherry-pick the associated squashed commit
+on ``develop`` to the ``backports/vX.Y.Z`` branch. For pull requests that were rebased (or not
+squashed), cherry-pick each associated commit individually. Never force push to the
+``backports/vX.Y.Z`` branch.

-#. Check out the release branch (it should already exist).
-
-   For the ``X.Y.Z`` release, the release branch is called ``releases/vX.Y``.
-   For ``v0.15.1``, you would check out ``releases/v0.15``:
-
-   .. code-block:: console
-
-      $ git checkout releases/v0.15
+.. warning::
+
+   Sometimes you may **still** get merge conflicts even if you have
+   cherry-picked all the commits in order. This generally means there
+   is some other intervening pull request that the one you're trying
+   to pick depends on. In these cases, you'll need to make a judgment
+   call regarding those pull requests. Consider the number of affected
+   files and/or the resulting differences.
+
+   1. If the changes are small, you might just cherry-pick it.
+
+   2. If the changes are large, then you may decide that this fix is not
+      worth including in a patch release, in which case you should remove
+      the label from the pull request. Remember that large, manual backports
+      are seldom the right choice for a patch release.

-#. If a pull request to the release branch named ``Backports vX.Y.Z`` is not already
-   in the project, create it. This pull request ought to be created as early as
-   possible when working on a release project, so that we can build the release
-   commits incrementally, and identify potential conflicts at an early stage.
+When all commits are cherry-picked in the ``backports/vX.Y.Z`` branch, make the patch
+release as follows:

-#. Cherry-pick each pull request in the ``Done`` column of the release
-   project board onto the ``Backports vX.Y.Z`` pull request.
-
-   This is **usually** fairly simple since we squash the commits from the
-   vast majority of pull requests. That means there is only one commit
-   per pull request to cherry-pick. For example, `this pull request
-   <https://github.com/spack/spack/pull/15777>`_ has three commits, but
-   they were squashed into a single commit on merge. You can see the
-   commit that was created here:
-
-   .. image:: images/pr-commit.png
-
-   You can easily cherry pick it like this (assuming you already have the
-   release branch checked out):
-
-   .. code-block:: console
-
-      $ git cherry-pick 7e46da7
-
-   For pull requests that were rebased (or not squashed), you'll need to
-   cherry-pick each associated commit individually.
-
-   .. warning::
-
-      It is important to cherry-pick commits in the order they happened,
-      otherwise you can get conflicts while cherry-picking. When
-      cherry-picking look at the merge date,
-      **not** the number of the pull request or the date it was opened.
-
-      Sometimes you may **still** get merge conflicts even if you have
-      cherry-picked all the commits in order. This generally means there
-      is some other intervening pull request that the one you're trying
-      to pick depends on. In these cases, you'll need to make a judgment
-      call regarding those pull requests. Consider the number of affected
-      files and or the resulting differences.
-
-      1. If the dependency changes are small, you might just cherry-pick it,
-         too. If you do this, add the task to the release board.
-
-      2. If the changes are large, then you may decide that this fix is not
-         worth including in a point release, in which case you should remove
-         the task from the release project.
-
-      3. You can always decide to manually back-port the fix to the release
-         branch if neither of the above options makes sense, but this can
-         require a lot of work. It's seldom the right choice.
+#. `Create a new label <https://github.com/spack/spack/labels>`_ ``vX.Y.{Z+1}`` for the next patch
+   release.

-#. When all the commits from the project board are cherry-picked into
-   the ``Backports vX.Y.Z`` pull request, you can push a commit to:
+#. Replace the label ``vX.Y.Z`` with ``vX.Y.{Z+1}`` for all PRs and issues that are not done.
+
+#. Manually push a single commit with commit message ``Set version to vX.Y.Z`` to the
+   ``backports/vX.Y.Z`` branch, that both bumps the Spack version number and updates the changelog:

   1. Bump the version in ``lib/spack/spack/__init__.py``.
   2. Update ``CHANGELOG.md`` with a list of the changes.

@@ -950,20 +925,22 @@ completed, the steps to make the point release are:

   release branch. See `the changelog from 0.14.1
   <https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.

-#. Merge the ``Backports vX.Y.Z`` PR with the **Rebase and merge** strategy. This
-   is needed to keep track in the release branch of all the commits that were
-   cherry-picked.
-
-#. Make sure CI passes on the release branch, including:
+#. Make sure CI passes on the **backports pull request**, including:

   * Regular unit tests
   * Build tests
   * The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_

-   If CI does not pass, you'll need to figure out why, and make changes
-   to the release branch until it does. You can make more commits, modify
-   or remove cherry-picked commits, or cherry-pick **more** from
-   ``develop`` to make this happen.
+#. Merge the ``Backports vX.Y.Z`` PR with the **Rebase and merge** strategy. This
+   is needed to keep track in the release branch of all the commits that were
+   cherry-picked.
+
+#. Make sure CI passes on the last commit of the **release branch**.
+
+#. In the rare case you need to include additional commits in the patch release after the backports
+   PR is merged, it is best to delete the last commit ``Set version to vX.Y.Z`` from the release
+   branch with a single force push, open a new backports PR named ``Backports vX.Y.Z (2)``, and
+   repeat the process. Avoid repeated force pushes to the release branch.

#. Follow the steps in :ref:`publishing-releases`.

@@ -1038,25 +1015,31 @@ Updating `releases/latest`

If the new release is the **highest** Spack release yet, you should
also tag it as ``releases/latest``. For example, suppose the highest
-release is currently ``0.15.3``:
+release is currently ``0.22.3``:

-* If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
-  it with ``releases/latest``, as these are higher than ``0.15.3``.
+* If you are releasing ``0.22.4`` or ``0.23.0``, then you should tag
+  it with ``releases/latest``, as these are higher than ``0.22.3``.

* If you are making a new release of an **older** major version of
-  Spack, e.g. ``0.14.4``, then you should not tag it as
+  Spack, e.g. ``0.21.4``, then you should not tag it as
  ``releases/latest`` (as there are newer major versions).

-To tag ``releases/latest``, do this:
+To do so, first fetch the latest tag created on GitHub, since you may not have it locally:

.. code-block:: console

-   $ git checkout releases/vX.Y    # vX.Y is the new release's branch
-   $ git tag --force releases/latest
-   $ git push --force --tags
+   $ git fetch --force git@github.com:spack/spack vX.Y.Z

-The ``--force`` argument to ``git tag`` makes ``git`` overwrite the existing
-``releases/latest`` tag with the new one.
+Then tag ``vX.Y.Z`` as ``releases/latest`` and push the individual tag to GitHub.
+
+.. code-block:: console
+
+   $ git tag --force releases/latest vX.Y.Z
+   $ git push --force git@github.com:spack/spack releases/latest
+
+The ``--force`` argument to ``git tag`` makes ``git`` overwrite the existing ``releases/latest``
+tag with the new one. Do **not** use the ``--tags`` flag when pushing, since this will push *all*
+local tags.


.. _announcing-releases:

@@ -5,49 +5,56 @@

.. _environments:

-=========================
-Environments (spack.yaml)
-=========================
+=====================================
+Environments (spack.yaml, spack.lock)
+=====================================

-An environment is used to group together a set of specs for the
-purpose of building, rebuilding and deploying in a coherent fashion.
-Environments provide a number of advantages over the *à la carte*
-approach of building and loading individual Spack modules:
+An environment is used to group a set of specs intended for some purpose
+to be built, rebuilt, and deployed in a coherent fashion. Environments
+define aspects of the installation of the software, such as:

-#. Environments separate the steps of (a) choosing what to
-   install, (b) concretizing, and (c) installing. This allows
-   Environments to remain stable and repeatable, even if Spack packages
-   are upgraded: specs are only re-concretized when the user
-   explicitly asks for it. It is even possible to reliably
-   transport environments between different computers running
-   different versions of Spack!
-#. Environments allow several specs to be built at once; a more robust
-   solution than ad-hoc scripts making multiple calls to ``spack
-   install``.
-#. An Environment that is built as a whole can be loaded as a whole
-   into the user environment. An Environment can be built to maintain
-   a filesystem view of its packages, and the environment can load
-   that view into the user environment at activation time. Spack can
-   also generate a script to load all modules related to an
-   environment.
+#. *which* specs to install;
+#. *how* those specs are configured; and
+#. *where* the concretized software will be installed.
+
+Aggregating this information into an environment for processing has advantages
+over the *à la carte* approach of building and loading individual Spack modules.
+
+With environments, you concretize, install, or load (activate) all of the
+specs with a single command. Concretization fully configures the specs
+and dependencies of the environment in preparation for installing the
+software. This is a more robust solution than ad-hoc installation scripts.
+And you can share an environment or even re-use it on a different computer.
+
+Environment definitions, especially *how* specs are configured, allow the
+software to remain stable and repeatable even when Spack packages are
+upgraded. Changes are only picked up when the environment is explicitly
+re-concretized.
+
+Defining *where* specs are installed supports a filesystem view of the
+environment. Yet Spack maintains a single installation of the software that
+can be re-used across multiple environments.
+
+Activating an environment determines *when* all of the associated (and
+installed) specs are loaded, which limits the software loaded to those specs
+actually needed by the environment. Spack can even generate a script to
+load all modules related to an environment.

Other packaging systems also provide environments that are similar in
some ways to Spack environments; for example, `Conda environments
<https://conda.io/docs/user-guide/tasks/manage-environments.html>`_ or
`Python Virtual Environments
<https://docs.python.org/3/tutorial/venv.html>`_. Spack environments
-provide some distinctive features:
+provide some distinctive features, though:

#. A spec installed "in" an environment is no different from the same
-   spec installed anywhere else in Spack. Environments are assembled
-   simply by collecting together a set of specs.
-#. Spack Environments may contain more than one spec of the same
+   spec installed anywhere else in Spack.
+#. Spack environments may contain more than one spec of the same
   package.

Spack uses a "manifest and lock" model similar to `Bundler gemfiles
-<https://bundler.io/man/gemfile.5.html>`_ and other package
-managers. The user input file is named ``spack.yaml`` and the lock
-file is named ``spack.lock``
+<https://bundler.io/man/gemfile.5.html>`_ and other package managers.
+The environment's user input file (or manifest) is named ``spack.yaml``.
+The lock file, which contains the fully configured and concretized specs,
+is named ``spack.lock``.

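For orientation, a minimal manifest could look like the following sketch
(the package names are illustrative):

.. code-block:: yaml

   # spack.yaml
   spack:
     specs:
     - zlib
     - hdf5 +mpi
     view: true
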
.. _environments-using:
@@ -68,55 +75,60 @@ An environment is created by:

   $ spack env create myenv

-Spack then creates the directory ``var/spack/environments/myenv``.
+The directory ``$SPACK_ROOT/var/spack/environments/myenv`` is created
+to manage the environment.

.. note::

-   All managed environments by default are stored in the ``var/spack/environments`` folder.
-   This location can be changed by setting the ``environments_root`` variable in ``config.yaml``.
+   All managed environments by default are stored in the
+   ``$SPACK_ROOT/var/spack/environments`` folder. This location can be changed
+   by setting the ``environments_root`` variable in ``config.yaml``.

-In the ``var/spack/environments/myenv`` directory, Spack creates the
-file ``spack.yaml`` and the hidden directory ``.spack-env``.
-
-Spack stores metadata in the ``.spack-env`` directory. User
-interaction will occur through the ``spack.yaml`` file and the Spack
-commands that affect it. When the environment is concretized, Spack
-will create a file ``spack.lock`` with the concrete information for
+Spack creates the file ``spack.yaml``, hidden directory ``.spack-env``, and
+``spack.lock`` file under ``$SPACK_ROOT/var/spack/environments/myenv``. User
+interaction occurs through the ``spack.yaml`` file and the Spack commands
+that affect it. Metadata and, by default, the view are stored in the
+``.spack-env`` directory. When the environment is concretized, Spack creates
+the ``spack.lock`` file with the fully configured specs and dependencies for
the environment.

-In addition to being the default location for the view associated with
-an Environment, the ``.spack-env`` directory also contains:
+The ``.spack-env`` subdirectory also contains:

-* ``repo/``: A repo consisting of the Spack packages used in this
-  environment. This allows the environment to build the same, in
-  theory, even on different versions of Spack with different
+* ``repo/``: A subdirectory acting as the repo consisting of the Spack
+  packages used in the environment. It allows the environment to build
+  the same, in theory, even on different versions of Spack with different
  packages!
-* ``logs/``: A directory containing the build logs for the packages
-  in this Environment.
+* ``logs/``: A subdirectory containing the build logs for the packages
+  in this environment.

-Spack Environments can also be created from either a manifest file
-(usually but not necessarily named, ``spack.yaml``) or a lockfile.
-To create an Environment from a manifest:
+Spack Environments can also be created from either the user input (manifest)
+file or the lockfile. Create an environment from a manifest using:

.. code-block:: console

   $ spack env create myenv spack.yaml

-To create an Environment from a ``spack.lock`` lockfile:
+The resulting environment is guaranteed to have the same root specs as
+the original but may concretize differently in the presence of different
+explicit or default configuration settings (e.g., a different version of
+Spack or for a different user account).
+
+Create an environment from a ``spack.lock`` file using:

.. code-block:: console

   $ spack env create myenv spack.lock

-Either of these commands can also take a full path to the
-initialization file.
-
-A Spack Environment created from a ``spack.yaml`` manifest is
-guaranteed to have the same root specs as the original Environment,
-but may concretize differently. A Spack Environment created from a
-``spack.lock`` lockfile is guaranteed to have the same concrete specs
-as the original Environment. Either may obviously then differ as the
-user modifies it.
+The resulting environment, when on the same or a compatible machine, is
+guaranteed to initially have the same concrete specs as the original.
+
+.. note::
+
+   Environment creation also accepts a full path to the file.
+
+   If the path is not under the ``$SPACK_ROOT/var/spack/environments``
+   directory then the source is referred to as an
+   :ref:`independent environment <independent_environments>`.

^^^^^^^^^^^^^^^^^^^^^^^^^
Activating an Environment
@@ -129,7 +141,7 @@ To activate an environment, use the following command:

   $ spack env activate myenv

By default, ``spack env activate`` will load the view associated
-with the Environment into the user environment. The ``-v,
+with the environment into the user environment. The ``-v,
--with-view`` argument ensures this behavior, and the ``-V,
--without-view`` argument activates the environment without changing
the user environment variables.
@@ -142,8 +154,11 @@ user's prompt to begin with the environment name in brackets.

   $ spack env activate -p myenv
   [myenv] $ ...

-The ``activate`` command can also be used to create a new environment if it does not already
-exist.
+The ``activate`` command can also be used to create a new environment, if it is
+not already defined, by adding the ``--create`` flag. Managed and independent
+environments can both be created using the same flags that ``spack env create``
+accepts. If an environment already exists then spack will simply activate it
+and ignore the create-specific flags.

.. code-block:: console

@@ -168,49 +183,50 @@ or the shortcut alias

If the environment was activated with its view, deactivating the
environment will remove the view from the user environment.

-^^^^^^^^^^^^^^^^^^^^^^
-Anonymous Environments
-^^^^^^^^^^^^^^^^^^^^^^
+.. _independent_environments:

-Apart from managed environments, Spack also supports anonymous environments.
+^^^^^^^^^^^^^^^^^^^^^^^^
+Independent Environments
+^^^^^^^^^^^^^^^^^^^^^^^^

-Anonymous environments can be placed in any directory of choice.
+Independent environments can be located in any directory outside of Spack.

.. note::

   When uninstalling packages, Spack asks the user to confirm the removal of packages
-   that are still used in a managed environment. This is not the case for anonymous
+   that are still used in a managed environment. This is not the case for independent
   environments.

-To create an anonymous environment, use one of the following commands:
+To create an independent environment, use one of the following commands:

.. code-block:: console

-   $ spack env create --dir my_env
+   $ spack env create ./my_env

-As a shorthand, you can also create an anonymous environment upon activation if it does not
+As a shorthand, you can also create an independent environment upon activation if it does not
already exist:

.. code-block:: console

   $ spack env activate --create ./my_env

-For convenience, Spack can also place an anonymous environment in a temporary directory for you:
+For convenience, Spack can also place an independent environment in a temporary directory for you:

.. code-block:: console

   $ spack env activate --temp


-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Environment Sensitive Commands
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+Environment-Aware Commands
+^^^^^^^^^^^^^^^^^^^^^^^^^^

-Spack commands are environment sensitive. For example, the ``find``
-command shows only the specs in the active Environment if an
-Environment has been activated. Similarly, the ``install`` and
-``uninstall`` commands act on the active environment.
+Spack commands are environment-aware. For example, the ``find``
+command shows only the specs in the active environment if an
+environment has been activated. Otherwise it shows all specs in
+the Spack instance. The same rule applies to the ``install`` and
+``uninstall`` commands.

.. code-block:: console

@@ -255,32 +271,33 @@ Environment has been activated. Similarly, the ``install`` and


Note that when we installed the abstract spec ``zlib@1.2.8``, it was
-presented as a root of the Environment. All explicitly installed
-packages will be listed as roots of the Environment.
+presented as a root of the environment. All explicitly installed
+packages will be listed as roots of the environment.

All of the Spack commands that act on the list of installed specs are
-Environment-sensitive in this way, including ``install``,
-``uninstall``, ``find``, ``extensions``, and more. In the
+environment-aware in this way, including ``install``,
+``uninstall``, ``find``, ``extensions``, etcetera. In the
:ref:`environment-configuration` section we will discuss
-Environment-sensitive commands further.
+environment-aware commands further.

^^^^^^^^^^^^^^^^^^^^^
Adding Abstract Specs
^^^^^^^^^^^^^^^^^^^^^

-An abstract spec is the user-specified spec before Spack has applied
-any defaults or dependency information.
+An abstract spec is the user-specified spec before Spack applies
+defaults or dependency information.

-Users can add abstract specs to an Environment using the ``spack add``
-command. The most important component of an Environment is a list of
+Users can add abstract specs to an environment using the ``spack add``
+command. The most important component of an environment is a list of
abstract specs.

-Adding a spec adds to the manifest (the ``spack.yaml`` file), which is
-used to define the roots of the Environment, but does not affect the
-concrete specs in the lockfile, nor does it install the spec.
+Adding a spec adds it as a root spec of the environment in the user
+input file (``spack.yaml``). It does not affect the concrete specs
+in the lock file (``spack.lock``) and it does not install the spec.

-The ``spack add`` command is environment aware. It adds to the
-currently active environment. All environment aware commands can also
+The ``spack add`` command is environment-aware. It adds the spec to the
+currently active environment. An error is generated if there isn't an
+active environment. All environment-aware commands can also
be called using the ``spack -e`` flag to specify the environment.

.. code-block:: console
@@ -300,11 +317,11 @@ or

Concretizing
^^^^^^^^^^^^

-Once some user specs have been added to an environment, they can be concretized.
-There are at the moment three different modes of operation to concretize an environment,
-which are explained in details in :ref:`environments_concretization_config`.
-Regardless of which mode of operation has been chosen, the following
-command will ensure all the root specs are concretized according to the
+Once user specs have been added to an environment, they can be concretized.
+There are three different modes of operation to concretize an environment,
+explained in detail in :ref:`environments_concretization_config`.
+Regardless of which mode of operation is chosen, the following
+command will ensure all of the root specs are concretized according to the
constraints that are prescribed in the configuration:

.. code-block:: console
@@ -313,16 +330,15 @@ constraints that are prescribed in the configuration:

In the case of specs that are not concretized together, the command
above will concretize only the specs that were added and not yet
-concretized. Forcing a re-concretization of all the specs can be done
-instead with this command:
+concretized. Forcing a re-concretization of all of the specs can be done
+by adding the ``-f`` option:

.. code-block:: console

   [myenv]$ spack concretize -f

-When the ``-f`` flag is not used to reconcretize all specs, Spack
-guarantees that already concretized specs are unchanged in the
-environment.
+Without the option, Spack guarantees that already concretized specs are
+unchanged in the environment.

The ``concretize`` command does not install any packages. For packages
that have already been installed outside of the environment, the
@@ -355,16 +371,16 @@ installed specs using the ``-c`` (``--concretized``) flag.

Installing an Environment
^^^^^^^^^^^^^^^^^^^^^^^^^

-In addition to installing individual specs into an Environment, one
-can install the entire Environment at once using the command
+In addition to adding individual specs to an environment, one
+can install the entire environment at once using the command

.. code-block:: console

   [myenv]$ spack install

-If the Environment has been concretized, Spack will install the
-concretized specs. Otherwise, ``spack install`` will first concretize
-the Environment and then install the concretized specs.
+If the environment has been concretized, Spack will install the
+concretized specs. Otherwise, ``spack install`` will concretize
+the environment before installing the concretized specs.

.. note::

@@ -385,17 +401,17 @@ the Environment and then install the concretized specs.


As it installs, ``spack install`` creates symbolic links in the
-``logs/`` directory in the Environment, allowing for easy inspection
+``logs/`` directory in the environment, allowing for easy inspection
of build logs related to that environment. The ``spack install``
command also stores a Spack repo containing the ``package.py`` file
used at install time for each package in the ``repos/`` directory in
-the Environment.
+the environment.

The ``--no-add`` option can be used in a concrete environment to tell
spack to install specs already present in the environment but not to
add any new root specs to the environment. For root specs provided
to ``spack install`` on the command line, ``--no-add`` is the default,
-while for dependency specs on the other hand, it is optional. In other
+while for dependency specs, it is optional. In other
words, if there is an unambiguous match in the active concrete environment
for a root spec provided to ``spack install`` on the command line, spack
does not require you to specify the ``--no-add`` option to prevent the spec
@@ -409,9 +425,13 @@ Developing Packages in a Spack Environment

The ``spack develop`` command allows one to develop Spack packages in
an environment. It requires a spec containing a concrete version, and
-will configure Spack to install the package from local source. By
-default, it will also clone the package to a subdirectory in the
-environment. This package will have a special variant ``dev_path``
+will configure Spack to install the package from local source.
+If a version is not provided from the command line interface then spack
+will automatically pick the highest version the package has defined.
+This means any infinity versions (``develop``, ``main``, ``stable``) will be
+preferred in this selection process.
+By default, ``spack develop`` will also clone the package to a subdirectory in the
+environment for the local source. This package will have a special variant ``dev_path``
set, and Spack will ensure the package and its dependents are rebuilt
any time the environment is installed if the package's local source
code has been modified. Spack's native implementation to check for modifications
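A hedged sketch of that workflow (the package name and version are
illustrative):

.. code-block:: console

   $ spack env activate ./myenv
   $ spack develop foo@1.2.3    # clones foo's source into the environment
   $ spack install              # rebuilds foo from the local source
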
@@ -430,7 +450,7 @@ also be used as valid concrete versions (see :ref:`version-specifier`).

This means that for a package ``foo``, ``spack develop foo@git.main`` will clone
the ``main`` branch of the package, and ``spack install`` will install from
that git clone if ``foo`` is in the environment.
-Further development on ``foo`` can be tested by reinstalling the environment,
+Further development on ``foo`` can be tested by re-installing the environment,
and eventually committed and pushed to the upstream git repo.

If the package being developed supports out-of-source builds then users can use the
@@ -615,7 +635,7 @@ manipulate configuration inline in the ``spack.yaml`` file.

Inline configurations
^^^^^^^^^^^^^^^^^^^^^

-Inline Environment-scope configuration is done using the same yaml
+Inline environment-scope configuration is done using the same yaml
format as standard Spack configuration scopes, covered in the
:ref:`configuration` section. Each section is contained under a
top-level yaml object with its name. For example, a ``spack.yaml``
@@ -640,7 +660,7 @@ Included configurations

Spack environments allow an ``include`` heading in their yaml
schema. This heading pulls in external configuration files and applies
-them to the Environment.
+them to the environment.

.. code-block:: yaml

@@ -653,6 +673,9 @@ them to the Environment.

Environments can include files or URLs. File paths can be relative or
absolute. URLs include the path to the text for individual files or
can be the path to a directory containing configuration files.
+Spack supports ``file``, ``http``, ``https`` and ``ftp`` protocols (or
+schemes). Spack-specific, environment and user path variables may be
+used in these paths. See :ref:`config-file-variables` for more information.

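A sketch of such an ``include`` block (the paths and URL are
illustrative):

.. code-block:: yaml

   spack:
     include:
     - relative/path/to/config.yaml
     - https://example.com/configs/packages.yaml
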
^^^^^^^^^^^^^^^^^^^^^^^^
Configuration precedence
@@ -667,7 +690,7 @@ have higher precedence, as the included configs are applied in reverse order.

Manually Editing the Specs List
-------------------------------

-The list of abstract/root specs in the Environment is maintained in
+The list of abstract/root specs in the environment is maintained in
the ``spack.yaml`` manifest under the heading ``specs``.

.. code-block:: yaml
@@ -775,7 +798,7 @@ evaluates to the cross-product of those specs. Spec matrices also

contain an ``excludes`` directive, which eliminates certain
combinations from the evaluated result.

-The following two Environment manifests are identical:
+The following two environment manifests are identical:

.. code-block:: yaml

@@ -850,7 +873,7 @@ files are identical.

In short files like the example, it may be easier to simply list the
included specs. However for more complicated examples involving many
packages across many toolchains, separately factored lists make
-Environments substantially more manageable.
+environments substantially more manageable.

Additionally, the ``-l`` option to the ``spack add`` command allows
one to add to named lists in the definitions section of the manifest
@@ -1066,7 +1089,7 @@ true``). The argument ``--without-view`` can be used to create an

environment without any view configured.

The ``spack env view`` command can be used to change the managed views
-of an Environment. The subcommand ``spack env view enable`` will add a
+of an environment. The subcommand ``spack env view enable`` will add a
view named ``default`` to an environment. It takes an optional
argument to specify the path for the new default view. The subcommand
``spack env view disable`` will remove the view named ``default`` from
@@ -1234,7 +1257,7 @@ gets installed and is available for use in the ``env`` target.

		$(SPACK) -e . env depfile -o $@ --make-prefix spack

	env: spack/env
-		$(info Environment installed!)
+		$(info environment installed!)

	clean:
		rm -rf spack.lock env.mk spack/

Binary file not shown (before: 44 KiB).
Binary file not shown (before: 68 KiB).
BIN lib/spack/docs/images/splices.png (new file; after: 358 KiB; binary file not shown).
@@ -12,10 +12,6 @@

Spack
===================

-.. epigraph::
-
-   `These are docs for the Spack package manager. For sphere packing, see` `pyspack <https://pyspack.readthedocs.io>`_.
-
Spack is a package management tool designed to support multiple
versions and configurations of software on a wide variety of platforms
and environments. It was designed for large supercomputing centers,

@@ -457,11 +457,11 @@ For instance, the following config options,

   tcl:
     all:
       suffixes:
-        ^python@3.12: 'python-3.12'
+        ^python@3: 'python{^python.version}'
         ^openblas: 'openblas'

-will add a ``python-3.12`` version string to any packages compiled with
-Python matching the spec, ``python@3.12``. This is useful to know which
+will add a ``python-3.12.1`` version string to any packages compiled with
+Python matching the spec, ``python@3``. This is useful to know which
version of Python a set of Python extensions is associated with. Likewise, the
``openblas`` string is attached to any program that has openblas in the spec,
most likely via the ``+blas`` variant specification.

@@ -2503,15 +2503,14 @@ with. For example, suppose that in the ``libdwarf`` package you write:

   depends_on("libelf@0.8")

-Now ``libdwarf`` will require ``libelf`` at *exactly* version ``0.8``.
-You can also specify a requirement for a particular variant or for
-specific compiler flags:
+Now ``libdwarf`` will require ``libelf`` in the range ``0.8``, which
+includes patch versions ``0.8.1``, ``0.8.2``, etc. Apart from version
+restrictions, you can also specify variants if this package requires
+optional features of the dependency.

.. code-block:: python

-   depends_on("libelf@0.8+debug")
-   depends_on("libelf debug=True")
-   depends_on("libelf cppflags='-fPIC'")
+   depends_on("libelf@0.8 +parser +pic")

Both users *and* package authors can use the same spec syntax to refer
to different package configurations. Users use the spec syntax on the
@@ -2519,46 +2518,82 @@ command line to find installed packages or to install packages with

particular constraints, and package authors can use specs to describe
relationships between packages.

-^^^^^^^^^^^^^^
-Version ranges
-^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Specifying backward and forward compatibility
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-Although some packages require a specific version for their dependencies,
-most can be built with a range of versions. For example, if you are
-writing a package for a legacy Python module that only works with Python
-2.4 through 2.6, this would look like:
+Packages are often compatible with a range of versions of their
+dependencies. This is typically referred to as backward and forward
+compatibility. Spack allows you to specify this in the ``depends_on``
+directive using version ranges.
+
+**Backwards compatibility** means that the package requires at least a
+certain version of its dependency:

.. code-block:: python

-   depends_on("python@2.4:2.6")
+   depends_on("python@3.10:")

-Version ranges in Spack are *inclusive*, so ``2.4:2.6`` means any version
-greater than or equal to ``2.4`` and up to and including any ``2.6.x``. If
-you want to specify that a package works with any version of Python 3 (or
-higher), this would look like:
+In this case, the package requires Python 3.10 or newer.
+
+Commonly, packages drop support for older versions of a dependency as
+they release new versions. In Spack you can conveniently add every
+backward compatibility rule as a separate line:

.. code-block:: python

-   depends_on("python@3:")
+   # backward compatibility with Python
+   depends_on("python@3.8:")
+   depends_on("python@3.9:", when="@1.2:")
+   depends_on("python@3.10:", when="@1.4:")

-Here we leave out the upper bound. If you want to say that a package
-requires Python 2, you can similarly leave out the lower bound:
+This means that in general we need Python 3.8 or newer; from version
+1.2 onwards we need Python 3.9 or newer; from version 1.4 onwards we
+need Python 3.10 or newer. Notice that it's fine to have overlapping
+ranges in the ``when`` clauses.
+
+**Forward compatibility** means that the package requires at most a
+certain version of its dependency. Forward compatibility rules are
+necessary when there are breaking changes in the dependency that the
+package cannot handle. In Spack we often add forward compatibility
+bounds only at the time a new, breaking version of a dependency is
+released. As with backward compatibility, it is typical to see a list
+of forward compatibility bounds in a package file as separate lines:

.. code-block:: python

-   depends_on("python@:2")
+   # forward compatibility with Python
+   depends_on("python@:3.12", when="@:1.10")
+   depends_on("python@:3.13", when="@:1.12")

-Notice that we didn't use ``@:3``. Version ranges are *inclusive*, so
-``@:3`` means "up to and including any 3.x version".
+Notice how the ``:`` now appears before the version number both in the
+dependency and in the ``when`` clause. This tells Spack that in general
+we need Python 3.13 or older up to version ``1.12.x``, and up to version
+``1.10.x`` we need Python 3.12 or older. Said differently, forward compatibility
+with Python 3.13 was added in version 1.11, while version 1.13 added forward
+compatibility with Python 3.14.

-You can also simply write
+Notice that a version range ``@:3.12`` includes *any* patch version
+number ``3.12.x``, which is often useful when specifying forward compatibility
+bounds.
+
+So far we have seen open-ended version ranges, which is by far the most
+common use case. It is also possible to specify both a lower and an upper bound
+on the version of a dependency, like this:

.. code-block:: python

-   depends_on("python@2.7")
+   depends_on("python@3.10:3.12")

-to tell Spack that the package needs Python 2.7.x. This is equivalent to
-``@2.7:2.7``.
+There is short syntax to specify that a package is compatible with say any
+``3.x`` version:
+
+.. code-block:: python
+
+   depends_on("python@3")
+
+The above is equivalent to ``depends_on("python@3:3")``, which means at least
+Python version 3 and at most any version ``3.x.y``.

In very rare cases, you may need to specify an exact version, for example
if you need to distinguish between ``3.2`` and ``3.2.1``:

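For completeness, an exact pin uses the ``@=`` specifier (a hedged
sketch; see :ref:`version-specifier` for the authoritative syntax):

.. code-block:: python

   depends_on("python@=3.2")    # exactly version 3.2, excluding 3.2.1
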
@@ -59,7 +59,7 @@ Functional Example

------------------

The simplest fully functional standalone example of a working pipeline can be
-examined live at this example `project <https://gitlab.com/scott.wittenburg/spack-pipeline-demo>`_
+examined live at this example `project <https://gitlab.com/spack/pipeline-quickstart>`_
on gitlab.com.

Here's the ``.gitlab-ci.yml`` file from that example that builds and runs the
@@ -67,39 +67,46 @@ pipeline:
.. code-block:: yaml

-  stages: [generate, build]
+  stages: [ "generate", "build" ]

   variables:
-    SPACK_REPO: https://github.com/scottwittenburg/spack.git
-    SPACK_REF: pipelines-reproducible-builds
+    SPACK_REPOSITORY: "https://github.com/spack/spack.git"
+    SPACK_REF: "develop-2024-10-06"
+    SPACK_USER_CONFIG_PATH: ${CI_PROJECT_DIR}
+    SPACK_BACKTRACE: 1

   generate-pipeline:
-    stage: generate
     tags:
-      - docker
+      - saas-linux-small-amd64
+    stage: generate
     image:
-      name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
-      entrypoint: [""]
-    before_script:
-      - git clone ${SPACK_REPO}
-      - pushd spack && git checkout ${SPACK_REF} && popd
-      - . "./spack/share/spack/setup-env.sh"
+      name: ghcr.io/spack/ubuntu20.04-runner-x86_64:2023-01-01
     script:
+      - git clone ${SPACK_REPOSITORY}
+      - cd spack && git checkout ${SPACK_REF} && cd ../
+      - . "./spack/share/spack/setup-env.sh"
      - spack --version
      - spack env activate --without-view .
-      - spack -d ci generate
+      - spack -d -v --color=always
+        ci generate
+        --check-index-only
        --artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir"
-        --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
+        --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/cloud-ci-pipeline.yml"
     artifacts:
       paths:
         - "${CI_PROJECT_DIR}/jobs_scratch_dir"

-  build-jobs:
+  build-pipeline:
     stage: build
     trigger:
       include:
-        - artifact: "jobs_scratch_dir/pipeline.yml"
+        - artifact: jobs_scratch_dir/cloud-ci-pipeline.yml
          job: generate-pipeline
      strategy: depend
+    needs:
+      - artifacts: True
+        job: generate-pipeline

The key thing to note above is that there are two jobs: The first job to run,
``generate-pipeline``, runs the ``spack ci generate`` command to generate a
@@ -114,82 +121,93 @@ And here's the spack environment built by the pipeline represented as a
   spack:
     view: false
     concretizer:
-      unify: false
+      unify: true
+      reuse: false

     definitions:
     - pkgs:
       - zlib
-      - bzip2
-    - arch:
-      - '%gcc@7.5.0 arch=linux-ubuntu18.04-x86_64'
+      - bzip2 ~debug
+    - compiler:
+      - '%gcc'

     specs:
     - matrix:
       - - $pkgs
-      - - $arch
-
-   mirrors: { "mirror": "s3://spack-public/mirror" }
+      - - $compiler

     ci:
-      enable-artifacts-buildcache: True
-      rebuild-index: False
+      target: gitlab

       pipeline-gen:
-      - any-job:
-          before_script:
-            - git clone ${SPACK_REPO}
-            - pushd spack && git checkout ${SPACK_CHECKOUT_VERSION} && popd
-            - . "./spack/share/spack/setup-env.sh"
       - build-job:
-          tags: [docker]
+          tags:
+            - saas-linux-small-amd64
           image:
-            name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
-            entrypoint: [""]
+            name: ghcr.io/spack/ubuntu20.04-runner-x86_64:2023-01-01
+          before_script:
+            - git clone ${SPACK_REPOSITORY}
+            - cd spack && git checkout ${SPACK_REF} && cd ../
+            - . "./spack/share/spack/setup-env.sh"
+            - spack --version
+            - export SPACK_USER_CONFIG_PATH=${CI_PROJECT_DIR}
+            - spack config blame mirrors

The elements of this file important to spack ci pipelines are described in more
detail below, but there are a couple of things to note about the above working
example:

.. note::
-   There is no ``script`` attribute specified here. The reason for this is
-   Spack CI will automatically generate reasonable default scripts. More
-   detail on what is in these scripts can be found below.
+   The use of ``reuse: false`` in spack environments used for pipelines is
+   almost always what you want, as without it your pipelines will not rebuild
+   packages even if package hashes have changed. This is due to the concretizer
+   strongly preferring known hashes when ``reuse: true``.

-Also notice the ``before_script`` section. It is required when using any of the
-default scripts to source the ``setup-env.sh`` script in order to inform
-the default scripts where to find the ``spack`` executable.
+The ``ci`` section in the above environment file contains the bare minimum
+configuration required for ``spack ci generate`` to create a working pipeline.
+The ``target: gitlab`` tells spack that the desired pipeline output is for
+gitlab. However, this isn't strictly required, as currently gitlab is the
+only possible output format for pipelines. The ``pipeline-gen`` section
+contains the key information needed to specify attributes for the generated
+jobs. Notice that it contains a list which has only a single element in
+this case. In real pipelines it will almost certainly have more elements,
+and in those cases, order is important: spack starts at the bottom of the
+list and works upwards when applying attributes.

-Normally ``enable-artifacts-buildcache`` is not recommended in production as it
-results in large binary artifacts getting transferred back and forth between
-gitlab and the runners. But in this example on gitlab.com where there is no
-shared, persistent file system, and where no secrets are stored for giving
-permission to write to an S3 bucket, ``enable-artifacts-buildcache`` is the only
-way to propagate binaries from jobs to their dependents.
-
-But in this simple case, we use only the special key ``any-job`` to
-indicate that spack should apply the specified attributes (``tags``, ``image``,
-and ``before_script``) to any job it generates. This includes jobs for
-building/pushing all packages, a ``rebuild-index`` job at the end of the
-pipeline, as well as any ``noop`` jobs that might be needed by gitlab when
-no rebuilds are required.
-
-Also, it is usually a good idea to let the pipeline generate a final "rebuild the
-buildcache index" job, so that subsequent pipeline generation can quickly determine
-which specs are up to date and which need to be rebuilt (it's a good idea for other
-reasons as well, but those are out of scope for this discussion). In this case we
-have disabled it (using ``rebuild-index: False``) because the index would only be
-generated in the artifacts mirror anyway, and consequently would not be available
-during subsequent pipeline runs.
+Something to note is that in this simple case, we rely on spack to
+generate a reasonable script for the package build jobs (it just creates
+a script that invokes ``spack ci rebuild``).

-.. note::
-   With the addition of reproducible builds (#22887) a previously working
-   pipeline will require some changes:
-
-   * In the build-jobs, the environment location changed.
-     This will typically show as a ``KeyError`` in the failing job. Be sure to
-     point to ``${SPACK_CONCRETE_ENV_DIR}``.
-
-   * When using ``include`` in your environment, be sure to make the included
-     files available in the build jobs. This means adding those files to the
-     artifact directory. Those files will also be missing in the reproducibility
-     artifact.
-
-   * Because the location of the environment changed, including files with
-     relative path may have to be adapted to work both in the project context
-     (generation job) and in the concrete env dir context (build job).
+Another thing to note is the use of the ``SPACK_USER_CONFIG_PATH`` environment
+variable in any generated jobs. The purpose of this is to make spack
+aware of one final file in the example, the one that contains the mirror
+configuration. This file, ``mirrors.yaml``, looks like this:
+
+.. code-block:: yaml
+
+   mirrors:
+     buildcache-destination:
+       url: oci://registry.gitlab.com/spack/pipeline-quickstart
+       binary: true
+       access_pair:
+         id_variable: CI_REGISTRY_USER
+         secret_variable: CI_REGISTRY_PASSWORD
+
+Note the name of the mirror is ``buildcache-destination``, which is required
+as of Spack 0.23 (see below for more information). The mirror url simply
+points to the container registry associated with the project, while
+``id_variable`` and ``secret_variable`` refer to environment variables
+containing the access credentials for the mirror.
+
+When spack builds packages for this example project, they will be pushed to
+the project container registry, where they will be available for subsequent
+jobs to install as dependencies, or for other pipelines to use to build runnable
+container images.

-----------------------------------
Spack commands supporting pipelines
@@ -417,15 +435,6 @@ configuration with a ``script`` attribute. Specifying a signing job without a script
does not create a signing job and the job configuration attributes will be ignored.
Signing jobs are always assigned the runner tags ``aws``, ``protected``, and ``notary``.

-^^^^^^^^^^^^^^^^^
-Cleanup (cleanup)
-^^^^^^^^^^^^^^^^^
-
-When using ``temporary-storage-url-prefix`` the cleanup job will destroy the mirror
-created for the associated Gitlab pipeline. Cleanup jobs do not allow modifying the
-script, but do expect that the spack command is in the path and require a
-``before_script`` to be specified that sources the ``setup-env.sh`` script.

.. _noop_jobs:

^^^^^^^^^^^^
@@ -592,6 +601,77 @@ the attributes will be merged starting from the bottom match going up to the top

In the case that no match is found in a submapping section, no additional attributes will be applied.

^^^^^^^^^^^^^^^^^^^^^^^^
Dynamic Mapping Sections
^^^^^^^^^^^^^^^^^^^^^^^^

For large-scale CI where cost optimization is required, dynamic mapping allows for the use of real-time
mapping schemes served by a web service. This type of mapping does not support the ``-remove`` type
behavior, but it does follow the rest of the merge rules for configurations.

The dynamic mapping service needs to implement a single REST API interface for getting
requests: ``GET <URL>[:PORT][/PATH]?spec=<pkg_name@pkg_version +variant1+variant2%compiler@compiler_version>``.

An example request:

.. code-block::

   https://my-dyn-mapping.spack.io/allocation?spec=zlib-ng@2.1.6 +compat+opt+shared+pic+new_strategies arch=linux-ubuntu20.04-x86_64_v3%gcc@12.0.0


Here is an example response that updates the Kubernetes request variables, overrides the max retries
for gitlab, and prepends a note about the modifications made by the my-dyn-mapping.spack.io service:

.. code-block::

   200 OK

   {
       "variables":
       {
           "KUBERNETES_CPU_REQUEST": "500m",
           "KUBERNETES_MEMORY_REQUEST": "2G",
       },
       "retry": { "max": "1" },
       "script+:":
       [
           "echo \"Job modified by my-dyn-mapping.spack.io\""
       ]
   }
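Such a service can be a very small web application. The sketch below is
illustrative only and uses just the Python standard library; the port and the
hardcoded response values are assumptions, not part of Spack:

.. code-block:: python

   import json
   from http.server import BaseHTTPRequestHandler, HTTPServer
   from urllib.parse import parse_qs, urlparse

   class AllocationHandler(BaseHTTPRequestHandler):
       def do_GET(self):
           # The spec to be scheduled arrives as the "spec" query parameter.
           query = parse_qs(urlparse(self.path).query)
           spec = query.get("spec", [""])[0]
           # A real service would size resources based on the spec; this
           # sketch always returns the same allocation.
           body = json.dumps({
               "variables": {"KUBERNETES_CPU_REQUEST": "500m"},
               "script+:": [f'echo "mapped {spec}"'],
           }).encode()
           self.send_response(200)
           self.send_header("Content-Type", "application/json")
           self.end_headers()
           self.wfile.write(body)

   if __name__ == "__main__":
       HTTPServer(("", 8080), AllocationHandler).serve_forever()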

The ``ci.yaml`` configuration section takes the URL endpoint as well as a number of options to configure how responses are handled.

It is possible to specify a list of allowed and ignored configuration attributes under ``allow`` and ``ignore``
respectively. It is also possible to configure required attributes under the ``require`` section.

The client timeout and SSL verification can be configured using the ``timeout`` and ``verify_ssl`` options.
By default, ``timeout`` is set to the option in ``config:timeout`` and ``verify_ssl`` is set to the option in ``config:verify_ssl``.

Passing header parameters to the request can be achieved through the ``header`` section. The values of the variables passed to the
header may be environment variables that are expanded at runtime, such as a private token configured on the runner.

Here is an example configuration pointing to ``my-dyn-mapping.spack.io/allocation``:

.. code-block:: yaml

   ci:
   - dynamic-mapping:
       endpoint: my-dyn-mapping.spack.io/allocation
       timeout: 10
       verify_ssl: True
       header:
         PRIVATE_TOKEN: ${MY_PRIVATE_TOKEN}
         MY_CONFIG: "fuzz_allocation:false"
       allow:
       - variables
       ignore:
       - script
       require: []


^^^^^^^^^^^^^
Bootstrapping
^^^^^^^^^^^^^
@@ -670,15 +750,6 @@ environment/stack file, and in that case no bootstrapping will be done (only the
specs will be staged for building) and the runners will be expected to already
have all needed compilers installed and configured for spack to use.

-^^^^^^^^^^^^^^^^^^^
-Pipeline Buildcache
-^^^^^^^^^^^^^^^^^^^
-
-The ``enable-artifacts-buildcache`` key
-takes a boolean and determines whether the pipeline uses artifacts to store and
-pass along the buildcaches from one stage to the next (the default if you don't
-provide this option is ``False``).

^^^^^^^^^^^^^^^^
Broken Specs URL
^^^^^^^^^^^^^^^^
@@ -1,13 +1,13 @@
-sphinx==7.4.7
+sphinx==8.1.3
 sphinxcontrib-programoutput==0.17
 sphinx_design==0.6.1
-sphinx-rtd-theme==2.0.0
-python-levenshtein==0.25.1
-docutils==0.20.1
+sphinx-rtd-theme==3.0.1
+python-levenshtein==0.26.1
+docutils==0.21.2
 pygments==2.18.0
 urllib3==2.2.3
 pytest==8.3.3
 isort==5.13.2
-black==24.8.0
+black==24.10.0
 flake8==7.1.1
 mypy==1.11.1
lib/spack/env/cc
@@ -118,39 +118,18 @@ prepend() {
     fi
 }

-# contains LISTNAME ELEMENT
+# append LISTNAME ELEMENT [SEP]
 #
-# Test whether LISTNAME contains ELEMENT.
-# Set $? to 1 if LISTNAME does not contain ELEMENT.
-# Set $? to 0 if LISTNAME does contain ELEMENT.
-contains() {
-    varname="$1"
-    elt="$2"
-
-    setsep "$varname"
-
-    # the list may: 1) only contain the element, 2) start with the element,
-    # 3) contain the element in the middle, or 4) end with the element.
-    eval "[ \"\${$varname}\" = \"$elt\" ]" \
-        || eval "[ \"\${$varname#${elt}${sep}}\" != \"\${$varname}\" ]" \
-        || eval "[ \"\${$varname#*${sep}${elt}${sep}}\" != \"\${$varname}\" ]" \
-        || eval "[ \"\${$varname%${sep}${elt}}\" != \"\${$varname}\" ]"
-}
-
-# append LISTNAME ELEMENT [unique]
-#
-# Append ELEMENT to the list stored in the variable LISTNAME.
+# Append ELEMENT to the list stored in the variable LISTNAME,
+# assuming the list is separated by SEP.
 # Handles empty lists and single-element lists.
-#
-# If the third argument is provided and if it is the string 'unique',
-# this will not append if ELEMENT is already in the list LISTNAME.
 append() {
     varname="$1"
     elt="$2"

     if empty "$varname"; then
         eval "$varname=\"\${elt}\""
-    elif [ "$3" != "unique" ] || ! contains "$varname" "$elt" ; then
+    else
         # Get the appropriate separator for the list we're appending to.
         setsep "$varname"
         eval "$varname=\"\${$varname}${sep}\${elt}\""
@@ -168,21 +147,10 @@ extend() {
     if [ "$sep" != " " ]; then
         IFS="$sep"
     fi
-    eval "for elt in \${$2}; do append $1 \"$3\${elt}\" ${_append_args}; done"
+    eval "for elt in \${$2}; do append $1 \"$3\${elt}\"; done"
     unset IFS
 }

-# extend_unique LISTNAME1 LISTNAME2 [PREFIX]
-#
-# Append the elements stored in the variable LISTNAME2 to the list
-# stored in LISTNAME1, if they are not already present.
-# If PREFIX is provided, prepend it to each element.
-extend_unique() {
-    _append_args="unique"
-    extend "$@"
-    unset _append_args
-}

 # preextend LISTNAME1 LISTNAME2 [PREFIX]
 #
 # Prepend the elements stored in the list at LISTNAME2
@@ -731,8 +699,12 @@ categorize_arguments() {
             append_path_lists return_rpath_dirs_list "$arg"
             ;;
         -rpath|--rpath)
+            if [ $# -eq 1 ]; then
+                # -rpath without value: let the linker raise an error.
+                append return_other_args_list "$1"
+                break
+            fi
             shift
-            [ $# -eq 0 ] && break # ignore -rpath without value
             append_path_lists return_rpath_dirs_list "$1"
             ;;
         *)
@@ -964,14 +936,14 @@ esac

 # if mode is ccld or ld, extend RPATH lists with the prefix determined above
 if [ -n "$rpath_prefix" ]; then
-    extend_unique args_list spack_store_spack_flags_rpath_dirs_list "$rpath_prefix"
-    extend_unique args_list spack_store_rpath_dirs_list "$rpath_prefix"
+    extend args_list spack_store_spack_flags_rpath_dirs_list "$rpath_prefix"
+    extend args_list spack_store_rpath_dirs_list "$rpath_prefix"

-    extend_unique args_list spack_flags_rpath_dirs_list "$rpath_prefix"
-    extend_unique args_list rpath_dirs_list "$rpath_prefix"
+    extend args_list spack_flags_rpath_dirs_list "$rpath_prefix"
+    extend args_list rpath_dirs_list "$rpath_prefix"

-    extend_unique args_list system_spack_flags_rpath_dirs_list "$rpath_prefix"
-    extend_unique args_list system_rpath_dirs_list "$rpath_prefix"
+    extend args_list system_spack_flags_rpath_dirs_list "$rpath_prefix"
+    extend args_list system_rpath_dirs_list "$rpath_prefix"
 fi

 # Other arguments from the input command
lib/spack/external/__init__.py
@@ -18,7 +18,7 @@

 * Homepage: https://pypi.python.org/pypi/archspec
 * Usage: Labeling, comparison and detection of microarchitectures
-* Version: 0.2.5-dev (commit bceb39528ac49dd0c876b2e9bf3e7482e9c2be4a)
+* Version: 0.2.5 (commit 38ce485258ffc4fc6dd6688f8dc90cb269478c47)

 astunparse
 ----------------
@@ -81,8 +81,13 @@ def __init__(self, name, parents, vendor, features, compilers, generation=0, cpu
         self.generation = generation
         # Only relevant for AArch64
         self.cpu_part = cpu_part
-        # Cache the ancestor computation
+
+        # Cache the "ancestor" computation
         self._ancestors = None
+        # Cache the "generic" computation
+        self._generic = None
+        # Cache the "family" computation
+        self._family = None

     @property
     def ancestors(self):
@@ -174,18 +179,22 @@ def __contains__(self, feature):
     @property
     def family(self):
         """Returns the architecture family a given target belongs to"""
-        roots = [x for x in [self] + self.ancestors if not x.ancestors]
-        msg = "a target is expected to belong to just one architecture family"
-        msg += f"[found {', '.join(str(x) for x in roots)}]"
-        assert len(roots) == 1, msg
+        if self._family is None:
+            roots = [x for x in [self] + self.ancestors if not x.ancestors]
+            msg = "a target is expected to belong to just one architecture family"
+            msg += f"[found {', '.join(str(x) for x in roots)}]"
+            assert len(roots) == 1, msg
+            self._family = roots.pop()

-        return roots.pop()
+        return self._family

     @property
     def generic(self):
         """Returns the best generic architecture that is compatible with self"""
-        generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
-        return max(generics, key=lambda x: len(x.ancestors))
+        if self._generic is None:
+            generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
+            self._generic = max(generics, key=lambda x: len(x.ancestors))
+        return self._generic
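Both properties follow the same lazy-initialization pattern: compute on first
access, store the result on the instance, and return the cached value
afterwards. A standalone sketch of the idea (illustrative, not archspec code):

.. code-block:: python

   class Node:
       def __init__(self, parents):
           self.parents = parents
           self._family = None  # computed lazily, cached for the object's lifetime

       @property
       def family(self):
           if self._family is None:
               # Walk to a root exactly once; later accesses hit the cache.
               node = self
               while node.parents:
                   node = node.parents[0]
               self._family = node
           return self._family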

     def to_dict(self):
         """Returns a dictionary representation of this object."""
@@ -1482,7 +1482,6 @@
            "cldemote",
            "movdir64b",
            "movdiri",
            "pdcm",
            "serialize",
            "waitpkg"
        ],
@@ -2237,6 +2236,84 @@
            ]
        }
    },
    "zen5": {
        "from": ["zen4"],
        "vendor": "AuthenticAMD",
        "features": [
            "abm",
            "aes",
            "avx",
            "avx2",
            "avx512_bf16",
            "avx512_bitalg",
            "avx512bw",
            "avx512cd",
            "avx512dq",
            "avx512f",
            "avx512ifma",
            "avx512vbmi",
            "avx512_vbmi2",
            "avx512vl",
            "avx512_vnni",
            "avx512_vp2intersect",
            "avx512_vpopcntdq",
            "avx_vnni",
            "bmi1",
            "bmi2",
            "clflushopt",
            "clwb",
            "clzero",
            "cppc",
            "cx16",
            "f16c",
            "flush_l1d",
            "fma",
            "fsgsbase",
            "gfni",
            "ibrs_enhanced",
            "mmx",
            "movbe",
            "movdir64b",
            "movdiri",
            "pclmulqdq",
            "popcnt",
            "rdseed",
            "sse",
            "sse2",
            "sse4_1",
            "sse4_2",
            "sse4a",
            "ssse3",
            "tsc_adjust",
            "vaes",
            "vpclmulqdq",
            "xsavec",
            "xsaveopt"
        ],
        "compilers": {
            "gcc": [
                {
                    "versions": "14.1:",
                    "name": "znver5",
                    "flags": "-march={name} -mtune={name}"
                }
            ],
            "aocc": [
                {
                    "versions": "5.0:",
                    "name": "znver5",
                    "flags": "-march={name} -mtune={name}"
                }
            ],
            "clang": [
                {
                    "versions": "19.1:",
                    "name": "znver5",
                    "flags": "-march={name} -mtune={name}"
                }
            ]
        }
    },
    "ppc64": {
        "from": [],
        "vendor": "generic",
@@ -41,6 +41,20 @@ def comma_and(sequence: List[str]) -> str:
    return comma_list(sequence, "and")


def ordinal(number: int) -> str:
    """Return the ordinal representation (1st, 2nd, 3rd, etc.) for the provided number.

    Args:
        number: int to convert to ordinal number

    Returns: number's corresponding ordinal
    """
    idx = (number % 10) << 1
    tens = number % 100 // 10
    suffix = "th" if tens == 1 or idx > 6 else "thstndrd"[idx : idx + 2]
    return f"{number}{suffix}"
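The suffix lookup works because ``"thstndrd"`` packs the suffixes at even
offsets: ``idx`` doubles the last digit, so 1, 2 and 3 land on ``st``, ``nd``
and ``rd``, while everything else (including the teens, caught by the ``tens``
check) falls back to ``th``. A few spot checks:

.. code-block:: python

   assert [ordinal(n) for n in (1, 2, 3, 4, 11, 12, 13, 21, 101, 111)] == [
       "1st", "2nd", "3rd", "4th", "11th", "12th", "13th", "21st", "101st", "111th"
   ]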

def quote(sequence: List[str], q: str = "'") -> List[str]:
    """Quotes each item in the input list with the quote character passed as second argument."""
    return [f"{q}{e}{q}" for e in sequence]
@@ -20,11 +20,11 @@
 import tempfile
 from contextlib import contextmanager
 from itertools import accumulate
-from typing import Callable, Iterable, List, Match, Optional, Tuple, Union
+from typing import Callable, Deque, Dict, Iterable, List, Match, Optional, Set, Tuple, Union

 import llnl.util.symlink
 from llnl.util import tty
-from llnl.util.lang import dedupe, memoized
+from llnl.util.lang import dedupe, fnmatch_translate_multiple, memoized
 from llnl.util.symlink import islink, readlink, resolve_link_target_relative_to_the_link, symlink

 from ..path import path_to_os_path, system_path_filter
@@ -47,6 +47,7 @@
     "copy_mode",
     "filter_file",
     "find",
+    "find_first",
     "find_headers",
     "find_all_headers",
     "find_libraries",
@@ -1672,28 +1673,40 @@ def find_first(root: str, files: Union[Iterable[str], str], bfs_depth: int = 2)
     return FindFirstFile(root, *files, bfs_depth=bfs_depth).find()


-def find(root, files, recursive=True):
-    """Search for ``files`` starting from the ``root`` directory.
-
-    Like GNU/BSD find but written entirely in Python.
+def find(
+    root: Union[str, List[str]],
+    files: Union[str, List[str]],
+    recursive: bool = True,
+    max_depth: Optional[int] = None,
+) -> List[str]:
+    """Finds all non-directory files matching the filename patterns from ``files`` starting from
+    ``root``. This function returns a deterministic result for the same input and directory
+    structure when run multiple times. Symlinked directories are followed, and unique directories
+    are searched only once. Each matching file is returned only once at lowest depth in case
+    multiple paths exist due to symlinked directories. The function has similarities to the Unix
+    ``find`` utility.

     Examples:

     .. code-block:: console

-       $ find /usr -name python
+       $ find -L /usr -name python3 -type f

-    is equivalent to:
+    is roughly equivalent to

-    >>> find('/usr', 'python')
+    >>> find("/usr", "python3")

+    with the notable difference that this function only lists a single path to each file in case of
+    symlinked directories.

     .. code-block:: console

-       $ find /usr/local/bin -maxdepth 1 -name python
+       $ find -L /usr/local/bin /usr/local/sbin -maxdepth 1 '(' -name python3 -o -name getcap \\
+         ')' -type f

-    is equivalent to:
+    is roughly equivalent to:

-    >>> find('/usr/local/bin', 'python', recursive=False)
+    >>> find(["/usr/local/bin", "/usr/local/sbin"], ["python3", "getcap"], recursive=False)

     Accepts any glob characters accepted by fnmatch:

@@ -1707,70 +1720,116 @@ def find(root, files, recursive=True):
     ========== ====================================

     Parameters:
-        root (str): The root directory to start searching from
-        files (str or collections.abc.Sequence): Library name(s) to search for
-        recursive (bool): if False search only root folder,
-            if True descends top-down from the root. Defaults to True.
+        root: One or more root directories to start searching from
+        files: One or more filename patterns to search for
+        recursive: if False search only root, if True descends from roots. Defaults to True.
+        max_depth: if set, don't search below this depth. Cannot be set if recursive is False

     Returns:
-        list: The files that have been found
+        Returns a list of absolute, matching file paths.
     """
-    if isinstance(files, str):
+    if not isinstance(root, list):
+        root = [root]
+
+    if not isinstance(files, list):
         files = [files]

-    if recursive:
-        tty.debug(f"Find (recursive): {root} {str(files)}")
-        result = _find_recursive(root, files)
-    else:
-        tty.debug(f"Find (not recursive): {root} {str(files)}")
-        result = _find_non_recursive(root, files)
-
-    tty.debug(f"Find complete: {root} {str(files)}")
+    # If recursive is false, max_depth can only be None or 0
+    if max_depth and not recursive:
+        raise ValueError(f"max_depth ({max_depth}) cannot be set if recursive is False")
+
+    if not recursive:
+        max_depth = 0
+    elif max_depth is None:
+        max_depth = sys.maxsize
+
+    tty.debug(f"Find (max depth = {max_depth}): {root} {files}")
+    result = _find_max_depth(root, files, max_depth)
+    tty.debug(f"Find complete: {root} {files}")
     return result
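Assuming a hypothetical prefix layout like ``/opt/view/<pkg>/lib/``,
``max_depth`` bounds how far the breadth-first search descends. The paths and
patterns below are made up for illustration:

.. code-block:: python

   # Search at most two directory levels below each root; results are
   # absolute paths, deduplicated across symlinked directories.
   libs = find(["/opt/view", "/usr/local"], ["libz*.so*", "libbz2*"], max_depth=2)

   # recursive=False is equivalent to max_depth=0 (the roots themselves only).
   bins = find("/usr/bin", "python*", recursive=False)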

-@system_path_filter
-def _find_recursive(root, search_files):
-    # The variable here is **on purpose** a defaultdict. The idea is that
-    # we want to poke the filesystem as little as possible, but still maintain
-    # stability in the order of the answer. Thus we are recording each library
-    # found in a key, and reconstructing the stable order later.
-    found_files = collections.defaultdict(list)
-
-    # Make the path absolute to have os.walk also return an absolute path
-    root = os.path.abspath(root)
-    for path, _, list_files in os.walk(root):
-        for search_file in search_files:
-            matches = glob.glob(os.path.join(path, search_file))
-            matches = [os.path.join(path, x) for x in matches]
-            found_files[search_file].extend(matches)
-
-    answer = []
-    for search_file in search_files:
-        answer.extend(found_files[search_file])
-
-    return answer
+def _log_file_access_issue(e: OSError, path: str) -> None:
+    errno_name = errno.errorcode.get(e.errno, "UNKNOWN")
+    tty.debug(f"find must skip {path}: {errno_name} {e}")


-@system_path_filter
-def _find_non_recursive(root, search_files):
-    # The variable here is **on purpose** a defaultdict as os.list_dir
-    # can return files in any order (does not preserve stability)
-    found_files = collections.defaultdict(list)
-
-    # Make the path absolute to have absolute path returned
-    root = os.path.abspath(root)
-
-    for search_file in search_files:
-        matches = glob.glob(os.path.join(root, search_file))
-        matches = [os.path.join(root, x) for x in matches]
-        found_files[search_file].extend(matches)
-
-    answer = []
-    for search_file in search_files:
-        answer.extend(found_files[search_file])
-
-    return answer
+def _dir_id(s: os.stat_result) -> Tuple[int, int]:
+    # Note: on windows, st_ino is the file index and st_dev is the volume serial number. See
+    # https://github.com/python/cpython/blob/3.9/Python/fileutils.c
+    return (s.st_ino, s.st_dev)
+
+
+def _find_max_depth(roots: List[str], globs: List[str], max_depth: int = sys.maxsize) -> List[str]:
+    """See ``find`` for the public API."""
+    # Apply normcase to file patterns and filenames to respect case insensitive filesystems
+    regex, groups = fnmatch_translate_multiple([os.path.normcase(x) for x in globs])
+    # Ordered dictionary that keeps track of the files found for each pattern
+    capture_group_to_paths: Dict[str, List[str]] = {group: [] for group in groups}
+    # Ensure returned paths are always absolute
+    roots = [os.path.abspath(r) for r in roots]
+    # Breadth-first search queue. Each element is a tuple of (depth, directory)
+    dir_queue: Deque[Tuple[int, str]] = collections.deque()
+    # Set of visited directories. Each element is a tuple of (inode, device)
+    visited_dirs: Set[Tuple[int, int]] = set()
+
+    for root in roots:
+        try:
+            stat_root = os.stat(root)
+        except OSError as e:
+            _log_file_access_issue(e, root)
+            continue
+        dir_id = _dir_id(stat_root)
+        if dir_id not in visited_dirs:
+            dir_queue.appendleft((0, root))
+            visited_dirs.add(dir_id)
+
+    while dir_queue:
+        depth, next_dir = dir_queue.pop()
+        try:
+            dir_iter = os.scandir(next_dir)
+        except OSError as e:
+            _log_file_access_issue(e, next_dir)
+            continue
+
+        with dir_iter:
+            ordered_entries = sorted(dir_iter, key=lambda x: x.name)
+            for dir_entry in ordered_entries:
+                try:
+                    it_is_a_dir = dir_entry.is_dir(follow_symlinks=True)
+                except OSError as e:
+                    # Possible permission issue, or a symlink that cannot be resolved (ELOOP).
+                    _log_file_access_issue(e, dir_entry.path)
+                    continue
+
+                if it_is_a_dir and depth < max_depth:
+                    try:
+                        # The stat should be performed in a try/except block. We repeat that here
+                        # vs. moving to the above block because we only want to call `stat` if we
+                        # haven't exceeded our max_depth
+                        if sys.platform == "win32":
+                            # Note: st_ino/st_dev on DirEntry.stat are not set on Windows, so we
+                            # have to call os.stat
+                            stat_info = os.stat(dir_entry.path, follow_symlinks=True)
+                        else:
+                            stat_info = dir_entry.stat(follow_symlinks=True)
+                    except OSError as e:
+                        _log_file_access_issue(e, dir_entry.path)
+                        continue
+
+                    dir_id = _dir_id(stat_info)
+                    if dir_id not in visited_dirs:
+                        dir_queue.appendleft((depth + 1, dir_entry.path))
+                        visited_dirs.add(dir_id)
+                else:
+                    m = regex.match(os.path.normcase(os.path.basename(dir_entry.path)))
+                    if not m:
+                        continue
+                    for group in capture_group_to_paths:
+                        if m.group(group):
+                            capture_group_to_paths[group].append(dir_entry.path)
+                            break
+
+    return [path for paths in capture_group_to_paths.values() for path in paths]


# Utilities for libraries and headers
@@ -2209,7 +2268,9 @@ def find_system_libraries(libraries, shared=True):
     return libraries_found


-def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
+def find_libraries(
+    libraries, root, shared=True, recursive=False, runtime=True, max_depth: Optional[int] = None
+):
     """Returns an iterable of full paths to libraries found in a root dir.

     Accepts any glob characters accepted by fnmatch:
@@ -2230,6 +2291,8 @@ def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
             otherwise for static. Defaults to True.
         recursive (bool): if False search only root folder,
             if True descends top-down from the root. Defaults to False.
+        max_depth (int): if set, don't search below this depth. Cannot be set
+            if recursive is False
         runtime (bool): Windows only option, no-op elsewhere. If true,
             search for runtime shared libs (.DLL), otherwise, search
             for .Lib files. If shared is false, this has no meaning.
@@ -2238,6 +2301,7 @@ def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
     Returns:
         LibraryList: The libraries that have been found
     """
+
     if isinstance(libraries, str):
         libraries = [libraries]
     elif not isinstance(libraries, collections.abc.Sequence):
@@ -2270,8 +2334,10 @@ def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
     libraries = ["{0}.{1}".format(lib, suffix) for lib in libraries for suffix in suffixes]

     if not recursive:
+        if max_depth:
+            raise ValueError(f"max_depth ({max_depth}) cannot be set if recursive is False")
         # If not recursive, look for the libraries directly in root
-        return LibraryList(find(root, libraries, False))
+        return LibraryList(find(root, libraries, recursive=False))

     # To speedup the search for external packages configured e.g. in /usr,
     # perform first non-recursive search in root/lib then in root/lib64 and
@@ -2289,7 +2355,7 @@ def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
         if found_libs:
             break
     else:
-        found_libs = find(root, libraries, True)
+        found_libs = find(root, libraries, recursive=True, max_depth=max_depth)

     return LibraryList(found_libs)
@@ -5,14 +5,17 @@

 import collections.abc
 import contextlib
+import fnmatch
 import functools
 import itertools
 import os
+import re
 import sys
 import traceback
+import typing
 import warnings
 from datetime import datetime, timedelta
-from typing import Any, Callable, Iterable, List, Tuple
+from typing import Callable, Iterable, List, Tuple, TypeVar

 # Ignore emacs backups when listing modules
 ignore_modules = r"^\.#|~$"
@@ -858,6 +861,32 @@ def elide_list(line_list: List[str], max_num: int = 10) -> List[str]:
     return line_list


if sys.version_info >= (3, 9):
    PatternStr = re.Pattern[str]
else:
    PatternStr = typing.Pattern[str]


def fnmatch_translate_multiple(patterns: List[str]) -> Tuple[PatternStr, List[str]]:
    """Same as fnmatch.translate, but creates a single regex of the form
    ``(?P<pattern0>...)|(?P<pattern1>...)|...`` for each pattern in the iterable, where
    ``patternN`` is a named capture group that matches the corresponding pattern translated by
    ``fnmatch.translate``. This can be used to match multiple patterns in a single pass. No case
    normalization is performed on the patterns.

    Args:
        patterns: list of fnmatch patterns

    Returns:
        Tuple of the combined regex and the list of named capture groups corresponding to each
        pattern in the input list.
    """
    groups = [f"pattern{i}" for i in range(len(patterns))]
    regexes = (fnmatch.translate(p) for p in patterns)
    combined = re.compile("|".join(f"(?P<{g}>{r})" for g, r in zip(groups, regexes)))
    return combined, groups
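A quick sketch of how the combined pattern behaves; the group names record
which input pattern matched, and the leftmost alternative wins when a name
matches several patterns:

.. code-block:: python

   regex, groups = fnmatch_translate_multiple(["*.so", "lib*"])
   m = regex.match("libz.so")
   assert m is not None
   # "libz.so" matches both globs, but re reports the leftmost alternative
   assert m.group("pattern0") == "libz.so"
   assert groups == ["pattern0", "pattern1"]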

 @contextlib.contextmanager
 def nullcontext(*args, **kwargs):
     """Empty context manager.
@@ -870,18 +899,12 @@ class UnhashableArguments(TypeError):
     """Raise when an @memoized function receives unhashable arg or kwarg values."""


-def enum(**kwargs):
-    """Return an enum-like class.
-
-    Args:
-        **kwargs: explicit dictionary of enums
-    """
-    return type("Enum", (object,), kwargs)
+T = TypeVar("T")


 def stable_partition(
-    input_iterable: Iterable, predicate_fn: Callable[[Any], bool]
-) -> Tuple[List[Any], List[Any]]:
+    input_iterable: Iterable[T], predicate_fn: Callable[[T], bool]
+) -> Tuple[List[T], List[T]]:
     """Partition the input iterable according to a custom predicate.

     Args:
@@ -893,12 +916,13 @@ def stable_partition(
         Tuple of the list of elements evaluating to True, and
         list of elements evaluating to False.
     """
-    true_items, false_items = [], []
+    true_items: List[T] = []
+    false_items: List[T] = []
     for item in input_iterable:
         if predicate_fn(item):
             true_items.append(item)
-            continue
-        false_items.append(item)
+        else:
+            false_items.append(item)
     return true_items, false_items
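The partition is "stable" because both output lists preserve the input order;
a small sketch:

.. code-block:: python

   evens, odds = stable_partition([3, 1, 4, 1, 5, 9, 2, 6], lambda x: x % 2 == 0)
   assert evens == [4, 2, 6]
   assert odds == [3, 1, 1, 5, 9]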

@@ -910,6 +934,21 @@ def ensure_last(lst, *elements):
         lst.append(lst.pop(lst.index(elt)))


class Const:
    """Class level constant, raises when trying to set the attribute"""

    __slots__ = ["value"]

    def __init__(self, value):
        self.value = value

    def __get__(self, instance, owner):
        return self.value

    def __set__(self, instance, value):
        raise TypeError(f"Const value does not support assignment [value={self.value}]")
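Because ``Const`` defines both ``__get__`` and ``__set__``, it is a data
descriptor and shadows instance attributes. A hypothetical usage sketch:

.. code-block:: python

   class Settings:
       MAX_RETRIES = Const(3)

   s = Settings()
   assert s.MAX_RETRIES == 3
   try:
       s.MAX_RETRIES = 5  # intercepted by Const.__set__
   except TypeError:
       pass
   # Note: rebinding on the class itself (Settings.MAX_RETRIES = ...) is not
   # intercepted; descriptors only guard instance-level assignment.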

 class TypedMutableSequence(collections.abc.MutableSequence):
     """Base class that behaves like a list, just with a different type.

@@ -1014,3 +1053,42 @@ def __init__(self, callback):

     def __get__(self, instance, owner):
         return self.callback(owner)


class DeprecatedProperty:
    """Data descriptor to error or warn when a deprecated property is accessed.

    Derived classes must define a factory method to return an adaptor for the deprecated
    property, if the descriptor is not set to error.
    """

    __slots__ = ["name"]

    #: 0 - Nothing
    #: 1 - Warning
    #: 2 - Error
    error_lvl = 0

    def __init__(self, name: str) -> None:
        self.name = name

    def __get__(self, instance, owner):
        if instance is None:
            return self

        if self.error_lvl == 1:
            warnings.warn(
                f"accessing the '{self.name}' property of '{instance}', which is deprecated"
            )
        elif self.error_lvl == 2:
            raise AttributeError(f"cannot access the '{self.name}' attribute of '{instance}'")

        return self.factory(instance, owner)

    def __set__(self, instance, value):
        raise TypeError(
            f"the deprecated property '{self.name}' of '{instance}' does not support assignment"
        )

    def factory(self, instance, owner):
        raise NotImplementedError("must be implemented by derived classes")
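A derived class supplies ``factory`` to build whatever adaptor legacy callers
expect; the subclass and attribute names below are hypothetical:

.. code-block:: python

   class DeprecatedPlatform(DeprecatedProperty):
       def factory(self, instance, owner):
           # Hypothetical adaptor standing in for the old attribute's value
           return instance.host_platform

   class Config:
       platform = DeprecatedPlatform("platform")

       def __init__(self, host_platform):
           self.host_platform = host_platform

   DeprecatedPlatform.error_lvl = 1  # warn on access instead of raising
   Config("linux").platform          # emits a warning via warnings.warn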

@@ -263,7 +263,9 @@ def match_to_ansi(match):
                 f"Incomplete color format: '{match.group(0)}' in '{match.string}'"
             )

-        ansi_code = _escape(f"{styles[style]};{colors.get(color_code, '')}", color, enclose, zsh)
+        color_number = colors.get(color_code, "")
+        semi = ";" if color_number else ""
+        ansi_code = _escape(f"{styles[style]}{semi}{color_number}", color, enclose, zsh)
         if text:
             return f"{ansi_code}{text}{_escape(0, color, enclose, zsh)}"
         else:
@@ -10,7 +10,6 @@
 import errno
 import io
 import multiprocessing
-import multiprocessing.connection
 import os
 import re
 import select
@@ -19,9 +18,10 @@
 import threading
 import traceback
 from contextlib import contextmanager
+from multiprocessing.connection import Connection
 from threading import Thread
 from types import ModuleType
-from typing import Optional
+from typing import Callable, Optional

 import llnl.util.tty as tty

@@ -345,49 +345,6 @@ def close(self):
         self.file.close()


-class MultiProcessFd:
-    """Return an object which stores a file descriptor and can be passed as an
-    argument to a function run with ``multiprocessing.Process``, such that
-    the file descriptor is available in the subprocess."""
-
-    def __init__(self, fd):
-        self._connection = None
-        self._fd = None
-        if sys.version_info >= (3, 8):
-            self._connection = multiprocessing.connection.Connection(fd)
-        else:
-            self._fd = fd
-
-    @property
-    def fd(self):
-        if self._connection:
-            return self._connection._handle
-        else:
-            return self._fd
-
-    def close(self):
-        if self._connection:
-            self._connection.close()
-        else:
-            os.close(self._fd)
-
-
-def close_connection_and_file(multiprocess_fd, file):
-    # MultiprocessFd is intended to transmit a FD
-    # to a child process, this FD is then opened to a Python File object
-    # (using fdopen). In >= 3.8, MultiprocessFd encapsulates a
-    # multiprocessing.connection.Connection; Connection closes the FD
-    # when it is deleted, and prints a warning about duplicate closure if
-    # it is not explicitly closed. In < 3.8, MultiprocessFd encapsulates a
-    # simple FD; closing the FD here appears to conflict with
-    # closure of the File object (in < 3.8 that is). Therefore this needs
-    # to choose whether to close the File or the Connection.
-    if sys.version_info >= (3, 8):
-        multiprocess_fd.close()
-    else:
-        file.close()
-
-
 @contextmanager
 def replace_environment(env):
     """Replace the current environment (`os.environ`) with `env`.
@@ -545,22 +502,20 @@ def __enter__(self):
         # forcing debug output.
         self._saved_debug = tty._debug

-        # OS-level pipe for redirecting output to logger
-        read_fd, write_fd = os.pipe()
+        # Pipe for redirecting output to logger
+        read_fd, self.write_fd = multiprocessing.Pipe(duplex=False)

-        read_multiprocess_fd = MultiProcessFd(read_fd)
-
-        # Multiprocessing pipe for communication back from the daemon
+        # Pipe for communication back from the daemon
         # Currently only used to save echo value between uses
-        self.parent_pipe, child_pipe = multiprocessing.Pipe()
+        self.parent_pipe, child_pipe = multiprocessing.Pipe(duplex=False)

         # Sets a daemon that writes to file what it reads from a pipe
         try:
             # need to pass this b/c multiprocessing closes stdin in child.
-            input_multiprocess_fd = None
+            input_fd = None
             try:
                 if sys.stdin.isatty():
-                    input_multiprocess_fd = MultiProcessFd(os.dup(sys.stdin.fileno()))
+                    input_fd = Connection(os.dup(sys.stdin.fileno()))
             except BaseException:
                 # just don't forward input if this fails
                 pass
@@ -569,9 +524,9 @@ def __enter__(self):
             self.process = multiprocessing.Process(
                 target=_writer_daemon,
                 args=(
-                    input_multiprocess_fd,
-                    read_multiprocess_fd,
-                    write_fd,
+                    input_fd,
+                    read_fd,
+                    self.write_fd,
                     self.echo,
                     self.log_file,
                     child_pipe,
@@ -582,9 +537,9 @@ def __enter__(self):
             self.process.start()

         finally:
-            if input_multiprocess_fd:
-                input_multiprocess_fd.close()
-            read_multiprocess_fd.close()
+            if input_fd:
+                input_fd.close()
+            read_fd.close()

         # Flush immediately before redirecting so that anything buffered
         # goes to the original stream
@@ -602,9 +557,9 @@ def __enter__(self):
             self._saved_stderr = os.dup(sys.stderr.fileno())

             # redirect to the pipe we created above
-            os.dup2(write_fd, sys.stdout.fileno())
-            os.dup2(write_fd, sys.stderr.fileno())
-            os.close(write_fd)
+            os.dup2(self.write_fd.fileno(), sys.stdout.fileno())
+            os.dup2(self.write_fd.fileno(), sys.stderr.fileno())
+            self.write_fd.close()

         else:
             # Handle I/O the Python way. This won't redirect lower-level
@@ -617,7 +572,7 @@ def __enter__(self):
             self._saved_stderr = sys.stderr

             # create a file object for the pipe; redirect to it.
-            pipe_fd_out = os.fdopen(write_fd, "w")
+            pipe_fd_out = os.fdopen(self.write_fd.fileno(), "w", closefd=False)
             sys.stdout = pipe_fd_out
             sys.stderr = pipe_fd_out

@@ -653,6 +608,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
         else:
             sys.stdout = self._saved_stdout
             sys.stderr = self._saved_stderr
+            self.write_fd.close()

         # print log contents in parent if needed.
         if self.log_file.write_in_parent:
@@ -866,14 +822,14 @@ def force_echo(self):


 def _writer_daemon(
-    stdin_multiprocess_fd,
-    read_multiprocess_fd,
-    write_fd,
-    echo,
-    log_file_wrapper,
-    control_pipe,
-    filter_fn,
-):
+    stdin_fd: Optional[Connection],
+    read_fd: Connection,
+    write_fd: Connection,
+    echo: bool,
+    log_file_wrapper: FileWrapper,
+    control_fd: Connection,
+    filter_fn: Optional[Callable[[str], str]],
+) -> None:
     """Daemon used by ``log_output`` to write to a log file and to ``stdout``.

     The daemon receives output from the parent process and writes it both
@@ -910,43 +866,37 @@ def _writer_daemon(
     ``StringIO`` in the parent. This is mainly for testing.

     Arguments:
-        stdin_multiprocess_fd (int): input from the terminal
-        read_multiprocess_fd (int): pipe for reading from parent's redirected
-            stdout
-        echo (bool): initial echo setting -- controlled by user and
-            preserved across multiple writer daemons
-        log_file_wrapper (FileWrapper): file to log all output
-        control_pipe (Pipe): multiprocessing pipe on which to send control
-            information to the parent
-        filter_fn (callable, optional): function to filter each line of output
+        stdin_fd: optional input from the terminal
+        read_fd: pipe for reading from parent's redirected stdout
+        echo: initial echo setting -- controlled by user and preserved across multiple writer
+            daemons
+        log_file_wrapper: file to log all output
+        control_fd: multiprocessing pipe on which to send control information to the parent
+        filter_fn: optional function to filter each line of output

     """
-    # If this process was forked, then it will inherit file descriptors from
-    # the parent process. This process depends on closing all instances of
-    # write_fd to terminate the reading loop, so we close the file descriptor
-    # here. Forking is the process spawning method everywhere except Mac OS
-    # for Python >= 3.8 and on Windows
-    if sys.version_info < (3, 8) or sys.platform != "darwin":
-        os.close(write_fd)
+    # This process depends on closing all instances of write_pipe to terminate the reading loop
+    write_fd.close()

     # 1. Use line buffering (3rd param = 1) since Python 3 has a bug
     #    that prevents unbuffered text I/O.
     # 2. Python 3.x before 3.7 does not open with UTF-8 encoding by default
-    in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1, encoding="utf-8")
+    # 3. closefd=False because Connection has "ownership"
+    read_file = os.fdopen(read_fd.fileno(), "r", 1, encoding="utf-8", closefd=False)

-    if stdin_multiprocess_fd:
-        stdin = os.fdopen(stdin_multiprocess_fd.fd)
+    if stdin_fd:
+        stdin_file = os.fdopen(stdin_fd.fileno(), closefd=False)
     else:
-        stdin = None
+        stdin_file = None

     # list of streams to select from
-    istreams = [in_pipe, stdin] if stdin else [in_pipe]
+    istreams = [read_file, stdin_file] if stdin_file else [read_file]
     force_echo = False  # parent can force echo for certain output

     log_file = log_file_wrapper.unwrap()

     try:
-        with keyboard_input(stdin) as kb:
+        with keyboard_input(stdin_file) as kb:
             while True:
                 # fix the terminal settings if we recently came to
                 # the foreground
@@ -959,12 +909,12 @@ def _writer_daemon(
                 # Allow user to toggle echo with 'v' key.
                 # Currently ignores other chars.
                 # only read stdin if we're in the foreground
-                if stdin in rlist and not _is_background_tty(stdin):
+                if stdin_file and stdin_file in rlist and not _is_background_tty(stdin_file):
                     # it's possible to be backgrounded between the above
                     # check and the read, so we ignore SIGTTIN here.
                     with ignore_signal(signal.SIGTTIN):
                         try:
-                            if stdin.read(1) == "v":
+                            if stdin_file.read(1) == "v":
                                 echo = not echo
                         except IOError as e:
                             # If SIGTTIN is ignored, the system gives EIO
@@ -973,13 +923,13 @@ def _writer_daemon(
                             if e.errno != errno.EIO:
                                 raise

-                if in_pipe in rlist:
+                if read_file in rlist:
                     line_count = 0
                     try:
                         while line_count < 100:
                             # Handle output from the calling process.
                             try:
-                                line = _retry(in_pipe.readline)()
+                                line = _retry(read_file.readline)()
                             except UnicodeDecodeError:
                                 # installs like --test=root gpgme produce non-UTF8 logs
                                 line = "<line lost: output was not encoded as UTF-8>\n"
@@ -1008,7 +958,7 @@ def _writer_daemon(
                             if xoff in controls:
                                 force_echo = False

-                            if not _input_available(in_pipe):
+                            if not _input_available(read_file):
                                 break
                     finally:
                         if line_count > 0:
@@ -1023,14 +973,14 @@ def _writer_daemon(
     finally:
         # send written data back to parent if we used a StringIO
         if isinstance(log_file, io.StringIO):
-            control_pipe.send(log_file.getvalue())
+            control_fd.send(log_file.getvalue())
         log_file_wrapper.close()
-        close_connection_and_file(read_multiprocess_fd, in_pipe)
-        if stdin_multiprocess_fd:
-            close_connection_and_file(stdin_multiprocess_fd, stdin)
+        read_fd.close()
+        if stdin_fd:
+            stdin_fd.close()

         # send echo value back to the parent so it can be preserved.
-        control_pipe.send(echo)
+        control_fd.send(echo)
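The daemon-side pattern above, a read-only ``Connection`` wrapped in a
buffered text file without transferring ownership of the descriptor, looks
like this in isolation (a sketch, not the Spack code):

.. code-block:: python

   import multiprocessing
   import os

   read_conn, write_conn = multiprocessing.Pipe(duplex=False)

   # closefd=False: the Connection keeps ownership of the descriptor and is
   # responsible for closing it; bufsize=1 gives line-buffered text reads.
   reader = os.fdopen(read_conn.fileno(), "r", 1, encoding="utf-8", closefd=False)

   # In the reading process, close the unused write end so that readline()
   # can observe EOF once every writer has closed its copy.
   write_conn.close()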


def _retry(function):
@@ -69,4 +69,15 @@ def get_version() -> str:
     return spack_version


-__all__ = ["spack_version_info", "spack_version", "get_version", "get_spack_commit"]
+def get_short_version() -> str:
+    """Short Spack version."""
+    return f"{spack_version_info[0]}.{spack_version_info[1]}"
+
+
+__all__ = [
+    "spack_version_info",
+    "spack_version",
+    "get_version",
+    "get_spack_commit",
+    "get_short_version",
+]
@@ -39,6 +39,7 @@ def _search_duplicate_compilers(error_cls):
 import collections
 import collections.abc
 import glob
+import inspect
 import io
 import itertools
 import os
@@ -50,6 +51,7 @@ def _search_duplicate_compilers(error_cls):
 from urllib.request import urlopen

 import llnl.util.lang
+from llnl.string import plural

 import spack.builder
 import spack.config
@@ -386,6 +388,14 @@ def _make_config_error(config_data, summary, error_cls):
 )


package_deprecated_attributes = AuditClass(
    group="packages",
    tag="PKG-DEPRECATED-ATTRIBUTES",
    description="Sanity checks to preclude use of deprecated package attributes",
    kwargs=("pkgs",),
)


 package_properties = AuditClass(
     group="packages",
     tag="PKG-PROPERTIES",
@@ -404,22 +414,23 @@ def _make_config_error(config_data, summary, error_cls):
 )


-@package_directives
+@package_properties
 def _check_build_test_callbacks(pkgs, error_cls):
-    """Ensure stand-alone test method is not included in build-time callbacks"""
+    """Ensure stand-alone test methods are not included in build-time callbacks.
+
+    Test methods are for checking the installed software as stand-alone tests.
+    They could also be called during the post-install phase of a build.
+    """
     errors = []
     for pkg_name in pkgs:
         pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
         test_callbacks = getattr(pkg_cls, "build_time_test_callbacks", None)

-        # TODO (post-34236): "test*"->"test_*" once remove deprecated methods
-        has_test_method = test_callbacks and any([m.startswith("test") for m in test_callbacks])
+        # TODO (post-34236): "test"->"test_" once remove deprecated methods
+        has_test_method = test_callbacks and any([m.startswith("test_") for m in test_callbacks])
         if has_test_method:
-            msg = '{0} package contains "test*" method(s) in ' "build_time_test_callbacks"
-            instr = 'Remove all methods whose names start with "test" from: [{0}]'.format(
-                ", ".join(test_callbacks)
-            )
+            msg = f"Package {pkg_name} includes stand-alone test methods in build-time checks."
+            callbacks = ", ".join(test_callbacks)
+            instr = f"Remove the following from 'build_time_test_callbacks': {callbacks}"
             errors.append(error_cls(msg.format(pkg_name), [instr]))

     return errors
@@ -517,6 +528,46 @@ def _search_for_reserved_attributes_names_in_packages(pkgs, error_cls):
     return errors


@package_deprecated_attributes
def _search_for_deprecated_package_methods(pkgs, error_cls):
    """Ensure the package doesn't define or use deprecated methods"""
    DEPRECATED_METHOD = (("test", "a name starting with 'test_'"),)
    DEPRECATED_USE = (
        ("self.cache_extra_test_sources(", "cache_extra_test_sources(self, ..)"),
        ("self.install_test_root(", "install_test_root(self, ..)"),
        ("self.run_test(", "test_part(self, ..)"),
    )
    errors = []
    for pkg_name in pkgs:
        pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
        methods = inspect.getmembers(pkg_cls, predicate=lambda x: inspect.isfunction(x))
        method_errors = collections.defaultdict(list)
        for name, function in methods:
            for deprecated_name, alternate in DEPRECATED_METHOD:
                if name == deprecated_name:
                    msg = f"Rename '{deprecated_name}' method to {alternate} instead."
                    method_errors[name].append(msg)

            source = inspect.getsource(function)
            for deprecated_name, alternate in DEPRECATED_USE:
                if deprecated_name in source:
                    msg = f"Change '{deprecated_name}' to '{alternate}' in '{name}' method."
                    method_errors[name].append(msg)

        num_methods = len(method_errors)
        if num_methods > 0:
            methods = plural(num_methods, "method", show_n=False)
            error_msg = (
                f"Package '{pkg_name}' implements or uses unsupported deprecated {methods}."
            )
            instr = [f"Make changes to '{pkg_cls.__module__}':"]
            for name in sorted(method_errors):
                instr.extend([f" {msg}" for msg in method_errors[name]])
            errors.append(error_cls(error_msg, instr))

    return errors


 @package_properties
 def _ensure_all_package_names_are_lowercase(pkgs, error_cls):
     """Ensure package names are lowercase and consistent"""
@@ -671,9 +722,8 @@ def _ensure_env_methods_are_ported_to_builders(pkgs, error_cls):
     )
     builder_cls_names = [spack.builder.BUILDER_CLS[x].__name__ for x in build_system_names]

-    module = pkg_cls.module
     has_builders_in_package_py = any(
-        getattr(module, name, False) for name in builder_cls_names
+        spack.builder.get_builder_class(pkg_cls, name) for name in builder_cls_names
     )
     if not has_builders_in_package_py:
         continue
@@ -755,7 +805,7 @@ def _uses_deprecated_globals(pkgs, error_cls):

     file = spack.repo.PATH.filename_for_package_name(pkg_name)
     tree = ast.parse(open(file).read())
-    visitor = DeprecatedMagicGlobals(("std_cmake_args",))
+    visitor = DeprecatedMagicGlobals(("std_cmake_args", "std_meson_args", "std_pip_args"))
     visitor.visit(tree)
     if visitor.references_to_globals:
         errors.append(
@@ -771,6 +821,89 @@ def _uses_deprecated_globals(pkgs, error_cls):
     return errors


@package_properties
def _ensure_test_docstring(pkgs, error_cls):
    """Ensure stand-alone test methods have a docstring.

    The docstring of a test method is implicitly used as the description of
    the corresponding test part during test results reporting.
    """
    doc_regex = r'\s+("""[^"]+""")'

    errors = []
    for pkg_name in pkgs:
        pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
        methods = inspect.getmembers(pkg_cls, predicate=lambda x: inspect.isfunction(x))
        method_names = []
        for name, test_fn in methods:
            if not name.startswith("test_"):
                continue

            # Ensure the test method has a docstring
            source = inspect.getsource(test_fn)
            match = re.search(doc_regex, source)
            if match is None or len(match.group(0).replace('"', "").strip()) == 0:
                method_names.append(name)

        num_methods = len(method_names)
        if num_methods > 0:
            methods = plural(num_methods, "method", show_n=False)
            docstrings = plural(num_methods, "docstring", show_n=False)
            msg = f"Package {pkg_name} has test {methods} with empty or missing {docstrings}."
            names = ", ".join(method_names)
            instr = [
                "Docstrings are used as descriptions in test outputs.",
                f"Add a concise summary to the following {methods} in '{pkg_cls.__module__}':",
                f"{names}",
            ]
            errors.append(error_cls(msg, instr))

    return errors
|
||||
|
||||
|
||||
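
A short sketch of how the doc_regex pattern above behaves; the two sample functions are hypothetical.

    import inspect
    import re

    doc_regex = r'\s+("""[^"]+""")'  # same pattern as the audit above

    def test_documented():
        """Run a simple smoke check."""

    def test_undocumented():
        pass

    for fn in (test_documented, test_undocumented):
        source = inspect.getsource(fn)
        match = re.search(doc_regex, source)
        # a docstring that is only quotes/whitespace also counts as missing
        missing = match is None or len(match.group(0).replace('"', "").strip()) == 0
        print(fn.__name__, "missing docstring:", missing)
    # test_documented missing docstring: False
    # test_undocumented missing docstring: True
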
@package_properties
def _ensure_test_implemented(pkgs, error_cls):
"""Ensure stand-alone test methods are implemented.

The test method is also required to be non-empty.
"""

def skip(line):
ln = line.strip()
return ln.startswith("#") or "pass" in ln

doc_regex = r'\s+("""[^"]+""")'

errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
methods = inspect.getmembers(pkg_cls, predicate=lambda x: inspect.isfunction(x))
method_names = []
for name, test_fn in methods:
if not name.startswith("test_"):
continue

source = inspect.getsource(test_fn)

# Attempt to ensure the test method is implemented.
impl = re.sub(doc_regex, r"", source).splitlines()[1:]
lines = [ln.strip() for ln in impl if not skip(ln)]
if not lines:
method_names.append(name)

num_methods = len(method_names)
if num_methods > 0:
methods = plural(num_methods, "method", show_n=False)
msg = f"Package {pkg_name} has empty or missing test {methods}."
names = ", ".join(method_names)
instr = [
f"Implement or remove the following {methods} from '{pkg_cls.__module__}': {names}"
]
errors.append(error_cls(msg, instr))

return errors
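
The emptiness heuristic above, applied to two hypothetical test methods: the docstring is stripped, the def line is dropped, and comment/pass lines are ignored.

    import inspect
    import re

    doc_regex = r'\s+("""[^"]+""")'

    def skip(line):
        ln = line.strip()
        return ln.startswith("#") or "pass" in ln

    def test_empty():
        """Docstring only."""
        # a comment
        pass

    def test_real():
        """Does something."""
        x = 1 + 1
        assert x == 2

    for fn in (test_empty, test_real):
        source = inspect.getsource(fn)
        impl = re.sub(doc_regex, "", source).splitlines()[1:]  # drop docstring, then def line
        lines = [ln for ln in impl if ln.strip() and not skip(ln)]
        print(fn.__name__, "is empty:", not lines)
    # test_empty is empty: True
    # test_real is empty: False
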
@package_https_directives
def _linting_package_file(pkgs, error_cls):
"""Check for correctness of links"""

@@ -35,6 +35,7 @@
import spack.caches
import spack.config as config
import spack.database as spack_db
import spack.deptypes as dt
import spack.error
import spack.hash_types as ht
import spack.hooks
@@ -251,7 +252,7 @@ def _associate_built_specs_with_mirror(self, cache_key, mirror_url):

spec_list = [
s
for s in db.query_local(installed=any, in_buildcache=any)
for s in db.query_local(installed=any)
if s.external or db.query_local_by_spec_hash(s.dag_hash()).in_buildcache
]

@@ -712,15 +713,32 @@ def get_buildfile_manifest(spec):
return data


def hashes_to_prefixes(spec):
"""Return a dictionary of hashes to prefixes for a spec and its deps, excluding externals"""
return {
s.dag_hash(): str(s.prefix)
def deps_to_relocate(spec):
"""Return the transitive link and direct run dependencies of the spec.

This is a special traversal for dependencies we need to consider when relocating a package.

Package binaries, scripts, and other files may refer to the prefixes of dependencies, so
we need to rewrite those locations when dependencies are in a different place at install time
than they were at build time.

This traversal covers transitive link dependencies and direct run dependencies because:

1. Spack adds RPATHs for transitive link dependencies so that packages can find needed
dependency libraries.
2. Packages may call any of their *direct* run dependencies (and may bake their paths into
binaries or scripts), so we also need to search for run dependency prefixes when relocating.

This returns a deduplicated list of transitive link dependencies and direct run dependencies.
"""
deps = [
s
for s in itertools.chain(
spec.traverse(root=True, deptype="link"), spec.dependencies(deptype="run")
)
if not s.external
}
]
return llnl.util.lang.dedupe(deps, key=lambda s: s.dag_hash())
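
A self-contained sketch of the chain-and-dedupe idea above: FakeSpec and the local dedupe generator are illustrative stand-ins for spack.spec.Spec and llnl.util.lang.dedupe.

    import itertools
    from dataclasses import dataclass

    @dataclass(frozen=True)
    class FakeSpec:  # stand-in for spack.spec.Spec, for illustration only
        name: str
        hash: str
        external: bool = False

        def dag_hash(self):
            return self.hash

    def dedupe(iterable, key):  # same contract as llnl.util.lang.dedupe
        seen = set()
        for item in iterable:
            k = key(item)
            if k not in seen:
                seen.add(k)
                yield item

    # glibc shows up both as a transitive link dep and a direct run dep;
    # it must appear only once in the relocation list.
    link_deps = [FakeSpec("root", "aaa"), FakeSpec("glibc", "bbb")]
    run_deps = [FakeSpec("glibc", "bbb"), FakeSpec("python", "ccc", external=True)]

    deps = [s for s in itertools.chain(link_deps, run_deps) if not s.external]
    unique = list(dedupe(deps, key=lambda s: s.dag_hash()))
    print([s.name for s in unique])  # ['root', 'glibc']
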
def get_buildinfo_dict(spec):
@@ -736,7 +754,7 @@ def get_buildinfo_dict(spec):
"relocate_binaries": manifest["binary_to_relocate"],
"relocate_links": manifest["link_to_relocate"],
"hardlinks_deduped": manifest["hardlinks_deduped"],
"hash_to_prefix": hashes_to_prefixes(spec),
"hash_to_prefix": {d.dag_hash(): str(d.prefix) for d in deps_to_relocate(spec)},
}


@@ -1631,7 +1649,6 @@ def _oci_push(
Dict[str, spack.oci.oci.Blob],
List[Tuple[Spec, BaseException]],
]:

# Spec dag hash -> blob
checksums: Dict[str, spack.oci.oci.Blob] = {}

@@ -2201,11 +2218,36 @@ def relocate_package(spec):
# First match specific prefix paths. Possibly the *local* install prefix
# of some dependency is in an upstream, so we cannot assume the original
# spack store root can be mapped uniformly to the new spack store root.
for dag_hash, new_dep_prefix in hashes_to_prefixes(spec).items():
if dag_hash in hash_to_old_prefix:
old_dep_prefix = hash_to_old_prefix[dag_hash]
prefix_to_prefix_bin[old_dep_prefix] = new_dep_prefix
prefix_to_prefix_text[old_dep_prefix] = new_dep_prefix
#
# If the spec is spliced, we need to handle the simultaneous mapping
# from the old install_tree to the new install_tree and from the build_spec
# to the spliced spec.
# Because foo.build_spec is foo for any non-spliced spec, we can detect
# spliced-in nodes simply by looking for nodes that are not in the build_spec,
# without any explicit check for whether the spec is spliced.
# An analog in this algorithm is any spec that shares a name or provides the same virtuals
# in the context of the relevant root spec. This ensures that the analog for a spec s
# is the spec that s replaced when we spliced.
relocation_specs = deps_to_relocate(spec)
build_spec_ids = set(id(s) for s in spec.build_spec.traverse(deptype=dt.ALL & ~dt.BUILD))
for s in relocation_specs:
analog = s
if id(s) not in build_spec_ids:
analogs = [
d
for d in spec.build_spec.traverse(deptype=dt.ALL & ~dt.BUILD)
if s._splice_match(d, self_root=spec, other_root=spec.build_spec)
]
if analogs:
# Prefer same-name analogs and prefer higher versions
# This matches the preferences in Spec.splice, so we will find the same node
analog = max(analogs, key=lambda a: (a.name == s.name, a.version))

lookup_dag_hash = analog.dag_hash()
if lookup_dag_hash in hash_to_old_prefix:
old_dep_prefix = hash_to_old_prefix[lookup_dag_hash]
prefix_to_prefix_bin[old_dep_prefix] = str(s.prefix)
prefix_to_prefix_text[old_dep_prefix] = str(s.prefix)

# Only then add the generic fallback of install prefix -> install prefix.
prefix_to_prefix_text[old_prefix] = new_prefix
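
A sketch of the tuple-key preference used to pick an analog above: a same-name analog always beats a different-name one, and among same-name analogs the higher version wins. Node and the values here are hypothetical.

    from collections import namedtuple

    Node = namedtuple("Node", ["name", "version"])

    s = Node("zlib", (1, 3))  # spliced-in node that needs an analog
    analogs = [Node("zlib-ng", (2, 1)), Node("zlib", (1, 2)), Node("zlib", (1, 1))]

    # Tuples compare elementwise: (True, ...) beats (False, ...), so a same-name
    # analog always wins; among same-name analogs the higher version wins.
    analog = max(analogs, key=lambda a: (a.name == s.name, a.version))
    print(analog)  # Node(name='zlib', version=(1, 2))
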
@@ -2520,7 +2562,13 @@ def _ensure_common_prefix(tar: tarfile.TarFile) -> str:
return pkg_prefix


def install_root_node(spec, unsigned=False, force=False, sha256=None):
def install_root_node(
spec: spack.spec.Spec,
unsigned=False,
force: bool = False,
sha256: Optional[str] = None,
allow_missing: bool = False,
) -> None:
"""Install the root node of a concrete spec from a buildcache.

Checking the sha256 sum of a node before installation is usually needed only
@@ -2529,11 +2577,10 @@ def install_root_node(spec, unsigned=False, force=False, sha256=None):

Args:
spec: spec to be installed (note that only the root node will be installed)
unsigned (bool): if True allows installing unsigned binaries
force (bool): force installation if the spec is already present in the
local store
sha256 (str): optional sha256 of the binary package, to be checked
before installation
unsigned: if True allows installing unsigned binaries
force: force installation if the spec is already present in the local store
sha256: optional sha256 of the binary package, to be checked before installation
allow_missing: when true, allows installing a node with missing dependencies
"""
# Early termination
if spec.external or spec.virtual:
@@ -2543,10 +2590,10 @@ def install_root_node(spec, unsigned=False, force=False, sha256=None):
warnings.warn("Package for spec {0} already installed.".format(spec.format()))
return

download_result = download_tarball(spec, unsigned)
download_result = download_tarball(spec.build_spec, unsigned)
if not download_result:
msg = 'download of binary cache file for spec "{0}" failed'
raise RuntimeError(msg.format(spec.format()))
raise RuntimeError(msg.format(spec.build_spec.format()))

if sha256:
checker = spack.util.crypto.Checker(sha256)
@@ -2565,8 +2612,13 @@ def install_root_node(spec, unsigned=False, force=False, sha256=None):
with spack.util.path.filter_padding():
tty.msg('Installing "{0}" from a buildcache'.format(spec.format()))
extract_tarball(spec, download_result, force)
spec.package.windows_establish_runtime_linkage()
if spec.spliced: # overwrite old metadata with new
spack.store.STORE.layout.write_spec(
spec, spack.store.STORE.layout.spec_file_path(spec)
)
spack.hooks.post_install(spec, False)
spack.store.STORE.db.add(spec)
spack.store.STORE.db.add(spec, allow_missing=allow_missing)


def install_single_spec(spec, unsigned=False, force=False):

@@ -4,6 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Common basic functions used throughout the spack.bootstrap package"""
import fnmatch
import glob
import importlib
import os.path
import re
@@ -60,10 +61,19 @@ def _try_import_from_store(
python, *_ = candidate_spec.dependencies("python-venv")
else:
python, *_ = candidate_spec.dependencies("python")
module_paths = [
os.path.join(candidate_spec.prefix, python.package.purelib),
os.path.join(candidate_spec.prefix, python.package.platlib),
]

# if python is installed, ask it for the layout
if python.installed:
module_paths = [
os.path.join(candidate_spec.prefix, python.package.purelib),
os.path.join(candidate_spec.prefix, python.package.platlib),
]
# otherwise search for the site-packages directory
# (clingo from binaries with truncated python-venv runtime)
else:
module_paths = glob.glob(
os.path.join(candidate_spec.prefix, "lib", "python*", "site-packages")
)
path_before = list(sys.path)

# NOTE: try module_paths first and last, last allows an existing version in path
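
A sketch of the fallback above, with a hypothetical prefix and an illustrative purelib default: when the Python dependency is installed we can ask it for the layout; otherwise the site-packages directory is located by globbing under the prefix.

    import glob
    import os

    def candidate_module_paths(prefix, python_installed, purelib="lib/python3.11/site-packages"):
        # purelib/platlib normally come from the installed python package;
        # the default here is only an illustrative value.
        if python_installed:
            return [os.path.join(prefix, purelib)]
        # Fallback: search the prefix for any site-packages directory, as done
        # above for binaries shipped with a truncated python-venv runtime.
        return glob.glob(os.path.join(prefix, "lib", "python*", "site-packages"))

    print(candidate_module_paths("/opt/example-prefix", python_installed=False))
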
@@ -37,6 +37,7 @@
import spack.binary_distribution
import spack.config
import spack.detection
import spack.mirror
import spack.platforms
import spack.spec
import spack.store
@@ -44,7 +45,6 @@
import spack.util.executable
import spack.util.path
import spack.util.spack_yaml
import spack.util.url
import spack.version
from spack.installer import PackageInstaller

@@ -91,12 +91,7 @@ def __init__(self, conf: ConfigDictionary) -> None:
self.metadata_dir = spack.util.path.canonicalize_path(conf["metadata"])

# Promote (relative) paths to file urls
url = conf["info"]["url"]
if spack.util.url.is_path_instead_of_url(url):
if not os.path.isabs(url):
url = os.path.join(self.metadata_dir, url)
url = spack.util.url.path_to_file_url(url)
self.url = url
self.url = spack.mirror.Mirror(conf["info"]["url"]).fetch_url

@property
def mirror_scope(self) -> spack.config.InternalConfigScope:
@@ -175,7 +170,15 @@ def _install_by_hash(
query = spack.binary_distribution.BinaryCacheQuery(all_architectures=True)
for match in spack.store.find([f"/{pkg_hash}"], multiple=False, query_fn=query):
spack.binary_distribution.install_root_node(
match, unsigned=True, force=True, sha256=pkg_sha256
# allow_missing is true because, when bootstrapping clingo, we truncate runtime
# deps such as gcc-runtime: we link libstdc++ statically, and the remaining
# runtime deps are loaded by the Python interpreter. This just silences
# warnings about missing dependencies.
match,
unsigned=True,
force=True,
sha256=pkg_sha256,
allow_missing=True,
)

def _install_and_test(
@@ -599,7 +602,10 @@ def bootstrapping_sources(scope: Optional[str] = None):
current = copy.copy(entry)
metadata_dir = spack.util.path.canonicalize_path(entry["metadata"])
metadata_yaml = os.path.join(metadata_dir, METADATA_YAML_FILENAME)
with open(metadata_yaml, encoding="utf-8") as stream:
current.update(spack.util.spack_yaml.load(stream))
list_of_sources.append(current)
try:
with open(metadata_yaml, encoding="utf-8") as stream:
current.update(spack.util.spack_yaml.load(stream))
list_of_sources.append(current)
except OSError:
pass
return list_of_sources
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -37,13 +37,15 @@
import multiprocessing
import os
import re
import stat
import sys
import traceback
import types
from collections import defaultdict
from enum import Flag, auto
from itertools import chain
from typing import Dict, List, Set, Tuple
from multiprocessing.connection import Connection
from typing import Callable, Dict, List, Optional, Set, Tuple

import archspec.cpu

@@ -53,7 +55,6 @@
from llnl.util.lang import dedupe, stable_partition
from llnl.util.symlink import symlink
from llnl.util.tty.color import cescape, colorize
from llnl.util.tty.log import MultiProcessFd

import spack.build_systems._checks
import spack.build_systems.cmake
@@ -74,6 +75,7 @@
import spack.store
import spack.subprocess_context
import spack.util.executable
import spack.util.libc
from spack import traverse
from spack.context import Context
from spack.error import InstallError, NoHeadersError, NoLibrariesError
@@ -89,7 +91,7 @@
)
from spack.util.executable import Executable
from spack.util.log_parse import make_log_context, parse_log_events
from spack.util.module_cmd import load_module, path_from_modules
from spack.util.module_cmd import load_module

#
# This can be set by the user to globally disable parallel builds.
@@ -435,6 +437,35 @@ def optimization_flags(compiler, target):
return result


class FilterDefaultDynamicLinkerSearchPaths:
"""Remove rpaths to directories that are default search paths of the dynamic linker."""

def __init__(self, dynamic_linker: Optional[str]) -> None:
# Identify directories by (inode, device) tuple, which handles symlinks too.
self.default_path_identifiers: Set[Tuple[int, int]] = set()
if not dynamic_linker:
return
for path in spack.util.libc.default_search_paths_from_dynamic_linker(dynamic_linker):
try:
s = os.stat(path)
if stat.S_ISDIR(s.st_mode):
self.default_path_identifiers.add((s.st_ino, s.st_dev))
except OSError:
continue

def is_dynamic_loader_default_path(self, p: str) -> bool:
try:
s = os.stat(p)
return (s.st_ino, s.st_dev) in self.default_path_identifiers
except OSError:
return False

def __call__(self, dirs: List[str]) -> List[str]:
if not self.default_path_identifiers:
return dirs
return [p for p in dirs if not self.is_dynamic_loader_default_path(p)]
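
A sketch of why directories are identified by an (inode, device) pair above: on a POSIX system a symlink and its target stat to the same identifier, so every spelling of a default search path is filtered.

    import os
    import stat
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        real = os.path.join(tmp, "lib64")
        os.mkdir(real)
        alias = os.path.join(tmp, "lib")
        os.symlink(real, alias)  # "lib" is just another name for "lib64"

        def identify(path):
            s = os.stat(path)  # follows symlinks
            return (s.st_ino, s.st_dev) if stat.S_ISDIR(s.st_mode) else None

        # Both names resolve to the same (inode, device) pair, so a set of these
        # identifiers treats the symlink and its target as the same search path.
        print(identify(real) == identify(alias))  # True
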
def set_wrapper_variables(pkg, env):
"""Set environment variables used by the Spack compiler wrapper (which have the prefix
`SPACK_`) and also add the compiler wrappers to PATH.
@@ -492,69 +523,71 @@ def set_wrapper_variables(pkg, env):
env.set("CCACHE_DISABLE", "1")

# Gather information about various types of dependencies
link_deps = set(pkg.spec.traverse(root=False, deptype=("link")))
rpath_deps = get_rpath_deps(pkg)
rpath_hashes = set(s.dag_hash() for s in get_rpath_deps(pkg))
link_deps = pkg.spec.traverse(root=False, order="topo", deptype=dt.LINK)
external_link_deps, nonexternal_link_deps = stable_partition(link_deps, lambda d: d.external)

link_dirs = []
include_dirs = []
rpath_dirs = []

def _prepend_all(list_to_modify, items_to_add):
# Update the original list (creating a new list would be faster but
# may not be convenient)
for item in reversed(list(items_to_add)):
list_to_modify.insert(0, item)
for dep in chain(external_link_deps, nonexternal_link_deps):
# TODO: is_system_path is wrong, but even if we knew default -L, -I flags from the compiler
# and default search dirs from the dynamic linker, it's not obvious how to avoid a possibly
# expensive search in `query.libs.directories` and `query.headers.directories`, which is
# what this branch is trying to avoid.
if is_system_path(dep.prefix):
continue
# TODO: as of Spack 0.22, multiple instances of the same package may occur among the link
# deps, so keying by name is wrong. In practice it is not problematic: we obtain the same
# gcc-runtime / glibc here, and repeatedly add the same dirs that are later deduped.
query = pkg.spec[dep.name]
dep_link_dirs = []
try:
# Locating libraries can be time consuming, so log start and finish.
tty.debug(f"Collecting libraries for {dep.name}")
dep_link_dirs.extend(query.libs.directories)
tty.debug(f"Libraries for {dep.name} have been collected.")
except NoLibrariesError:
tty.debug(f"No libraries found for {dep.name}")

def update_compiler_args_for_dep(dep):
if dep in link_deps and (not is_system_path(dep.prefix)):
query = pkg.spec[dep.name]
dep_link_dirs = list()
try:
# In some circumstances (particularly for externals) finding
# libraries for packages can be time consuming, so indicate that
# we are performing this operation (and also report when it
# finishes).
tty.debug("Collecting libraries for {0}".format(dep.name))
dep_link_dirs.extend(query.libs.directories)
tty.debug("Libraries for {0} have been collected.".format(dep.name))
except NoLibrariesError:
tty.debug("No libraries found for {0}".format(dep.name))
for default_lib_dir in ("lib", "lib64"):
default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
if os.path.isdir(default_lib_prefix):
dep_link_dirs.append(default_lib_prefix)

for default_lib_dir in ["lib", "lib64"]:
default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
if os.path.isdir(default_lib_prefix):
dep_link_dirs.append(default_lib_prefix)
link_dirs[:0] = dep_link_dirs
if dep.dag_hash() in rpath_hashes:
rpath_dirs[:0] = dep_link_dirs

_prepend_all(link_dirs, dep_link_dirs)
if dep in rpath_deps:
_prepend_all(rpath_dirs, dep_link_dirs)
try:
tty.debug(f"Collecting headers for {dep.name}")
include_dirs[:0] = query.headers.directories
tty.debug(f"Headers for {dep.name} have been collected.")
except NoHeadersError:
tty.debug(f"No headers found for {dep.name}")

try:
_prepend_all(include_dirs, query.headers.directories)
except NoHeadersError:
tty.debug("No headers found for {0}".format(dep.name))

for dspec in pkg.spec.traverse(root=False, order="post"):
if dspec.external:
update_compiler_args_for_dep(dspec)

# Just above, we prepended entries for -L/-rpath for externals. We
# now do this for non-external packages so that Spack-built packages
# are searched first for libraries etc.
for dspec in pkg.spec.traverse(root=False, order="post"):
if not dspec.external:
update_compiler_args_for_dep(dspec)

# The top-level package is always RPATHed. It hasn't been installed yet
# so the RPATHs are added unconditionally (e.g. even though lib64/ may
# not be created for the install).
for libdir in ["lib64", "lib"]:
# The top-level package is heuristically rpath'ed.
for libdir in ("lib64", "lib"):
lib_path = os.path.join(pkg.prefix, libdir)
rpath_dirs.insert(0, lib_path)

filter_default_dynamic_linker_search_paths = FilterDefaultDynamicLinkerSearchPaths(
pkg.compiler.default_dynamic_linker
)

# TODO: filter_system_paths is again wrong (and probably unnecessary due to the is_system_path
# branch above). link_dirs should be filtered with entries from _parse_link_paths.
link_dirs = list(dedupe(filter_system_paths(link_dirs)))
include_dirs = list(dedupe(filter_system_paths(include_dirs)))
rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
rpath_dirs = filter_default_dynamic_linker_search_paths(rpath_dirs)

# TODO: implicit_rpaths is prefiltered by is_system_path, that should be removed in favor of
# just this filter.
implicit_rpaths = filter_default_dynamic_linker_search_paths(pkg.compiler.implicit_rpaths())
if implicit_rpaths:
env.set("SPACK_COMPILER_IMPLICIT_RPATHS", ":".join(implicit_rpaths))

# Spack managed directories include the stage, store and upstream stores. We extend this with
# their real paths to make it more robust (e.g. /tmp vs /private/tmp on macOS).
@@ -584,13 +617,11 @@ def set_package_py_globals(pkg, context: Context = Context.BUILD):
"""
module = ModuleChangePropagator(pkg)

if context == Context.BUILD:
module.std_cmake_args = spack.build_systems.cmake.CMakeBuilder.std_args(pkg)
module.std_meson_args = spack.build_systems.meson.MesonBuilder.std_args(pkg)
module.std_pip_args = spack.build_systems.python.PythonPipBuilder.std_args(pkg)

jobs = spack.config.determine_number_of_jobs(parallel=pkg.parallel)
module.make_jobs = jobs
if context == Context.BUILD:
module.std_meson_args = spack.build_systems.meson.MesonBuilder.std_args(pkg)
module.std_pip_args = spack.build_systems.python.PythonPipBuilder.std_args(pkg)

# TODO: make these build deps that can be installed if not found.
module.make = MakeExecutable("make", jobs)
@@ -759,21 +790,6 @@ def get_rpath_deps(pkg: spack.package_base.PackageBase) -> List[spack.spec.Spec]
return _get_rpath_deps_from_spec(pkg.spec, pkg.transitive_rpaths)


def get_rpaths(pkg):
"""Get a list of all the rpaths for a package."""
rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
deps = get_rpath_deps(pkg)
rpaths.extend(d.prefix.lib for d in deps if os.path.isdir(d.prefix.lib))
rpaths.extend(d.prefix.lib64 for d in deps if os.path.isdir(d.prefix.lib64))
# Second module is our compiler mod name. We use that to get rpaths from
# module show output.
if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
mod_rpath = path_from_modules([pkg.compiler.modules[1]])
if mod_rpath:
rpaths.append(mod_rpath)
return list(dedupe(filter_system_paths(rpaths)))


def load_external_modules(pkg):
"""Traverse a package's spec DAG and load any external modules.

@@ -841,10 +857,6 @@ def setup_package(pkg, dirty, context: Context = Context.BUILD):

load_external_modules(pkg)

implicit_rpaths = pkg.compiler.implicit_rpaths()
if implicit_rpaths:
env_mods.set("SPACK_COMPILER_IMPLICIT_RPATHS", ":".join(implicit_rpaths))

# Make sure nothing's strange about the Spack environment.
validate(env_mods, tty.warn)
env_mods.apply_modifications()
@@ -1034,6 +1046,12 @@ def set_all_package_py_globals(self):
# This includes runtime dependencies, also runtime deps of direct build deps.
set_package_py_globals(pkg, context=Context.RUN)

# Looping over the set of packages a second time
# ensures all globals are loaded into the module space prior to
# any package setup. This guarantees package setup methods have
# access to expected module level definitions such as "spack_cc"
for dspec, flag in chain(self.external, self.nonexternal):
pkg = dspec.package
for spec in dspec.dependents():
# Note: some specs have dependents that are unreachable from the root, so avoid
# setting globals for those.
@@ -1043,6 +1061,15 @@ def set_all_package_py_globals(self):
pkg.setup_dependent_package(dependent_module, spec)
dependent_module.propagate_changes_to_mro()

if self.context == Context.BUILD:
pkg = self.specs[0].package
module = ModuleChangePropagator(pkg)
# std_cmake_args is not sufficiently static to be defined
# in set_package_py_globals and is deprecated, so it's handled
# here as a special case
module.std_cmake_args = spack.build_systems.cmake.CMakeBuilder.std_args(pkg)
module.propagate_changes_to_mro()

def get_env_modifications(self) -> EnvironmentModifications:
"""Returns the environment variable modifications for the given input specs and context.
Environment modifications include:
@@ -1112,45 +1139,61 @@ def _make_runnable(self, dep: spack.spec.Spec, env: EnvironmentModifications):
env.prepend_path("PATH", bin_dir)


def get_cmake_prefix_path(pkg):
# Note that unlike modifications_from_dependencies, this does not include
# any edits to CMAKE_PREFIX_PATH defined in custom
# setup_dependent_build_environment implementations of dependency packages
build_deps = set(pkg.spec.dependencies(deptype=("build", "test")))
link_deps = set(pkg.spec.traverse(root=False, deptype=("link")))
build_link_deps = build_deps | link_deps
spack_built = []
externals = []
# modifications_from_dependencies updates CMAKE_PREFIX_PATH by first
# prepending all externals and then all non-externals
for dspec in pkg.spec.traverse(root=False, order="post"):
if dspec in build_link_deps:
if dspec.external:
externals.insert(0, dspec)
else:
spack_built.insert(0, dspec)

ordered_build_link_deps = spack_built + externals
cmake_prefix_path_entries = []
for spec in ordered_build_link_deps:
cmake_prefix_path_entries.extend(spec.package.cmake_prefix_paths)

return filter_system_paths(cmake_prefix_path_entries)


def _setup_pkg_and_run(
serialized_pkg, function, kwargs, write_pipe, input_multiprocess_fd, jsfd1, jsfd2
serialized_pkg: "spack.subprocess_context.PackageInstallContext",
function: Callable,
kwargs: Dict,
write_pipe: Connection,
input_pipe: Optional[Connection],
jsfd1: Optional[Connection],
jsfd2: Optional[Connection],
):
"""Main entry point in the child process for Spack builds.

``_setup_pkg_and_run`` is called by the child process created in
``start_build_process()``, and its main job is to run ``function()`` on behalf of
some Spack installation (see :ref:`spack.installer.PackageInstaller._install_task`).

The child process is passed a ``write_pipe``, on which it's expected to send one of
the following:

* ``StopPhase``: error raised by a build process indicating it's stopping at a
particular build phase.

* ``BaseException``: any exception raised by a child build process, which will be
wrapped in ``ChildError`` (which adds a bunch of debug info and log context) and
raised in the parent.

* The return value of ``function()``, which can be anything (except an exception).
This is returned to the caller.

Note: ``jsfd1`` and ``jsfd2`` are passed solely to ensure that the child process
does not close these file descriptors. Some ``multiprocessing`` backends will close
them automatically in the child if they are not passed at process creation time.

Arguments:
serialized_pkg: Spack package install context object (serialized form of the
package that we'll build in the child process).
function: function to call in the child process; serialized_pkg is passed to
this as the first argument.
kwargs: additional keyword arguments to pass to ``function()``.
write_pipe: multiprocessing ``Connection`` to the parent process, to which the
child *must* send a result (or an error) back to the parent.
input_multiprocess_fd: stdin from the parent (not passed currently on Windows)
jsfd1: gmake Jobserver file descriptor 1.
jsfd2: gmake Jobserver file descriptor 2.

"""

context: str = kwargs.get("context", "build")

try:
# We are in the child process. Python sets sys.stdin to
# open(os.devnull) to prevent our process and its parent from
# simultaneously reading from the original stdin. But, we assume
# that the parent process is not going to read from it till we
# are done with the child, so we undo Python's precaution.
if input_multiprocess_fd is not None:
sys.stdin = os.fdopen(input_multiprocess_fd.fd)
# We are in the child process. Python sets sys.stdin to open(os.devnull) to prevent our
# process and its parent from simultaneously reading from the original stdin. But, we
# assume that the parent process is not going to read from it till we are done with the
# child, so we undo Python's precaution. closefd=False since Connection has ownership.
if input_pipe is not None:
sys.stdin = os.fdopen(input_pipe.fileno(), closefd=False)

pkg = serialized_pkg.restore()

@@ -1166,13 +1209,14 @@ def _setup_pkg_and_run(
# Do not create a full ChildError from this, it's not an error
# it's a control statement.
write_pipe.send(e)
except BaseException:
except BaseException as e:
# catch ANYTHING that goes wrong in the child process
exc_type, exc, tb = sys.exc_info()

# Need to unwind the traceback in the child because traceback
# objects can't be sent to the parent.
tb_string = traceback.format_exc()
exc_type = type(e)
tb = e.__traceback__
tb_string = "".join(traceback.format_exception(exc_type, e, tb))

# build up some context from the offending package so we can
# show that, too.
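
A sketch of why the child formats the traceback into a string before sending it to the parent: traceback objects cannot be pickled, while formatted strings can.

    import pickle
    import traceback

    try:
        1 / 0
    except BaseException as e:
        tb_string = "".join(traceback.format_exception(type(e), e, e.__traceback__))

    # The formatted string survives pickling (e.g. through a multiprocessing pipe);
    # pickling the raw traceback object would raise a TypeError instead.
    restored = pickle.loads(pickle.dumps(tb_string))
    print(restored.splitlines()[-1])  # ZeroDivisionError: division by zero
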
@@ -1189,8 +1233,8 @@ def _setup_pkg_and_run(
elif context == "test":
logfile = os.path.join(pkg.test_suite.stage, pkg.test_suite.test_log_name(pkg.spec))

error_msg = str(exc)
if isinstance(exc, (spack.multimethod.NoSuchMethodError, AttributeError)):
error_msg = str(e)
if isinstance(e, (spack.multimethod.NoSuchMethodError, AttributeError)):
process = "test the installation" if context == "test" else "build from sources"
error_msg = (
"The '{}' package cannot find an attribute while trying to {}. "
@@ -1200,7 +1244,7 @@ def _setup_pkg_and_run(
"More information at https://spack.readthedocs.io/en/latest/packaging_guide.html#installation-procedure"
).format(pkg.name, process, context)
error_msg = colorize("@*R{{{}}}".format(error_msg))
error_msg = "{}\n\n{}".format(str(exc), error_msg)
error_msg = "{}\n\n{}".format(str(e), error_msg)

# make a pickleable exception to send to parent.
msg = "%s: %s" % (exc_type.__name__, error_msg)
@@ -1218,8 +1262,8 @@ def _setup_pkg_and_run(

finally:
write_pipe.close()
if input_multiprocess_fd is not None:
input_multiprocess_fd.close()
if input_pipe is not None:
input_pipe.close()


def start_build_process(pkg, function, kwargs):
@@ -1246,23 +1290,9 @@ def child_fun():
If something goes wrong, the child process catches the error and
passes it to the parent wrapped in a ChildError. The parent is
expected to handle (or re-raise) the ChildError.

This uses `multiprocessing.Process` to create the child process. The
mechanism used to create the process differs on different operating
systems and for different versions of Python. In some cases "fork"
is used (i.e. the "fork" system call) and some cases it starts an
entirely new Python interpreter process (in the docs this is referred
to as the "spawn" start method). Breaking it down by OS:

- Linux always uses fork.
- Mac OS uses fork before Python 3.8 and "spawn" for 3.8 and after.
- Windows always uses the "spawn" start method.

For more information on `multiprocessing` child process creation
mechanisms, see https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
"""
read_pipe, write_pipe = multiprocessing.Pipe(duplex=False)
input_multiprocess_fd = None
input_fd = None
jobserver_fd1 = None
jobserver_fd2 = None

@@ -1271,14 +1301,13 @@ def child_fun():
try:
# Forward sys.stdin when appropriate, to allow toggling verbosity
if sys.platform != "win32" and sys.stdin.isatty() and hasattr(sys.stdin, "fileno"):
input_fd = os.dup(sys.stdin.fileno())
input_multiprocess_fd = MultiProcessFd(input_fd)
input_fd = Connection(os.dup(sys.stdin.fileno()))
mflags = os.environ.get("MAKEFLAGS", False)
if mflags:
m = re.search(r"--jobserver-[^=]*=(\d),(\d)", mflags)
if m:
jobserver_fd1 = MultiProcessFd(int(m.group(1)))
jobserver_fd2 = MultiProcessFd(int(m.group(2)))
jobserver_fd1 = Connection(int(m.group(1)))
jobserver_fd2 = Connection(int(m.group(2)))
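
The jobserver regex above, run against a hypothetical MAKEFLAGS value; note the pattern matches both the newer --jobserver-auth and the older --jobserver-fds spellings, but only captures single-digit descriptors.

    import re

    mflags = "-j8 --jobserver-auth=3,4"  # sample value; real ones come from gmake
    m = re.search(r"--jobserver-[^=]*=(\d),(\d)", mflags)
    if m:
        read_fd, write_fd = int(m.group(1)), int(m.group(2))
        print(read_fd, write_fd)  # 3 4
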
p = multiprocessing.Process(
target=_setup_pkg_and_run,
@@ -1287,7 +1316,7 @@ def child_fun():
function,
kwargs,
write_pipe,
input_multiprocess_fd,
input_fd,
jobserver_fd1,
jobserver_fd2,
),
@@ -1307,8 +1336,8 @@ def child_fun():

finally:
# Close the input stream in the parent process
if input_multiprocess_fd is not None:
input_multiprocess_fd.close()
if input_fd is not None:
input_fd.close()

def exitcode_msg(p):
typ = "exit" if p.exitcode >= 0 else "signal"

@@ -7,7 +7,6 @@
import llnl.util.filesystem as fs

import spack.directives
import spack.package_base
import spack.util.executable

from .autotools import AutotoolsBuilder, AutotoolsPackage

@@ -10,7 +10,6 @@
import llnl.util.filesystem as fs
import llnl.util.tty as tty

import spack.build_environment
import spack.builder

from .cmake import CMakeBuilder, CMakePackage
@@ -297,18 +296,6 @@ def initconfig_hardware_entries(self):
def std_initconfig_entries(self):
cmake_prefix_path_env = os.environ["CMAKE_PREFIX_PATH"]
cmake_prefix_path = cmake_prefix_path_env.replace(os.pathsep, ";")
cmake_rpaths_env = spack.build_environment.get_rpaths(self.pkg)
cmake_rpaths_path = ";".join(cmake_rpaths_env)
complete_rpath_list = cmake_rpaths_path
if "SPACK_COMPILER_EXTRA_RPATHS" in os.environ:
spack_extra_rpaths_env = os.environ["SPACK_COMPILER_EXTRA_RPATHS"]
spack_extra_rpaths_path = spack_extra_rpaths_env.replace(os.pathsep, ";")
complete_rpath_list = "{0};{1}".format(complete_rpath_list, spack_extra_rpaths_path)

if "SPACK_COMPILER_IMPLICIT_RPATHS" in os.environ:
spack_implicit_rpaths_env = os.environ["SPACK_COMPILER_IMPLICIT_RPATHS"]
spack_implicit_rpaths_path = spack_implicit_rpaths_env.replace(os.pathsep, ";")
complete_rpath_list = "{0};{1}".format(complete_rpath_list, spack_implicit_rpaths_path)

return [
"#------------------{0}".format("-" * 60),
@@ -318,8 +305,6 @@ def std_initconfig_entries(self):
"#------------------{0}\n".format("-" * 60),
cmake_cache_string("CMAKE_PREFIX_PATH", cmake_prefix_path),
cmake_cache_string("CMAKE_INSTALL_RPATH_USE_LINK_PATH", "ON"),
cmake_cache_string("CMAKE_BUILD_RPATH", complete_rpath_list),
cmake_cache_string("CMAKE_INSTALL_RPATH", complete_rpath_list),
self.define_cmake_cache_from_variant("CMAKE_BUILD_TYPE", "build_type"),
]


@@ -8,17 +8,19 @@
import platform
import re
import sys
from typing import List, Optional, Tuple
from itertools import chain
from typing import List, Optional, Set, Tuple

import llnl.util.filesystem as fs
from llnl.util.lang import stable_partition

import spack.build_environment
import spack.builder
import spack.deptypes as dt
import spack.error
import spack.package_base
from spack.directives import build_system, conflicts, depends_on, variant
from spack.multimethod import when
from spack.util.environment import filter_system_paths

from ._checks import BaseBuilder, execute_build_time_tests

@@ -152,6 +154,24 @@ def _values(x):
conflicts(f"generator={x}")


def get_cmake_prefix_path(pkg: spack.package_base.PackageBase) -> List[str]:
"""Obtain the CMAKE_PREFIX_PATH entries for a package, based on the cmake_prefix_path package
attribute of direct build/test and transitive link dependencies."""
# Add direct build/test deps
selected: Set[str] = {s.dag_hash() for s in pkg.spec.dependencies(deptype=dt.BUILD | dt.TEST)}
# Add transitive link deps
selected.update(s.dag_hash() for s in pkg.spec.traverse(root=False, deptype=dt.LINK))
# Separate out externals so they do not shadow Spack prefixes
externals, spack_built = stable_partition(
(s for s in pkg.spec.traverse(root=False, order="topo") if s.dag_hash() in selected),
lambda x: x.external,
)

return filter_system_paths(
path for spec in chain(spack_built, externals) for path in spec.package.cmake_prefix_paths
)
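
A sketch of stable_partition semantics as used above (a small reimplementation for illustration; the real helper lives in llnl.util.lang): items matching the predicate come first in the returned pair, and input order is preserved within each half, so Spack-built prefixes precede externals when chained back together.

    from typing import Callable, Iterable, List, Tuple, TypeVar

    T = TypeVar("T")

    def stable_partition(items: Iterable[T], predicate: Callable[[T], bool]) -> Tuple[List[T], List[T]]:
        """Split items into (matching, non-matching), preserving input order."""
        true_items: List[T] = []
        false_items: List[T] = []
        for item in items:
            (true_items if predicate(item) else false_items).append(item)
        return true_items, false_items

    # Externals are partitioned out so that chain(spack_built, externals) puts
    # Spack-built prefixes first in CMAKE_PREFIX_PATH; names here are hypothetical.
    deps = [("cmake", True), ("zlib", False), ("openssl", False), ("perl", True)]
    externals, spack_built = stable_partition(deps, lambda d: d[1])
    print(externals)    # [('cmake', True), ('perl', True)]
    print(spack_built)  # [('zlib', False), ('openssl', False)]
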
class CMakePackage(spack.package_base.PackageBase):
"""Specialized class for packages built using CMake

@@ -358,6 +378,16 @@ def std_args(pkg, generator=None):
"-G",
generator,
define("CMAKE_INSTALL_PREFIX", pathlib.Path(pkg.prefix).as_posix()),
define("CMAKE_INSTALL_RPATH_USE_LINK_PATH", True),
# only include the install prefix lib dirs; rpaths for deps are added by USE_LINK_PATH
define(
"CMAKE_INSTALL_RPATH",
[
pathlib.Path(pkg.prefix, "lib").as_posix(),
pathlib.Path(pkg.prefix, "lib64").as_posix(),
],
),
define("CMAKE_PREFIX_PATH", get_cmake_prefix_path(pkg)),
define("CMAKE_BUILD_TYPE", build_type),
]

@@ -372,15 +402,6 @@ def std_args(pkg, generator=None):
_conditional_cmake_defaults(pkg, args)
_maybe_set_python_hints(pkg, args)

# Set up CMake rpath
args.extend(
[
define("CMAKE_INSTALL_RPATH_USE_LINK_PATH", True),
define("CMAKE_INSTALL_RPATH", spack.build_environment.get_rpaths(pkg)),
define("CMAKE_PREFIX_PATH", spack.build_environment.get_cmake_prefix_path(pkg)),
]
)

return args

@staticmethod
@@ -541,6 +562,13 @@ def cmake_args(self):

def cmake(self, pkg, spec, prefix):
"""Runs ``cmake`` in the build directory"""

# skip cmake phase if it is an incremental develop build
if spec.is_develop and os.path.isfile(
os.path.join(self.build_directory, "CMakeCache.txt")
):
return

options = self.std_cmake_args
options += self.cmake_args()
options.append(os.path.abspath(self.root_cmakelists_dir))

@@ -110,8 +110,8 @@ def compute_capabilities(arch_list: Iterable[str]) -> List[str]:

depends_on("cuda@5.0:10.2", when="cuda_arch=30")
depends_on("cuda@5.0:10.2", when="cuda_arch=32")
depends_on("cuda@5.0:", when="cuda_arch=35")
depends_on("cuda@6.5:", when="cuda_arch=37")
depends_on("cuda@5.0:11.8", when="cuda_arch=35")
depends_on("cuda@6.5:11.8", when="cuda_arch=37")

depends_on("cuda@6.0:", when="cuda_arch=50")
depends_on("cuda@6.5:", when="cuda_arch=52")
@@ -131,6 +131,7 @@ def compute_capabilities(arch_list: Iterable[str]) -> List[str]:
depends_on("cuda@11.8:", when="cuda_arch=89")

depends_on("cuda@12.0:", when="cuda_arch=90")
depends_on("cuda@12.0:", when="cuda_arch=90a")

# From the NVIDIA install guide we know of conflicts for particular
# platforms (linux, darwin), architectures (x86, powerpc) and compilers
@@ -149,7 +150,6 @@ def compute_capabilities(arch_list: Iterable[str]) -> List[str]:
# minimum supported versions
conflicts("%gcc@:4", when="+cuda ^cuda@11.0:")
conflicts("%gcc@:5", when="+cuda ^cuda@11.4:")
conflicts("%gcc@:7.2", when="+cuda ^cuda@12.4:")
conflicts("%clang@:6", when="+cuda ^cuda@12.2:")

# maximum supported version
@@ -241,6 +241,11 @@ def compute_capabilities(arch_list: Iterable[str]) -> List[str]:
conflicts("%intel@19.2:", when="+cuda ^cuda@:11.1.0")
conflicts("%intel@2021:", when="+cuda ^cuda@:11.4.0")

# ARM
# https://github.com/spack/spack/pull/39666#issuecomment-2377609263
# Might need to be expanded to other gcc versions
conflicts("%gcc@13.2.0", when="+cuda ^cuda@:12.4 target=aarch64:")

# XL is mostly relevant for ppc64le Linux
conflicts("%xl@:12,14:", when="+cuda ^cuda@:9.1")
conflicts("%xl@:12,14:15,17:", when="+cuda ^cuda@9.2")

@@ -44,16 +44,27 @@ class GoBuilder(BaseBuilder):
+-----------------------------------------------+--------------------+
| **Method** | **Purpose** |
+===============================================+====================+
| :py:meth:`~.GoBuilder.build_args` | Specify arguments |
| :py:attr:`~.GoBuilder.build_args` | Specify arguments |
| | to ``go build`` |
+-----------------------------------------------+--------------------+
| :py:meth:`~.GoBuilder.check_args` | Specify arguments |
| :py:attr:`~.GoBuilder.check_args` | Specify arguments |
| | to ``go test`` |
+-----------------------------------------------+--------------------+
"""

phases = ("build", "install")

#: Names associated with package methods in the old build-system format
legacy_methods = ("check", "installcheck")

#: Names associated with package attributes in the old build-system format
legacy_attributes = (
"build_args",
"check_args",
"build_directory",
"install_time_test_callbacks",
)

#: Callback names for install-time test
install_time_test_callbacks = ["check"]

@@ -339,7 +339,7 @@ class PythonPackage(PythonExtension):
legacy_buildsystem = "python_pip"

#: Callback names for install-time test
install_time_test_callbacks = ["test"]
install_time_test_callbacks = ["test_imports"]

build_system("python_pip")

@@ -429,7 +429,7 @@ class PythonPipBuilder(BaseBuilder):
phases = ("install",)

#: Names associated with package methods in the old build-system format
legacy_methods = ("test",)
legacy_methods = ("test_imports",)

#: Same as legacy_methods, but the signature is different
legacy_long_methods = ("install_options", "global_options", "config_settings")
@@ -438,7 +438,7 @@ class PythonPipBuilder(BaseBuilder):
legacy_attributes = ("archive_files", "build_directory", "install_time_test_callbacks")

#: Callback names for install-time test
install_time_test_callbacks = ["test"]
install_time_test_callbacks = ["test_imports"]

@staticmethod
def std_args(cls) -> List[str]:

@@ -12,6 +12,7 @@

import spack.error
import spack.multimethod
import spack.repo

#: Builder classes, as registered by the "builder" decorator
BUILDER_CLS = {}
@@ -74,6 +75,14 @@ def __call__(self, spec, prefix):
return self.phase_fn(self.builder.pkg, spec, prefix)


def get_builder_class(pkg, name: str) -> Optional[type]:
"""Return the builder class if a package module defines it."""
cls = getattr(pkg.module, name, None)
if cls and cls.__module__.startswith(spack.repo.ROOT_PYTHON_NAMESPACE):
return cls
return None


def _create(pkg):
"""Return a new builder object for the package object being passed as argument.

@@ -99,9 +108,10 @@ class hierarchy (look at AspellDictPackage for an example of that)
package_buildsystem = buildsystem_name(pkg)
default_builder_cls = BUILDER_CLS[package_buildsystem]
builder_cls_name = default_builder_cls.__name__
builder_cls = getattr(pkg.module, builder_cls_name, None)
if builder_cls:
return builder_cls(pkg)
builder_class = get_builder_class(pkg, builder_cls_name)

if builder_class:
return builder_class(pkg)

# Specialized version of a given buildsystem can subclass some
# base classes and specialize certain phases or methods or attributes.
@@ -521,10 +531,6 @@ def stage(self):
def prefix(self):
return self.pkg.prefix

def test(self):
# Defer tests to virtual and concrete packages
pass

def setup_build_environment(self, env):
"""Sets up the build environment for a package.


@@ -5,7 +5,6 @@

"""Caches used by Spack to store data"""
import os
from typing import Union

import llnl.util.lang
from llnl.util.filesystem import mkdirp
@@ -32,12 +31,8 @@ def _misc_cache():
return spack.util.file_cache.FileCache(path)


FileCacheType = Union[spack.util.file_cache.FileCache, llnl.util.lang.Singleton]

#: Spack's cache for small data
MISC_CACHE: Union[spack.util.file_cache.FileCache, llnl.util.lang.Singleton] = (
llnl.util.lang.Singleton(_misc_cache)
)
MISC_CACHE: spack.util.file_cache.FileCache = llnl.util.lang.Singleton(_misc_cache) # type: ignore


def fetch_cache_location():
@@ -74,6 +69,4 @@ def store(self, fetcher, relative_dest):


#: Spack's local cache for downloaded source archives
FETCH_CACHE: Union[spack.fetch_strategy.FsCache, llnl.util.lang.Singleton] = (
llnl.util.lang.Singleton(_fetch_cache)
)
FETCH_CACHE: spack.fetch_strategy.FsCache = llnl.util.lang.Singleton(_fetch_cache) # type: ignore

@@ -10,6 +10,7 @@
import os
import re
import shutil
import ssl
import stat
import subprocess
import sys
@@ -19,21 +20,21 @@
from collections import defaultdict, namedtuple
from typing import Dict, List, Optional, Set, Tuple
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode
from urllib.request import HTTPHandler, Request, build_opener
from urllib.parse import quote, urlencode, urlparse
from urllib.request import HTTPHandler, HTTPSHandler, Request, build_opener

import ruamel.yaml

import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.lang import memoized
from llnl.util.lang import Singleton, memoized
from llnl.util.tty.color import cescape, colorize

import spack
import spack.binary_distribution as bindist
import spack.concretize
import spack.config as cfg
import spack.environment as ev
import spack.error
import spack.main
import spack.mirror
import spack.paths
@@ -50,6 +51,31 @@
from spack.reporters.cdash import SPACK_CDASH_TIMEOUT
from spack.reporters.cdash import build_stamp as cdash_build_stamp


def _urlopen():
error_handler = web_util.SpackHTTPDefaultErrorHandler()

# One opener with HTTPS ssl enabled
with_ssl = build_opener(
HTTPHandler(), HTTPSHandler(context=web_util.ssl_create_default_context()), error_handler
)

# One opener with HTTPS ssl disabled
without_ssl = build_opener(
HTTPHandler(), HTTPSHandler(context=ssl._create_unverified_context()), error_handler
)

# And dynamically dispatch based on the config:verify_ssl.
def dispatch_open(fullurl, data=None, timeout=None, verify_ssl=True):
opener = with_ssl if verify_ssl else without_ssl
timeout = timeout or spack.config.get("config:connect_timeout", 1)
return opener.open(fullurl, data, timeout)

return dispatch_open


_dyn_mapping_urlopener = Singleton(_urlopen)
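
A minimal sketch of the dual-opener dispatch above using only the standard library; the Spack error handler and config lookup are omitted, and ssl._create_unverified_context is the same private helper used in the code above.

    import ssl
    from urllib.request import HTTPHandler, HTTPSHandler, build_opener

    def make_urlopen():
        # One opener that verifies certificates, one that does not; pick per call.
        with_ssl = build_opener(
            HTTPHandler(), HTTPSHandler(context=ssl.create_default_context())
        )
        without_ssl = build_opener(
            HTTPHandler(), HTTPSHandler(context=ssl._create_unverified_context())
        )

        def dispatch_open(fullurl, data=None, timeout=10.0, verify_ssl=True):
            opener = with_ssl if verify_ssl else without_ssl
            return opener.open(fullurl, data, timeout)

        return dispatch_open

    urlopen = make_urlopen()
    # urlopen("https://example.com", verify_ssl=False)  # would skip certificate checks
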
# See https://docs.gitlab.com/ee/ci/yaml/#retry for descriptions of conditions
|
||||
JOB_RETRY_CONDITIONS = [
|
||||
# "always",
|
||||
@@ -69,8 +95,6 @@
|
||||
|
||||
TEMP_STORAGE_MIRROR_NAME = "ci_temporary_mirror"
|
||||
SPACK_RESERVED_TAGS = ["public", "protected", "notary"]
|
||||
# TODO: Remove this in Spack 0.23
|
||||
SHARED_PR_MIRROR_URL = "s3://spack-binaries-prs/shared_pr_mirror"
|
||||
JOB_NAME_FORMAT = (
|
||||
"{name}{@version} {/hash:7} {%compiler.name}{@compiler.version}{ arch=architecture}"
|
||||
)
|
||||
@@ -175,11 +199,11 @@ def _remove_satisfied_deps(deps, satisfied_list):
|
||||
return nodes, edges, stages
|
||||
|
||||
|
||||
def _print_staging_summary(spec_labels, stages, mirrors_to_check, rebuild_decisions):
|
||||
def _print_staging_summary(spec_labels, stages, rebuild_decisions):
|
||||
if not stages:
|
||||
return
|
||||
|
||||
mirrors = spack.mirror.MirrorCollection(mirrors=mirrors_to_check, binary=True)
|
||||
mirrors = spack.mirror.MirrorCollection(binary=True)
|
||||
tty.msg("Checked the following mirrors for binaries:")
|
||||
for m in mirrors.values():
|
||||
tty.msg(f" {m.fetch_url}")
|
||||
@@ -226,21 +250,14 @@ def _spec_matches(spec, match_string):
|
||||
return spec.intersects(match_string)
|
||||
|
||||
|
||||
def _format_job_needs(
|
||||
dep_jobs, build_group, prune_dag, rebuild_decisions, enable_artifacts_buildcache
|
||||
):
|
||||
def _format_job_needs(dep_jobs, build_group, prune_dag, rebuild_decisions):
|
||||
needs_list = []
|
||||
for dep_job in dep_jobs:
|
||||
dep_spec_key = _spec_ci_label(dep_job)
|
||||
rebuild = rebuild_decisions[dep_spec_key].rebuild
|
||||
|
||||
if not prune_dag or rebuild:
|
||||
needs_list.append(
|
||||
{
|
||||
"job": get_job_name(dep_job, build_group),
|
||||
"artifacts": enable_artifacts_buildcache,
|
||||
}
|
||||
)
|
||||
needs_list.append({"job": get_job_name(dep_job, build_group), "artifacts": False})
|
||||
return needs_list
|
||||
|
||||
|
||||
@@ -384,12 +401,6 @@ def __init__(self, ci_config, spec_labels, stages):
|
||||
|
||||
self.ir = {
|
||||
"jobs": {},
|
||||
"temporary-storage-url-prefix": self.ci_config.get(
|
||||
"temporary-storage-url-prefix", None
|
||||
),
|
||||
"enable-artifacts-buildcache": self.ci_config.get(
|
||||
"enable-artifacts-buildcache", False
|
||||
),
|
||||
"rebuild-index": self.ci_config.get("rebuild-index", True),
|
||||
"broken-specs-url": self.ci_config.get("broken-specs-url", None),
|
||||
"broken-tests-packages": self.ci_config.get("broken-tests-packages", []),
|
||||
@@ -405,9 +416,20 @@ def __init__(self, ci_config, spec_labels, stages):
|
||||
if name not in ["any", "build"]:
|
||||
jobs[name] = self.__init_job("")
|
||||
|
||||
def __init_job(self, spec):
|
||||
def __init_job(self, release_spec):
|
||||
"""Initialize job object"""
|
||||
return {"spec": spec, "attributes": {}}
|
||||
job_object = {"spec": release_spec, "attributes": {}}
|
||||
if release_spec:
|
||||
job_vars = job_object["attributes"].setdefault("variables", {})
|
||||
job_vars["SPACK_JOB_SPEC_DAG_HASH"] = release_spec.dag_hash()
|
||||
job_vars["SPACK_JOB_SPEC_PKG_NAME"] = release_spec.name
|
||||
job_vars["SPACK_JOB_SPEC_PKG_VERSION"] = release_spec.format("{version}")
|
||||
job_vars["SPACK_JOB_SPEC_COMPILER_NAME"] = release_spec.format("{compiler.name}")
|
||||
job_vars["SPACK_JOB_SPEC_COMPILER_VERSION"] = release_spec.format("{compiler.version}")
|
||||
job_vars["SPACK_JOB_SPEC_ARCH"] = release_spec.format("{architecture}")
|
||||
job_vars["SPACK_JOB_SPEC_VARIANTS"] = release_spec.format("{variants}")
|
||||
|
||||
return job_object
|
||||
|
||||
def __is_named(self, section):
|
||||
"""Check if a pipeline-gen configuration section is for a named job,
|
||||
@@ -500,6 +522,7 @@ def generate_ir(self):
         for section in reversed(pipeline_gen):
             name = self.__is_named(section)
             has_submapping = "submapping" in section
+            has_dynmapping = "dynamic-mapping" in section
             section = cfg.InternalConfigScope._process_dict_keyname_overrides(section)

             if name:
@@ -542,6 +565,108 @@ def _apply_section(dest, src):
                         job["attributes"] = self.__apply_submapping(
                             job["attributes"], job["spec"], section
                         )
+            elif has_dynmapping:
+                mapping = section["dynamic-mapping"]
+
+                dynmap_name = mapping.get("name")
+
+                # Check if this section should be skipped
+                dynmap_skip = os.environ.get("SPACK_CI_SKIP_DYNAMIC_MAPPING")
+                if dynmap_name and dynmap_skip:
+                    if re.match(dynmap_skip, dynmap_name):
+                        continue
+
+                # Get the endpoint
+                endpoint = mapping["endpoint"]
+                endpoint_url = urlparse(endpoint)
+
+                # Configure the request header
+                header = {"User-Agent": web_util.SPACK_USER_AGENT}
+                header.update(mapping.get("header", {}))
+
+                # Expand header environment variables
+                # ie. if tokens are passed
+                for value in header.values():
+                    value = os.path.expandvars(value)
+
+                verify_ssl = mapping.get("verify_ssl", spack.config.get("config:verify_ssl", True))
+                timeout = mapping.get("timeout", spack.config.get("config:connect_timeout", 1))
+
+                required = mapping.get("require", [])
+                allowed = mapping.get("allow", [])
+                ignored = mapping.get("ignore", [])
+
+                # required keys are implicitly allowed
+                allowed = sorted(set(allowed + required))
+                ignored = sorted(set(ignored))
+                required = sorted(set(required))
+
+                # Make sure required things are not also ignored
+                assert not any([ikey in required for ikey in ignored])
+
+                def job_query(job):
+                    job_vars = job["attributes"]["variables"]
+                    query = (
+                        "{SPACK_JOB_SPEC_PKG_NAME}@{SPACK_JOB_SPEC_PKG_VERSION}"
+                        # The preceding spaces are required (ref. https://github.com/spack/spack-gantry/blob/develop/docs/api.md#allocation)
+                        " {SPACK_JOB_SPEC_VARIANTS}"
+                        " arch={SPACK_JOB_SPEC_ARCH}"
+                        "%{SPACK_JOB_SPEC_COMPILER_NAME}@{SPACK_JOB_SPEC_COMPILER_VERSION}"
+                    ).format_map(job_vars)
+                    return f"spec={quote(query)}"
+
+                for job in jobs.values():
+                    if not job["spec"]:
+                        continue
+
+                    # Create request for this job
+                    query = job_query(job)
+                    request = Request(
+                        endpoint_url._replace(query=query).geturl(), headers=header, method="GET"
+                    )
+                    try:
+                        response = _dyn_mapping_urlopener(
+                            request, verify_ssl=verify_ssl, timeout=timeout
+                        )
+                    except Exception as e:
+                        # For now just ignore any errors from dynamic mapping and continue
+                        # This is still experimental, and failures should not stop CI
+                        # from running normally
+                        tty.warn(f"Failed to fetch dynamic mapping for query:\n\t{query}")
+                        tty.warn(f"{e}")
+                        continue
+
+                    config = json.load(codecs.getreader("utf-8")(response))
+
+                    # Strip ignore keys
+                    if ignored:
+                        for key in ignored:
+                            if key in config:
+                                config.pop(key)
+
+                    # Only keep allowed keys
+                    clean_config = {}
+                    if allowed:
+                        for key in allowed:
+                            if key in config:
+                                clean_config[key] = config[key]
+                    else:
+                        clean_config = config
+
+                    # Verify all of the required keys are present
+                    if required:
+                        missing_keys = []
+                        for key in required:
+                            if key not in clean_config.keys():
+                                missing_keys.append(key)
+
+                        if missing_keys:
+                            tty.warn(f"Response missing required keys: {missing_keys}")
+
+                    if clean_config:
+                        job["attributes"] = spack.config.merge_yaml(
+                            job.get("attributes", {}), clean_config
+                        )

         for _, job in jobs.items():
             if job["spec"]:
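The dynamic-mapping section queries an external service once per job and merges whatever it returns into the job attributes, guarded by require/allow/ignore key filters. A self-contained sketch of the query construction and response filtering; the endpoint URL and the response payload here are made up:

    from urllib.parse import quote, urlparse

    job_vars = {
        "SPACK_JOB_SPEC_PKG_NAME": "zlib",
        "SPACK_JOB_SPEC_PKG_VERSION": "1.3",
        "SPACK_JOB_SPEC_VARIANTS": "+shared",
        "SPACK_JOB_SPEC_ARCH": "linux-ubuntu22.04-x86_64",
        "SPACK_JOB_SPEC_COMPILER_NAME": "gcc",
        "SPACK_JOB_SPEC_COMPILER_VERSION": "13.2.0",
    }
    query = (
        "{SPACK_JOB_SPEC_PKG_NAME}@{SPACK_JOB_SPEC_PKG_VERSION}"
        " {SPACK_JOB_SPEC_VARIANTS}"
        " arch={SPACK_JOB_SPEC_ARCH}"
        "%{SPACK_JOB_SPEC_COMPILER_NAME}@{SPACK_JOB_SPEC_COMPILER_VERSION}"
    ).format_map(job_vars)
    url = urlparse("https://mapper.example.com/v1/allocation")  # hypothetical endpoint
    print(url._replace(query=f"spec={quote(query)}").geturl())

    # Filtering a made-up response: required keys are implicitly allowed,
    # ignored keys are stripped, anything outside "allow" is dropped.
    response = {"variables": {"CPU_REQUEST": "4"}, "tags": ["x86_64"], "image": "ubuntu:22.04"}
    required, allowed, ignored = ["variables"], ["variables", "tags"], ["image"]
    clean = {k: v for k, v in response.items() if k in allowed and k not in ignored}
    missing = [k for k in required if k not in clean]
    print(clean, "missing:", missing)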
@@ -558,14 +683,13 @@ def generate_gitlab_ci_yaml(
     prune_dag=False,
     check_index_only=False,
     artifacts_root=None,
-    remote_mirror_override=None,
 ):
     """Generate a gitlab yaml file to run a dynamic child pipeline from
     the spec matrix in the active environment.

     Arguments:
         env (spack.environment.Environment): Activated environment object
-            which must contain a gitlab-ci section describing how to map
+            which must contain a ci section describing how to map
             specs to runners
         print_summary (bool): Should we print a summary of all the jobs in
             the stages in which they were placed.
@@ -580,39 +704,21 @@ def generate_gitlab_ci_yaml(
         artifacts_root (str): Path where artifacts like logs, environment
             files (spack.yaml, spack.lock), etc should be written. GitLab
             requires this to be within the project directory.
-        remote_mirror_override (str): Typically only needed when one spack.yaml
-            is used to populate several mirrors with binaries, based on some
-            criteria. Spack protected pipelines populate different mirrors based
-            on branch name, facilitated by this option. DEPRECATED
     """
     with spack.concretize.disable_compiler_existence_check():
         with env.write_transaction():
             env.concretize()
             env.write()

     yaml_root = env.manifest[ev.TOP_LEVEL_KEY]

     # Get the joined "ci" config with all of the current scopes resolved
     ci_config = cfg.get("ci")

-    config_deprecated = False
     if not ci_config:
-        tty.warn("Environment does not have `ci` a configuration")
-        gitlabci_config = yaml_root.get("gitlab-ci")
-        if not gitlabci_config:
-            tty.die("Environment yaml does not have `gitlab-ci` config section. Cannot recover.")
-
-        tty.warn(
-            "The `gitlab-ci` configuration is deprecated in favor of `ci`.\n",
-            "To update run \n\t$ spack env update /path/to/ci/spack.yaml",
-        )
-        translate_deprecated_config(gitlabci_config)
-        ci_config = gitlabci_config
-        config_deprecated = True
+        raise SpackCIError("Environment does not have a `ci` configuration")

     # Default target is gitlab...and only target is gitlab
     if not ci_config.get("target", "gitlab") == "gitlab":
-        tty.die('Spack CI module only generates target "gitlab"')
+        raise SpackCIError('Spack CI module only generates target "gitlab"')

     cdash_config = cfg.get("cdash")
     cdash_handler = CDashHandler(cdash_config) if "build-group" in cdash_config else None
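Where the old code warned and tried to translate a deprecated gitlab-ci section, the new code fails fast. A compact runnable sketch of the two checks, with a local exception class standing in for the real SpackCIError defined later in this module:

    class SpackCIError(Exception):  # stand-in; the real one subclasses spack.error.SpackError
        pass

    def validate_ci_config(ci_config):
        if not ci_config:
            raise SpackCIError("Environment does not have a `ci` configuration")
        if ci_config.get("target", "gitlab") != "gitlab":
            raise SpackCIError('Spack CI module only generates target "gitlab"')

    validate_ci_config({"target": "gitlab", "pipeline-gen": []})  # passes silently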
@@ -673,12 +779,6 @@ def generate_gitlab_ci_yaml(
     spack_pipeline_type = os.environ.get("SPACK_PIPELINE_TYPE", None)

     copy_only_pipeline = spack_pipeline_type == "spack_copy_only"
-    if copy_only_pipeline and config_deprecated:
-        tty.warn(
-            "SPACK_PIPELINE_TYPE=spack_copy_only is not supported when using\n",
-            "deprecated ci configuration, a no-op pipeline will be generated\n",
-            "instead.",
-        )

     def ensure_expected_target_path(path):
         """Returns passed paths with all Windows path separators exchanged
@@ -697,38 +797,16 @@ def ensure_expected_target_path(path):
         return path

     pipeline_mirrors = spack.mirror.MirrorCollection(binary=True)
-    deprecated_mirror_config = False
-    buildcache_destination = None
-    if "buildcache-destination" in pipeline_mirrors:
-        if remote_mirror_override:
-            tty.die(
-                "Using the deprecated --buildcache-destination cli option and "
-                "having a mirror named 'buildcache-destination' at the same time "
-                "is not allowed"
-            )
-        buildcache_destination = pipeline_mirrors["buildcache-destination"]
-    else:
-        deprecated_mirror_config = True
-        # TODO: This will be an error in Spack 0.23
+    if "buildcache-destination" not in pipeline_mirrors:
+        raise SpackCIError("spack ci generate requires a mirror named 'buildcache-destination'")

-    # TODO: Remove this block in spack 0.23
-    remote_mirror_url = None
-    if deprecated_mirror_config:
-        if "mirrors" not in yaml_root or len(yaml_root["mirrors"].values()) < 1:
-            tty.die("spack ci generate requires an env containing a mirror")
-
-        ci_mirrors = yaml_root["mirrors"]
-        mirror_urls = [url for url in ci_mirrors.values()]
-        remote_mirror_url = mirror_urls[0]
+    buildcache_destination = pipeline_mirrors["buildcache-destination"]

     spack_buildcache_copy = os.environ.get("SPACK_COPY_BUILDCACHE", None)
     if spack_buildcache_copy:
         buildcache_copies = {}
-        buildcache_copy_src_prefix = (
-            buildcache_destination.fetch_url
-            if buildcache_destination
-            else remote_mirror_override or remote_mirror_url
-        )
+        buildcache_copy_src_prefix = buildcache_destination.fetch_url
         buildcache_copy_dest_prefix = spack_buildcache_copy

     # Check for a list of "known broken" specs that we should not bother
@@ -738,55 +816,10 @@ def ensure_expected_target_path(path):
     if "broken-specs-url" in ci_config:
         broken_specs_url = ci_config["broken-specs-url"]

-    enable_artifacts_buildcache = False
-    if "enable-artifacts-buildcache" in ci_config:
-        tty.warn("Support for enable-artifacts-buildcache will be removed in Spack 0.23")
-        enable_artifacts_buildcache = ci_config["enable-artifacts-buildcache"]
-
     rebuild_index_enabled = True
     if "rebuild-index" in ci_config and ci_config["rebuild-index"] is False:
         rebuild_index_enabled = False

-    temp_storage_url_prefix = None
-    if "temporary-storage-url-prefix" in ci_config:
-        tty.warn("Support for temporary-storage-url-prefix will be removed in Spack 0.23")
-        temp_storage_url_prefix = ci_config["temporary-storage-url-prefix"]
-
-    # If a remote mirror override (alternate buildcache destination) was
-    # specified, add it here in case it has already built hashes we might
-    # generate.
-    # TODO: Remove this block in Spack 0.23
-    mirrors_to_check = None
-    if deprecated_mirror_config and remote_mirror_override:
-        if spack_pipeline_type == "spack_protected_branch":
-            # Overriding the main mirror in this case might result
-            # in skipping jobs on a release pipeline because specs are
-            # up to date in develop. Eventually we want to notice and take
-            # advantage of this by scheduling a job to copy the spec from
-            # develop to the release, but until we have that, this makes
-            # sure we schedule a rebuild job if the spec isn't already in
-            # override mirror.
-            mirrors_to_check = {"override": remote_mirror_override}
-
-        # If we have a remote override and we want generate pipeline using
-        # --check-index-only, then the override mirror needs to be added to
-        # the configured mirrors when bindist.update() is run, or else we
-        # won't fetch its index and include in our local cache.
-        spack.mirror.add(
-            spack.mirror.Mirror(remote_mirror_override, name="ci_pr_mirror"),
-            cfg.default_modify_scope(),
-        )
-
-    # TODO: Remove this block in Spack 0.23
-    shared_pr_mirror = None
-    if deprecated_mirror_config and spack_pipeline_type == "spack_pull_request":
-        stack_name = os.environ.get("SPACK_CI_STACK_NAME", "")
-        shared_pr_mirror = url_util.join(SHARED_PR_MIRROR_URL, stack_name)
-        spack.mirror.add(
-            spack.mirror.Mirror(shared_pr_mirror, name="ci_shared_pr_mirror"),
-            cfg.default_modify_scope(),
-        )
-
     pipeline_artifacts_dir = artifacts_root
     if not pipeline_artifacts_dir:
         proj_dir = os.environ.get("CI_PROJECT_DIR", os.getcwd())
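All of the mirror-override and shared-PR-mirror plumbing collapses to one invariant: a binary mirror named 'buildcache-destination' must exist. A sketch of that check with a plain dict standing in for spack.mirror.MirrorCollection (the URL is made up):

    pipeline_mirrors = {"buildcache-destination": {"fetch_url": "s3://example-bucket/develop"}}

    if "buildcache-destination" not in pipeline_mirrors:
        raise RuntimeError("spack ci generate requires a mirror named 'buildcache-destination'")

    buildcache_destination = pipeline_mirrors["buildcache-destination"]
    print(buildcache_destination["fetch_url"])  # s3://example-bucket/develop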
@@ -795,9 +828,8 @@ def ensure_expected_target_path(path):
     pipeline_artifacts_dir = os.path.abspath(pipeline_artifacts_dir)
     concrete_env_dir = os.path.join(pipeline_artifacts_dir, "concrete_environment")

-    # Now that we've added the mirrors we know about, they should be properly
-    # reflected in the environment manifest file, so copy that into the
-    # concrete environment directory, along with the spack.lock file.
+    # Copy the environment manifest file into the concrete environment directory,
+    # along with the spack.lock file.
     if not os.path.exists(concrete_env_dir):
         os.makedirs(concrete_env_dir)
     shutil.copyfile(env.manifest_path, os.path.join(concrete_env_dir, "spack.yaml"))
@@ -822,18 +854,12 @@ def ensure_expected_target_path(path):
     env_includes.extend(include_scopes)
     env_yaml_root["spack"]["include"] = [ensure_expected_target_path(i) for i in env_includes]

-    if "gitlab-ci" in env_yaml_root["spack"] and "ci" not in env_yaml_root["spack"]:
-        env_yaml_root["spack"]["ci"] = env_yaml_root["spack"].pop("gitlab-ci")
-        translate_deprecated_config(env_yaml_root["spack"]["ci"])
-
     with open(os.path.join(concrete_env_dir, "spack.yaml"), "w") as fd:
         fd.write(syaml.dump_config(env_yaml_root, default_flow_style=False))

     job_log_dir = os.path.join(pipeline_artifacts_dir, "logs")
     job_repro_dir = os.path.join(pipeline_artifacts_dir, "reproduction")
     job_test_dir = os.path.join(pipeline_artifacts_dir, "tests")
-    # TODO: Remove this line in Spack 0.23
-    local_mirror_dir = os.path.join(pipeline_artifacts_dir, "mirror")
     user_artifacts_dir = os.path.join(pipeline_artifacts_dir, "user_data")

     # We communicate relative paths to the downstream jobs to avoid issues in
@@ -847,8 +873,6 @@ def ensure_expected_target_path(path):
     rel_job_log_dir = os.path.relpath(job_log_dir, ci_project_dir)
     rel_job_repro_dir = os.path.relpath(job_repro_dir, ci_project_dir)
     rel_job_test_dir = os.path.relpath(job_test_dir, ci_project_dir)
-    # TODO: Remove this line in Spack 0.23
-    rel_local_mirror_dir = os.path.join(local_mirror_dir, ci_project_dir)
     rel_user_artifacts_dir = os.path.relpath(user_artifacts_dir, ci_project_dir)

     # Speed up staging by first fetching binary indices from all mirrors
@@ -910,7 +934,7 @@ def ensure_expected_target_path(path):
                 continue

             up_to_date_mirrors = bindist.get_mirrors_for_spec(
-                spec=release_spec, mirrors_to_check=mirrors_to_check, index_only=check_index_only
+                spec=release_spec, index_only=check_index_only
             )

             spec_record.rebuild = not up_to_date_mirrors
@@ -952,36 +976,16 @@ def main_script_replacements(cmd):

             job_name = get_job_name(release_spec, build_group)

-            job_vars = job_object.setdefault("variables", {})
-            job_vars["SPACK_JOB_SPEC_DAG_HASH"] = release_spec_dag_hash
-            job_vars["SPACK_JOB_SPEC_PKG_NAME"] = release_spec.name
-            job_vars["SPACK_JOB_SPEC_PKG_VERSION"] = release_spec.format("{version}")
-            job_vars["SPACK_JOB_SPEC_COMPILER_NAME"] = release_spec.format("{compiler.name}")
-            job_vars["SPACK_JOB_SPEC_COMPILER_VERSION"] = release_spec.format("{compiler.version}")
-            job_vars["SPACK_JOB_SPEC_ARCH"] = release_spec.format("{architecture}")
-            job_vars["SPACK_JOB_SPEC_VARIANTS"] = release_spec.format("{variants}")
-
             job_object["needs"] = []
             if spec_label in dependencies:
-                if enable_artifacts_buildcache:
-                    # Get dependencies transitively, so they're all
-                    # available in the artifacts buildcache.
-                    dep_jobs = [d for d in release_spec.traverse(deptype="all", root=False)]
-                else:
-                    # In this case, "needs" is only used for scheduling
-                    # purposes, so we only get the direct dependencies.
-                    dep_jobs = []
-                    for dep_label in dependencies[spec_label]:
-                        dep_jobs.append(spec_labels[dep_label])
+                # In this case, "needs" is only used for scheduling
+                # purposes, so we only get the direct dependencies.
+                dep_jobs = []
+                for dep_label in dependencies[spec_label]:
+                    dep_jobs.append(spec_labels[dep_label])

                 job_object["needs"].extend(
-                    _format_job_needs(
-                        dep_jobs,
-                        build_group,
-                        prune_dag,
-                        rebuild_decisions,
-                        enable_artifacts_buildcache,
-                    )
+                    _format_job_needs(dep_jobs, build_group, prune_dag, rebuild_decisions)
                 )

             rebuild_spec = spec_record.rebuild
@@ -1038,6 +1042,7 @@ def main_script_replacements(cmd):

             # Let downstream jobs know whether the spec needed rebuilding, regardless
             # whether DAG pruning was enabled or not.
+            job_vars = job_object["variables"]
             job_vars["SPACK_SPEC_NEEDS_REBUILD"] = str(rebuild_spec)

             if cdash_handler:
@@ -1062,19 +1067,6 @@ def main_script_replacements(cmd):
                 },
             )

-            # TODO: Remove this block in Spack 0.23
-            if enable_artifacts_buildcache:
-                bc_root = os.path.join(local_mirror_dir, "build_cache")
-                job_object["artifacts"]["paths"].extend(
-                    [
-                        os.path.join(bc_root, p)
-                        for p in [
-                            bindist.tarball_name(release_spec, ".spec.json"),
-                            bindist.tarball_directory_name(release_spec),
-                        ]
-                    ]
-                )
-
             job_object["stage"] = stage_name
             job_object["retry"] = {"max": 2, "when": JOB_RETRY_CONDITIONS}
             job_object["interruptible"] = True
@@ -1089,15 +1081,7 @@ def main_script_replacements(cmd):
         job_id += 1

     if print_summary:
-        _print_staging_summary(spec_labels, stages, mirrors_to_check, rebuild_decisions)
-
-    # Clean up remote mirror override if enabled
-    # TODO: Remove this block in Spack 0.23
-    if deprecated_mirror_config:
-        if remote_mirror_override:
-            spack.mirror.remove("ci_pr_mirror", cfg.default_modify_scope())
-        if spack_pipeline_type == "spack_pull_request":
-            spack.mirror.remove("ci_shared_pr_mirror", cfg.default_modify_scope())
+        _print_staging_summary(spec_labels, stages, rebuild_decisions)

     tty.debug(f"{job_id} build jobs generated in {stage_id} stages")
@@ -1119,7 +1103,7 @@ def main_script_replacements(cmd):
         "when": ["runner_system_failure", "stuck_or_timeout_failure", "script_failure"],
     }

-    if copy_only_pipeline and not config_deprecated:
+    if copy_only_pipeline:
         stage_names.append("copy")
         sync_job = copy.deepcopy(spack_ci_ir["jobs"]["copy"]["attributes"])
         sync_job["stage"] = "copy"
@@ -1129,17 +1113,12 @@ def main_script_replacements(cmd):
         if "variables" not in sync_job:
             sync_job["variables"] = {}

-        sync_job["variables"]["SPACK_COPY_ONLY_DESTINATION"] = (
-            buildcache_destination.fetch_url
-            if buildcache_destination
-            else remote_mirror_override or remote_mirror_url
-        )
+        sync_job["variables"]["SPACK_COPY_ONLY_DESTINATION"] = buildcache_destination.fetch_url

-        if "buildcache-source" in pipeline_mirrors:
-            buildcache_source = pipeline_mirrors["buildcache-source"].fetch_url
-        else:
-            # TODO: Remove this condition in Spack 0.23
-            buildcache_source = os.environ.get("SPACK_SOURCE_MIRROR", None)
+        if "buildcache-source" not in pipeline_mirrors:
+            raise SpackCIError("Copy-only pipelines require a mirror named 'buildcache-source'")
+
+        buildcache_source = pipeline_mirrors["buildcache-source"].fetch_url
         sync_job["variables"]["SPACK_BUILDCACHE_SOURCE"] = buildcache_source
         sync_job["dependencies"] = []
@@ -1147,27 +1126,6 @@ def main_script_replacements(cmd):
         job_id += 1

     if job_id > 0:
-        # TODO: Remove this block in Spack 0.23
-        if temp_storage_url_prefix:
-            # There were some rebuild jobs scheduled, so we will need to
-            # schedule a job to clean up the temporary storage location
-            # associated with this pipeline.
-            stage_names.append("cleanup-temp-storage")
-            cleanup_job = copy.deepcopy(spack_ci_ir["jobs"]["cleanup"]["attributes"])
-
-            cleanup_job["stage"] = "cleanup-temp-storage"
-            cleanup_job["when"] = "always"
-            cleanup_job["retry"] = service_job_retries
-            cleanup_job["interruptible"] = True
-
-            cleanup_job["script"] = _unpack_script(
-                cleanup_job["script"],
-                op=lambda cmd: cmd.replace("mirror_prefix", temp_storage_url_prefix),
-            )
-
-            cleanup_job["dependencies"] = []
-            output_object["cleanup"] = cleanup_job
-
         if (
             "script" in spack_ci_ir["jobs"]["signing"]["attributes"]
             and spack_pipeline_type == "spack_protected_branch"
@@ -1184,11 +1142,9 @@ def main_script_replacements(cmd):
             signing_job["interruptible"] = True
             if "variables" not in signing_job:
                 signing_job["variables"] = {}
-            signing_job["variables"]["SPACK_BUILDCACHE_DESTINATION"] = (
-                buildcache_destination.push_url  # need the s3 url for aws s3 sync
-                if buildcache_destination
-                else remote_mirror_override or remote_mirror_url
-            )
+            signing_job["variables"][
+                "SPACK_BUILDCACHE_DESTINATION"
+            ] = buildcache_destination.push_url
             signing_job["dependencies"] = []

             output_object["sign-pkgs"] = signing_job
@@ -1199,9 +1155,7 @@ def main_script_replacements(cmd):
         final_job = spack_ci_ir["jobs"]["reindex"]["attributes"]

         final_job["stage"] = "stage-rebuild-index"
-        target_mirror = remote_mirror_override or remote_mirror_url
-        if buildcache_destination:
-            target_mirror = buildcache_destination.push_url
+        target_mirror = buildcache_destination.push_url
         final_job["script"] = _unpack_script(
             final_job["script"],
             op=lambda cmd: cmd.replace("{index_target_mirror}", target_mirror),
@@ -1227,17 +1181,11 @@ def main_script_replacements(cmd):
         "SPACK_CONCRETE_ENV_DIR": rel_concrete_env_dir,
         "SPACK_VERSION": spack_version,
         "SPACK_CHECKOUT_VERSION": version_to_clone,
-        # TODO: Remove this line in Spack 0.23
-        "SPACK_REMOTE_MIRROR_URL": remote_mirror_url,
         "SPACK_JOB_LOG_DIR": rel_job_log_dir,
         "SPACK_JOB_REPRO_DIR": rel_job_repro_dir,
         "SPACK_JOB_TEST_DIR": rel_job_test_dir,
-        # TODO: Remove this line in Spack 0.23
-        "SPACK_LOCAL_MIRROR_DIR": rel_local_mirror_dir,
         "SPACK_PIPELINE_TYPE": str(spack_pipeline_type),
         "SPACK_CI_STACK_NAME": os.environ.get("SPACK_CI_STACK_NAME", "None"),
-        # TODO: Remove this line in Spack 0.23
-        "SPACK_CI_SHARED_PR_MIRROR_URL": shared_pr_mirror or "None",
         "SPACK_REBUILD_CHECK_UP_TO_DATE": str(prune_dag),
         "SPACK_REBUILD_EVERYTHING": str(rebuild_everything),
         "SPACK_REQUIRE_SIGNING": os.environ.get("SPACK_REQUIRE_SIGNING", "False"),
@@ -1246,10 +1194,6 @@ def main_script_replacements(cmd):
     for item, val in output_vars.items():
         output_vars[item] = ensure_expected_target_path(val)

-    # TODO: Remove this block in Spack 0.23
-    if deprecated_mirror_config and remote_mirror_override:
-        (output_object["variables"]["SPACK_REMOTE_MIRROR_OVERRIDE"]) = remote_mirror_override
-
     spack_stack_name = os.environ.get("SPACK_CI_STACK_NAME", None)
     if spack_stack_name:
         output_object["variables"]["SPACK_CI_STACK_NAME"] = spack_stack_name
@@ -1272,17 +1216,12 @@ def main_script_replacements(cmd):
     else:
         # No jobs were generated
        noop_job = spack_ci_ir["jobs"]["noop"]["attributes"]
-        noop_job["retry"] = service_job_retries
+        # If this job fails ignore the status and carry on
+        noop_job["retry"] = 0
+        noop_job["allow_failure"] = True

-        if copy_only_pipeline and config_deprecated:
-            tty.debug("Generating no-op job as copy-only is unsupported here.")
-            noop_job["script"] = [
-                'echo "copy-only pipelines are not supported with deprecated ci configs"'
-            ]
-            output_object = {"unsupported-copy": noop_job}
-        else:
-            tty.debug("No specs to rebuild, generating no-op job")
-            output_object = {"no-specs-to-rebuild": noop_job}
+        tty.debug("No specs to rebuild, generating no-op job")
+        output_object = {"no-specs-to-rebuild": noop_job}

     # Ensure the child pipeline always runs
     output_object["workflow"] = {"rules": [{"when": "always"}]}
@@ -2320,83 +2259,6 @@ def report_skipped(self, spec: spack.spec.Spec, report_dir: str, reason: Optional[str]):
         reporter.test_skipped_report(report_dir, spec, reason)


-def translate_deprecated_config(config):
-    # Remove all deprecated keys from config
-    mappings = config.pop("mappings", [])
-    match_behavior = config.pop("match_behavior", "first")
-
-    build_job = {}
-    if "image" in config:
-        build_job["image"] = config.pop("image")
-    if "tags" in config:
-        build_job["tags"] = config.pop("tags")
-    if "variables" in config:
-        build_job["variables"] = config.pop("variables")
-
-    # Scripts always override in old CI
-    if "before_script" in config:
-        build_job["before_script:"] = config.pop("before_script")
-    if "script" in config:
-        build_job["script:"] = config.pop("script")
-    if "after_script" in config:
-        build_job["after_script:"] = config.pop("after_script")
-
-    signing_job = None
-    if "signing-job-attributes" in config:
-        signing_job = {"signing-job": config.pop("signing-job-attributes")}
-
-    service_job_attributes = None
-    if "service-job-attributes" in config:
-        service_job_attributes = config.pop("service-job-attributes")
-
-    # If this config already has pipeline-gen do not more
-    if "pipeline-gen" in config:
-        return True if mappings or build_job or signing_job or service_job_attributes else False
-
-    config["target"] = "gitlab"
-
-    config["pipeline-gen"] = []
-    pipeline_gen = config["pipeline-gen"]
-
-    # Build Job
-    submapping = []
-    for section in mappings:
-        submapping_section = {"match": section["match"]}
-        if "runner-attributes" in section:
-            remapped_attributes = {}
-            if match_behavior == "first":
-                for key, value in section["runner-attributes"].items():
-                    # Scripts always override in old CI
-                    if key == "script":
-                        remapped_attributes["script:"] = value
-                    elif key == "before_script":
-                        remapped_attributes["before_script:"] = value
-                    elif key == "after_script":
-                        remapped_attributes["after_script:"] = value
-                    else:
-                        remapped_attributes[key] = value
-            else:
-                # Handle "merge" behavior be allowing scripts to merge in submapping section
-                remapped_attributes = section["runner-attributes"]
-            submapping_section["build-job"] = remapped_attributes
-
-        if "remove-attributes" in section:
-            # Old format only allowed tags in this section, so no extra checks are needed
-            submapping_section["build-job-remove"] = section["remove-attributes"]
-        submapping.append(submapping_section)
-    pipeline_gen.append({"submapping": submapping, "match_behavior": match_behavior})
-
-    if build_job:
-        pipeline_gen.append({"build-job": build_job})
-
-    # Signing Job
-    if signing_job:
-        pipeline_gen.append(signing_job)
-
-    # Service Jobs
-    if service_job_attributes:
-        pipeline_gen.append({"reindex-job": service_job_attributes})
-        pipeline_gen.append({"noop-job": service_job_attributes})
-        pipeline_gen.append({"cleanup-job": service_job_attributes})
-
-    return True
+class SpackCIError(spack.error.SpackError):
+    def __init__(self, msg):
+        super().__init__(msg)
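With translate_deprecated_config gone, SpackCIError becomes the module's single failure channel. A runnable sketch of how callers observe it, using a local stand-in for spack.error.SpackError so the snippet runs without Spack installed:

    class SpackError(Exception):  # stand-in for spack.error.SpackError
        @property
        def message(self):
            return str(self)

    class SpackCIError(SpackError):
        def __init__(self, msg):
            super().__init__(msg)

    try:
        raise SpackCIError("spack ci generate requires a mirror named 'buildcache-destination'")
    except SpackError as e:
        print(e.message)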
@@ -17,6 +17,7 @@
 from llnl.util.tty.colify import colify
 from llnl.util.tty.color import colorize

+import spack.concretize
 import spack.config  # breaks a cycle.
 import spack.environment as ev
 import spack.error
@@ -173,10 +174,29 @@ def parse_specs(
     arg_string = " ".join([quote_kvp(arg) for arg in args])

     specs = spack.parser.parse(arg_string)
-    for spec in specs:
-        if concretize:
-            spec.concretize(tests=tests)
-    return specs
+    if not concretize:
+        return specs
+
+    to_concretize = [(s, None) for s in specs]
+    return _concretize_spec_pairs(to_concretize, tests=tests)
+
+
+def _concretize_spec_pairs(to_concretize, tests=False):
+    """Helper method that concretizes abstract specs from a list of abstract,concrete pairs.
+
+    Any spec with a concrete spec associated with it will concretize to that spec. Any spec
+    with ``None`` for its concrete spec will be newly concretized. This method respects unification
+    rules from config."""
+    unify = spack.config.get("concretizer:unify", False)
+
+    concretize_method = spack.concretize.concretize_separately  # unify: false
+    if unify is True:
+        concretize_method = spack.concretize.concretize_together
+    elif unify == "when_possible":
+        concretize_method = spack.concretize.concretize_together_when_possible
+
+    concretized = concretize_method(to_concretize, tests=tests)
+    return [concrete for _, concrete in concretized]


 def matching_spec_from_env(spec):
@@ -192,6 +212,22 @@ def matching_spec_from_env(spec):
     return spec.concretized()


+def matching_specs_from_env(specs):
+    """
+    Same as ``matching_spec_from_env`` but respects spec unification rules.
+
+    For each spec, if there is a matching spec in the environment it is used. If no
+    matching spec is found, this will return the given spec but concretized in the
+    context of the active environment and other given specs, with unification rules applied.
+    """
+    env = ev.active_environment()
+    spec_pairs = [(spec, env.matching_spec(spec) if env else None) for spec in specs]
+    additional_concrete_specs = (
+        [(concrete, concrete) for _, concrete in env.concretized_specs()] if env else []
+    )
+    return _concretize_spec_pairs(spec_pairs + additional_concrete_specs)[: len(spec_pairs)]
+
+
 def disambiguate_spec(spec, env, local=False, installed=True, first=False):
     """Given a spec, figure out which installed package it refers to.
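The pair-based helper lets already-concrete specs ride along unchanged while the concretizer:unify setting picks the solver strategy. A sketch of that dispatch with trivial functions in place of spack.concretize's real implementations:

    def concretize_separately(pairs, tests=False):
        # Pre-paired concrete specs pass through; the rest get "solved" here.
        return [(a, c if c is not None else f"concrete({a})") for a, c in pairs]

    concretize_together = concretize_together_when_possible = concretize_separately

    def pick_method(unify):
        if unify is True:
            return concretize_together
        if unify == "when_possible":
            return concretize_together_when_possible
        return concretize_separately  # unify: false

    pairs = [("zlib", None), ("openssl", "concrete(openssl)")]
    print([c for _, c in pick_method("when_possible")(pairs)])
    # -> ['concrete(zlib)', 'concrete(openssl)']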
@@ -19,12 +19,23 @@


 def setup_parser(subparser):
+    # DEPRECATED: equivalent to --generic --target
     subparser.add_argument(
-        "-g", "--generic-target", action="store_true", help="show the best generic target"
+        "-g",
+        "--generic-target",
+        action="store_true",
+        help="show the best generic target (deprecated)",
     )
     subparser.add_argument(
         "--known-targets", action="store_true", help="show a list of all known targets and exit"
     )
+    target_type = subparser.add_mutually_exclusive_group()
+    target_type.add_argument(
+        "--family", action="store_true", help="print generic ISA (x86_64, aarch64, ppc64le, ...)"
+    )
+    target_type.add_argument(
+        "--generic", action="store_true", help="print feature level (x86_64_v3, armv8.4a, ...)"
+    )
     parts = subparser.add_mutually_exclusive_group()
     parts2 = subparser.add_mutually_exclusive_group()
     parts.add_argument(
@@ -80,6 +91,7 @@ def display_target_group(header, target_group):

 def arch(parser, args):
     if args.generic_target:
+        # TODO: add deprecation warning in 0.24
         print(archspec.cpu.host().generic)
         return

@@ -96,6 +108,10 @@ def arch(parser, args):
     host_platform = spack.platforms.host()
     host_os = host_platform.operating_system(os_args)
     host_target = host_platform.target(target_args)
+    if args.family:
+        host_target = host_target.family
+    elif args.generic:
+        host_target = host_target.generic
     architecture = spack.spec.ArchSpec((str(host_platform), str(host_os), str(host_target)))

     if args.platform:
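The new --family and --generic flags map directly onto properties of archspec's microarchitecture objects. A short sketch using archspec (a real Spack dependency) directly; the printed values depend on the machine it runs on:

    import archspec.cpu

    host = archspec.cpu.host()
    print(host)          # e.g. zen3 (the detected microarchitecture)
    print(host.family)   # e.g. x86_64 -- what `spack arch --family` reports
    print(host.generic)  # e.g. x86_64_v3 -- what `spack arch --generic` reports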
@@ -62,13 +62,6 @@ def setup_parser(subparser):
         "path to the file where generated jobs file should be written. "
         "default is .gitlab-ci.yml in the root of the repository",
     )
-    generate.add_argument(
-        "--copy-to",
-        default=None,
-        help="path to additional directory for job files\n\n"
-        "this option provides an absolute path to a directory where the generated "
-        "jobs yaml file should be copied. default is not to copy",
-    )
     generate.add_argument(
         "--optimize",
         action="store_true",
@@ -83,12 +76,6 @@ def setup_parser(subparser):
         default=False,
         help="(DEPRECATED) disable DAG scheduling (use 'plain' dependencies)",
     )
-    generate.add_argument(
-        "--buildcache-destination",
-        default=None,
-        help="override the mirror configured in the environment\n\n"
-        "allows for pushing binaries from the generated pipeline to a different location",
-    )
     prune_group = generate.add_mutually_exclusive_group()
     prune_group.add_argument(
         "--prune-dag",
@@ -214,20 +201,10 @@ def ci_generate(args):

     env = spack.cmd.require_active_env(cmd_name="ci generate")

-    if args.copy_to:
-        tty.warn("The flag --copy-to is deprecated and will be removed in Spack 0.23")
-
-    if args.buildcache_destination:
-        tty.warn(
-            "The flag --buildcache-destination is deprecated and will be removed in Spack 0.23"
-        )
-
     output_file = args.output_file
-    copy_yaml_to = args.copy_to
     prune_dag = args.prune_dag
     index_only = args.index_only
     artifacts_root = args.artifacts_root
-    buildcache_destination = args.buildcache_destination

     if not output_file:
         output_file = os.path.abspath(".gitlab-ci.yml")
@@ -245,15 +222,8 @@ def ci_generate(args):
         prune_dag=prune_dag,
         check_index_only=index_only,
         artifacts_root=artifacts_root,
-        remote_mirror_override=buildcache_destination,
     )

-    if copy_yaml_to:
-        copy_to_dir = os.path.dirname(copy_yaml_to)
-        if not os.path.exists(copy_to_dir):
-            os.makedirs(copy_to_dir)
-        shutil.copyfile(output_file, copy_yaml_to)
-

 def ci_reindex(args):
     """rebuild the buildcache index for the remote mirror
@@ -298,22 +268,13 @@ def ci_rebuild(args):
     job_log_dir = os.environ.get("SPACK_JOB_LOG_DIR")
     job_test_dir = os.environ.get("SPACK_JOB_TEST_DIR")
     repro_dir = os.environ.get("SPACK_JOB_REPRO_DIR")
-    # TODO: Remove this in Spack 0.23
-    local_mirror_dir = os.environ.get("SPACK_LOCAL_MIRROR_DIR")
     concrete_env_dir = os.environ.get("SPACK_CONCRETE_ENV_DIR")
     ci_pipeline_id = os.environ.get("CI_PIPELINE_ID")
     ci_job_name = os.environ.get("CI_JOB_NAME")
     signing_key = os.environ.get("SPACK_SIGNING_KEY")
     job_spec_pkg_name = os.environ.get("SPACK_JOB_SPEC_PKG_NAME")
     job_spec_dag_hash = os.environ.get("SPACK_JOB_SPEC_DAG_HASH")
     spack_pipeline_type = os.environ.get("SPACK_PIPELINE_TYPE")
-    # TODO: Remove this in Spack 0.23
-    remote_mirror_override = os.environ.get("SPACK_REMOTE_MIRROR_OVERRIDE")
-    # TODO: Remove this in Spack 0.23
-    remote_mirror_url = os.environ.get("SPACK_REMOTE_MIRROR_URL")
     spack_ci_stack_name = os.environ.get("SPACK_CI_STACK_NAME")
-    # TODO: Remove this in Spack 0.23
-    shared_pr_mirror_url = os.environ.get("SPACK_CI_SHARED_PR_MIRROR_URL")
     rebuild_everything = os.environ.get("SPACK_REBUILD_EVERYTHING")
     require_signing = os.environ.get("SPACK_REQUIRE_SIGNING")
@@ -333,12 +294,10 @@ def ci_rebuild(args):
     job_log_dir = os.path.join(ci_project_dir, job_log_dir)
     job_test_dir = os.path.join(ci_project_dir, job_test_dir)
     repro_dir = os.path.join(ci_project_dir, repro_dir)
-    local_mirror_dir = os.path.join(ci_project_dir, local_mirror_dir)
     concrete_env_dir = os.path.join(ci_project_dir, concrete_env_dir)

     # Debug print some of the key environment variables we should have received
     tty.debug("pipeline_artifacts_dir = {0}".format(pipeline_artifacts_dir))
-    tty.debug("remote_mirror_url = {0}".format(remote_mirror_url))
     tty.debug("job_spec_pkg_name = {0}".format(job_spec_pkg_name))

     # Query the environment manifest to find out whether we're reporting to a
@@ -370,51 +329,11 @@ def ci_rebuild(args):
     full_rebuild = True if rebuild_everything and rebuild_everything.lower() == "true" else False

     pipeline_mirrors = spack.mirror.MirrorCollection(binary=True)
-    deprecated_mirror_config = False
-    buildcache_destination = None
-    if "buildcache-destination" in pipeline_mirrors:
-        buildcache_destination = pipeline_mirrors["buildcache-destination"]
-    else:
-        deprecated_mirror_config = True
-        # TODO: This will be an error in Spack 0.23
+    if "buildcache-destination" not in pipeline_mirrors:
+        tty.die("spack ci rebuild requires a mirror named 'buildcache-destination")

-    # If no override url exists, then just push binary package to the
-    # normal remote mirror url.
-    # TODO: Remove in Spack 0.23
-    buildcache_mirror_url = remote_mirror_override or remote_mirror_url
-    if buildcache_destination:
-        buildcache_mirror_url = buildcache_destination.push_url
-
-    # Figure out what is our temporary storage mirror: Is it artifacts
-    # buildcache? Or temporary-storage-url-prefix? In some cases we need to
-    # force something or pipelines might not have a way to propagate build
-    # artifacts from upstream to downstream jobs.
-    # TODO: Remove this in Spack 0.23
-    pipeline_mirror_url = None
-
-    # TODO: Remove this in Spack 0.23
-    temp_storage_url_prefix = None
-    if "temporary-storage-url-prefix" in ci_config:
-        temp_storage_url_prefix = ci_config["temporary-storage-url-prefix"]
-        pipeline_mirror_url = url_util.join(temp_storage_url_prefix, ci_pipeline_id)
-
-    # TODO: Remove this in Spack 0.23
-    enable_artifacts_mirror = False
-    if "enable-artifacts-buildcache" in ci_config:
-        enable_artifacts_mirror = ci_config["enable-artifacts-buildcache"]
-        if enable_artifacts_mirror or (
-            spack_is_pr_pipeline and not enable_artifacts_mirror and not temp_storage_url_prefix
-        ):
-            # If you explicitly enabled the artifacts buildcache feature, or
-            # if this is a PR pipeline but you did not enable either of the
-            # per-pipeline temporary storage features, we force the use of
-            # artifacts buildcache. Otherwise jobs will not have binary
-            # dependencies from previous stages available since we do not
-            # allow pushing binaries to the remote mirror during PR pipelines.
-            enable_artifacts_mirror = True
-            pipeline_mirror_url = url_util.path_to_file_url(local_mirror_dir)
-            mirror_msg = "artifact buildcache enabled, mirror url: {0}".format(pipeline_mirror_url)
-            tty.debug(mirror_msg)
+    buildcache_destination = pipeline_mirrors["buildcache-destination"]

     # Get the concrete spec to be built by this job.
     try:
@@ -489,48 +408,7 @@ def ci_rebuild(args):
         fd.write(spack_info.encode("utf8"))
         fd.write(b"\n")

-    pipeline_mirrors = []
-
-    # If we decided there should be a temporary storage mechanism, add that
-    # mirror now so it's used when we check for a hash match already
-    # built for this spec.
-    # TODO: Remove this block in Spack 0.23
-    if pipeline_mirror_url:
-        mirror = spack.mirror.Mirror(pipeline_mirror_url, name=spack_ci.TEMP_STORAGE_MIRROR_NAME)
-        spack.mirror.add(mirror, cfg.default_modify_scope())
-        pipeline_mirrors.append(pipeline_mirror_url)
-
-    # Check configured mirrors for a built spec with a matching hash
-    # TODO: Remove this block in Spack 0.23
-    mirrors_to_check = None
-    if remote_mirror_override:
-        if spack_pipeline_type == "spack_protected_branch":
-            # Passing "mirrors_to_check" below means we *only* look in the override
-            # mirror to see if we should skip building, which is what we want.
-            mirrors_to_check = {"override": remote_mirror_override}
-
-        # Adding this mirror to the list of configured mirrors means dependencies
-        # could be installed from either the override mirror or any other configured
-        # mirror (e.g. remote_mirror_url which is defined in the environment or
-        # pipeline_mirror_url), which is also what we want.
-        spack.mirror.add(
-            spack.mirror.Mirror(remote_mirror_override, name="mirror_override"),
-            cfg.default_modify_scope(),
-        )
-        pipeline_mirrors.append(remote_mirror_override)
-
-    # TODO: Remove this in Spack 0.23
-    if deprecated_mirror_config and spack_pipeline_type == "spack_pull_request":
-        if shared_pr_mirror_url != "None":
-            pipeline_mirrors.append(shared_pr_mirror_url)
-
-    matches = (
-        None
-        if full_rebuild
-        else bindist.get_mirrors_for_spec(
-            job_spec, mirrors_to_check=mirrors_to_check, index_only=False
-        )
-    )
+    matches = None if full_rebuild else bindist.get_mirrors_for_spec(job_spec, index_only=False)

     if matches:
         # Got a hash match on at least one configured mirror. All
@@ -542,25 +420,10 @@ def ci_rebuild(args):
         tty.msg("No need to rebuild {0}, found hash match at: ".format(job_spec_pkg_name))
         for match in matches:
             tty.msg("  {0}".format(match["mirror_url"]))
-        # TODO: Remove this block in Spack 0.23
-        if enable_artifacts_mirror:
-            matching_mirror = matches[0]["mirror_url"]
-            build_cache_dir = os.path.join(local_mirror_dir, "build_cache")
-            tty.debug("Getting {0} buildcache from {1}".format(job_spec_pkg_name, matching_mirror))
-            tty.debug("Downloading to {0}".format(build_cache_dir))
-            bindist.download_single_spec(job_spec, build_cache_dir, mirror_url=matching_mirror)

         # Now we are done and successful
         return 0

-    # Before beginning the install, if this is a "rebuild everything" pipeline, we
-    # only want to keep the mirror being used by the current pipeline as it's binary
-    # package destination. This ensures that the when we rebuild everything, we only
-    # consume binary dependencies built in this pipeline.
-    # TODO: Remove this in Spack 0.23
-    if deprecated_mirror_config and full_rebuild:
-        spack_ci.remove_other_mirrors(pipeline_mirrors, cfg.default_modify_scope())
-
     # No hash match anywhere means we need to rebuild spec

     # Start with spack arguments
@@ -681,17 +544,11 @@ def ci_rebuild(args):
             cdash_handler.copy_test_results(reports_dir, job_test_dir)

     if install_exit_code == 0:
-        # If the install succeeded, push it to one or more mirrors. Failure to push to any mirror
+        # If the install succeeded, push it to the buildcache destination. Failure to push
         # will result in a non-zero exit code. Pushing is best-effort.
-        mirror_urls = [buildcache_mirror_url]
-
-        # TODO: Remove this block in Spack 0.23
-        if pipeline_mirror_url:
-            mirror_urls.append(pipeline_mirror_url)
-
         for result in spack_ci.create_buildcache(
             input_spec=job_spec,
-            destination_mirror_urls=mirror_urls,
+            destination_mirror_urls=[buildcache_destination.push_url],
             sign_binaries=spack_ci.can_sign_binaries(),
         ):
             if not result.success:
@@ -105,7 +105,8 @@ def clean(parser, args):
     # Then do the cleaning falling through the cases
     if args.specs:
         specs = spack.cmd.parse_specs(args.specs, concretize=False)
-        specs = list(spack.cmd.matching_spec_from_env(x) for x in specs)
+        specs = spack.cmd.matching_specs_from_env(specs)
+
         for spec in specs:
             msg = "Cleaning build stage [{0}]"
             tty.msg(msg.format(spec.short_spec))
@@ -660,34 +660,32 @@ def mirror_name_or_url(m):
     # accidentally to a dir in the current working directory.

     # If there's a \ or / in the name, it's interpreted as a path or url.
-    if "/" in m or "\\" in m:
+    if "/" in m or "\\" in m or m in (".", ".."):
         return spack.mirror.Mirror(m)

     # Otherwise, the named mirror is required to exist.
     try:
         return spack.mirror.require_mirror_name(m)
     except ValueError as e:
-        raise argparse.ArgumentTypeError(
-            str(e) + ". Did you mean {}?".format(os.path.join(".", m))
-        )
+        raise argparse.ArgumentTypeError(f"{e}. Did you mean {os.path.join('.', m)}?") from e


 def mirror_url(url):
     try:
         return spack.mirror.Mirror.from_url(url)
     except ValueError as e:
-        raise argparse.ArgumentTypeError(str(e))
+        raise argparse.ArgumentTypeError(str(e)) from e


 def mirror_directory(path):
     try:
         return spack.mirror.Mirror.from_local_path(path)
     except ValueError as e:
-        raise argparse.ArgumentTypeError(str(e))
+        raise argparse.ArgumentTypeError(str(e)) from e


 def mirror_name(name):
     try:
         return spack.mirror.require_mirror_name(name)
     except ValueError as e:
-        raise argparse.ArgumentTypeError(str(e))
+        raise argparse.ArgumentTypeError(str(e)) from e
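Adding `from e` chains the original ValueError onto the argparse error, so tracebacks show the root cause instead of "during handling of the above exception, another exception occurred". A runnable sketch with a simplified validator (the URL check here is made up, not Spack's real one):

    import argparse

    def mirror_url(url):
        try:
            if "://" not in url:
                raise ValueError(f"invalid mirror url: {url}")
            return url
        except ValueError as e:
            raise argparse.ArgumentTypeError(str(e)) from e

    try:
        mirror_url("not-a-url")
    except argparse.ArgumentTypeError as e:
        print(e, "| caused by:", repr(e.__cause__))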
@@ -99,5 +99,5 @@ def deconcretize(parser, args):
             " Use `spack deconcretize --all` to deconcretize ALL specs.",
         )

-    specs = spack.cmd.parse_specs(args.specs) if args.specs else [any]
+    specs = spack.cmd.parse_specs(args.specs) if args.specs else [None]
     deconcretize_specs(args, specs)
@@ -85,8 +85,14 @@ def _retrieve_develop_source(spec: spack.spec.Spec, abspath: str) -> None:


 def develop(parser, args):
+    # Note: we could put develop specs in any scope, but I assume
+    # users would only ever want to do this for either (a) an active
+    # env or (b) a specified config file (e.g. that is included by
+    # an environment)
+    # TODO: when https://github.com/spack/spack/pull/35307 is merged,
+    # an active env is not required if a scope is specified
+    env = spack.cmd.require_active_env(cmd_name="develop")
     if not args.spec:
-        env = spack.cmd.require_active_env(cmd_name="develop")
         if args.clone is False:
             raise SpackError("No spec provided to spack develop command")
@@ -116,16 +122,18 @@ def develop(parser, args):
         raise SpackError("spack develop requires at most one named spec")

     spec = specs[0]
+
     version = spec.versions.concrete_range_as_version
     if not version:
-        raise SpackError("Packages to develop must have a concrete version")
+        # look up the maximum version so infintiy versions are preferred for develop
+        version = max(spec.package_class.versions.keys())
+        tty.msg(f"Defaulting to highest version: {spec.name}@{version}")
     spec.versions = spack.version.VersionList([version])

     # If user does not specify --path, we choose to create a directory in the
     # active environment's directory, named after the spec
     path = args.path or spec.name
     if not os.path.isabs(path):
-        env = spack.cmd.require_active_env(cmd_name="develop")
         abspath = spack.util.path.canonicalize_path(path, default_wd=env.path)
     else:
         abspath = path
@@ -149,13 +157,6 @@ def develop(parser, args):

     _retrieve_develop_source(spec, abspath)

-    # Note: we could put develop specs in any scope, but I assume
-    # users would only ever want to do this for either (a) an active
-    # env or (b) a specified config file (e.g. that is included by
-    # an environment)
-    # TODO: when https://github.com/spack/spack/pull/35307 is merged,
-    # an active env is not required if a scope is specified
-    env = spack.cmd.require_active_env(cmd_name="develop")
     tty.debug("Updating develop config for {0} transactionally".format(env.name))
     with env.write_transaction():
         if args.build_directory is not None:
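Instead of rejecting a spec without a concrete version, develop now defaults to the highest known version, which favors infinity versions such as git branches. A toy sketch of that selection; plain strings and a hand-rolled key stand in for Spack's version objects, where branch-like names compare as maximal:

    # Hypothetical version table for a package.
    versions = ["2.0.0", "2.1.0", "main"]

    def sort_key(v):
        numeric = tuple(int(p) for p in v.split(".")) if v[0].isdigit() else ()
        # branch-like names sort above numbered releases, mimicking
        # how Spack's infinity versions compare as maximal
        return (not v[0].isdigit(), numeric)

    version = max(versions, key=sort_key)
    print(f"Defaulting to highest version: mypkg@{version}")  # mypkg@main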
@@ -57,35 +57,41 @@
 # env create
 #
 def env_create_setup_parser(subparser):
-    """create a new environment"""
-    subparser.add_argument("env_name", metavar="env", help="name or directory of environment")
+    """create a new environment
+
+    create a new environment or, optionally, copy an existing environment
+
+    a manifest file results in a new abstract environment while a lock file
+    creates a new concrete environment
+    """
+    subparser.add_argument(
+        "env_name", metavar="env", help="name or directory of the new environment"
+    )
     subparser.add_argument(
         "-d", "--dir", action="store_true", help="create an environment in a specific directory"
     )
     subparser.add_argument(
         "--keep-relative",
         action="store_true",
-        help="copy relative develop paths verbatim into the new environment"
-        " when initializing from envfile",
+        help="copy envfile's relative develop paths verbatim",
     )
     view_opts = subparser.add_mutually_exclusive_group()
     view_opts.add_argument(
         "--without-view", action="store_true", help="do not maintain a view for this environment"
     )
     view_opts.add_argument(
-        "--with-view",
-        help="specify that this environment should maintain a view at the"
-        " specified path (by default the view is maintained in the"
-        " environment directory)",
+        "--with-view", help="maintain view at WITH_VIEW (vs. environment's directory)"
     )
     subparser.add_argument(
         "envfile",
         nargs="?",
         default=None,
-        help="either a lockfile (must end with '.json' or '.lock') or a manifest file",
+        help="manifest or lock file (ends with '.json' or '.lock')",
     )
     subparser.add_argument(
-        "--include-concrete", action="append", help="name of old environment to copy specs from"
+        "--include-concrete",
+        action="append",
+        help="copy concrete specs from INCLUDE_CONCRETE's environment",
     )
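The expanded setup-parser docstrings follow the usual argparse split: the first line becomes the subcommand's short help and the remainder its long description. A minimal sketch of that wiring (a hypothetical parser, not Spack's actual command framework):

    import argparse

    doc = """create a new environment

    a manifest file results in a new abstract environment while a lock file
    creates a new concrete environment
    """
    short_help, _, long_desc = doc.partition("\n\n")
    parser = argparse.ArgumentParser(prog="spack env")
    subparsers = parser.add_subparsers()
    subparsers.add_parser("create", help=short_help, description=long_desc)
    parser.parse_args(["create"])  # wiring works; -h would show both texts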
@@ -173,7 +179,7 @@ def _env_create(
 # env activate
 #
 def env_activate_setup_parser(subparser):
-    """set the current environment"""
+    """set the active environment"""
     shells = subparser.add_mutually_exclusive_group()
     shells.add_argument(
         "--sh",
@@ -213,14 +219,14 @@ def env_activate_setup_parser(subparser):

     view_options = subparser.add_mutually_exclusive_group()
     view_options.add_argument(
-        "--with-view",
         "-v",
+        "--with-view",
         metavar="name",
-        help="set runtime environment variables for specific view",
+        help="set runtime environment variables for the named view",
     )
     view_options.add_argument(
-        "--without-view",
         "-V",
+        "--without-view",
         action="store_true",
         help="do not set runtime environment variables for any view",
     )
@@ -230,14 +236,14 @@ def env_activate_setup_parser(subparser):
         "--prompt",
         action="store_true",
         default=False,
-        help="decorate the command line prompt when activating",
+        help="add the active environment to the command line prompt",
     )

     subparser.add_argument(
         "--temp",
         action="store_true",
         default=False,
-        help="create and activate an environment in a temporary directory",
+        help="create and activate in a temporary directory",
     )
     subparser.add_argument(
         "--create",
@@ -249,13 +255,12 @@ def env_activate_setup_parser(subparser):
         "--envfile",
         nargs="?",
         default=None,
-        help="either a lockfile (must end with '.json' or '.lock') or a manifest file",
+        help="manifest or lock file (ends with '.json' or '.lock')",
     )
     subparser.add_argument(
         "--keep-relative",
         action="store_true",
-        help="copy relative develop paths verbatim into the new environment"
-        " when initializing from envfile",
+        help="copy envfile's relative develop paths verbatim when create",
     )
     subparser.add_argument(
         "-d",
@@ -269,10 +274,7 @@ def env_activate_setup_parser(subparser):
         dest="env_name",
         nargs="?",
         default=None,
-        help=(
-            "name of managed environment or directory of the anonymous env"
-            " (when using --dir/-d) to activate"
-        ),
+        help=("name or directory of the environment being activated"),
     )
@@ -385,7 +387,7 @@ def env_activate(args):
 # env deactivate
 #
 def env_deactivate_setup_parser(subparser):
-    """deactivate any active environment in the shell"""
+    """deactivate the active environment"""
     shells = subparser.add_mutually_exclusive_group()
     shells.add_argument(
         "--sh",
@@ -448,23 +450,27 @@ def env_deactivate(args):
 # env remove
 #
 def env_remove_setup_parser(subparser):
-    """remove an existing environment"""
-    subparser.add_argument("rm_env", metavar="env", nargs="+", help="environment(s) to remove")
+    """remove managed environment(s)
+
+    remove existing environment(s) managed by Spack
+
+    directory environments and manifests embedded in repositories must be
+    removed manually
+    """
+    subparser.add_argument(
+        "rm_env", metavar="env", nargs="+", help="name(s) of the environment(s) being removed"
+    )
     arguments.add_common_arguments(subparser, ["yes_to_all"])
     subparser.add_argument(
         "-f",
         "--force",
         action="store_true",
-        help="remove the environment even if it is included in another environment",
+        help="force removal even when included in other environment(s)",
     )


 def env_remove(args):
-    """Remove a *named* environment.
-
-    This removes an environment managed by Spack. Directory environments
-    and manifests embedded in repositories should be removed manually.
-    """
+    """remove existing environment(s)"""
     remove_envs = []
     valid_envs = []
     bad_envs = []
@@ -519,29 +525,32 @@ def env_remove(args):
 # env rename
 #
 def env_rename_setup_parser(subparser):
-    """rename an existing environment"""
+    """rename an existing environment
+
+    rename a managed environment or move an independent/directory environment
+
+    operation cannot be performed to or from an active environment
+    """
     subparser.add_argument(
-        "mv_from", metavar="from", help="name (or path) of existing environment"
-    )
-    subparser.add_argument(
-        "mv_to", metavar="to", help="new name (or path) for existing environment"
+        "mv_from", metavar="from", help="current name or directory of the environment"
     )
+    subparser.add_argument("mv_to", metavar="to", help="new name or directory for the environment")
     subparser.add_argument(
         "-d",
         "--dir",
         action="store_true",
-        help="the specified arguments correspond to directory paths",
+        help="positional arguments are environment directory paths",
     )
     subparser.add_argument(
-        "-f", "--force", action="store_true", help="allow overwriting of an existing environment"
+        "-f",
+        "--force",
+        action="store_true",
+        help="force renaming even if overwriting an existing environment",
     )


 def env_rename(args):
-    """Rename an environment.
-
-    This renames a managed environment or moves an anonymous environment.
-    """
+    """rename or move an existing environment"""

     # Directory option has been specified
     if args.dir:
@@ -590,7 +599,7 @@ def env_rename(args):
 # env list
 #
 def env_list_setup_parser(subparser):
-    """list managed environments"""
+    """list all managed environments"""


 def env_list(args):
@@ -626,13 +635,14 @@ def actions():
 # env view
 #
 def env_view_setup_parser(subparser):
-    """manage a view associated with the environment"""
+    """manage the environment's view
+
+    provide the path when enabling a view with a non-default path
+    """
     subparser.add_argument(
         "action", choices=ViewAction.actions(), help="action to take for the environment's view"
     )
-    subparser.add_argument(
-        "view_path", nargs="?", help="when enabling a view, optionally set the path manually"
-    )
+    subparser.add_argument("view_path", nargs="?", help="view's non-default path when enabling it")


 def env_view(args):
@@ -660,7 +670,7 @@ def env_view(args):
 # env status
 #
 def env_status_setup_parser(subparser):
-    """print whether there is an active environment"""
+    """print active environment status"""


 def env_status(args):
@@ -720,14 +730,22 @@ def env_loads(args):
|
||||
|
||||
|
||||
def env_update_setup_parser(subparser):
|
||||
"""update environments to the latest format"""
|
||||
"""update the environment manifest to the latest schema format
|
||||
|
||||
update the environment to the latest schema format, which may not be
|
||||
readable by older versions of spack
|
||||
|
||||
a backup copy of the manifest is retained in case there is a need to revert
|
||||
this operation
|
||||
"""
|
||||
subparser.add_argument(
|
||||
metavar="env", dest="update_env", help="name or directory of the environment to activate"
|
||||
metavar="env", dest="update_env", help="name or directory of the environment"
|
||||
)
|
||||
spack.cmd.common.arguments.add_common_arguments(subparser, ["yes_to_all"])
|
||||
|
||||
|
||||
def env_update(args):
|
||||
"""update the manifest to the latest format"""
|
||||
manifest_file = ev.manifest_file(args.update_env)
|
||||
backup_file = manifest_file + ".bkp"
|
||||
|
||||
@@ -757,14 +775,22 @@ def env_update(args):
|
||||
|
||||
|
||||
def env_revert_setup_parser(subparser):
|
||||
"""restore environments to their state before update"""
|
||||
"""restore the environment manifest to its previous format
|
||||
|
||||
revert the environment's manifest to the schema format from its last
|
||||
'spack env update'
|
||||
|
||||
the current manifest will be overwritten by the backup copy and the backup
|
||||
copy will be removed
|
||||
"""
|
||||
subparser.add_argument(
|
||||
metavar="env", dest="revert_env", help="name or directory of the environment to activate"
|
||||
metavar="env", dest="revert_env", help="name or directory of the environment"
|
||||
)
|
||||
spack.cmd.common.arguments.add_common_arguments(subparser, ["yes_to_all"])
|
||||
|
||||
|
||||
def env_revert(args):
|
||||
"""restore the environment manifest to its previous format"""
|
||||
manifest_file = ev.manifest_file(args.revert_env)
|
||||
backup_file = manifest_file + ".bkp"
|
||||
|
||||
@@ -796,15 +822,19 @@ def env_revert(args):
|
||||
|
||||
|
||||
def env_depfile_setup_parser(subparser):
|
||||
"""generate a depfile from the concrete environment specs"""
|
||||
"""generate a depfile to exploit parallel builds across specs
|
||||
|
||||
requires the active environment to be concrete
|
||||
"""
|
||||
subparser.add_argument(
|
||||
"--make-prefix",
|
||||
"--make-target-prefix",
|
||||
default=None,
|
||||
metavar="TARGET",
|
||||
help="prefix Makefile targets (and variables) with <TARGET>/<name>\n\nby default "
|
||||
"the absolute path to the directory makedeps under the environment metadata dir is "
|
||||
"used. can be set to an empty string --make-prefix ''",
|
||||
help="prefix Makefile targets/variables with <TARGET>/<name>,\n"
|
||||
"which can be an empty string (--make-prefix '')\n"
|
||||
"defaults to the absolute path of the environment's makedeps\n"
|
||||
"environment metadata dir\n",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--make-disable-jobserver",
|
||||
@@ -819,8 +849,8 @@ def env_depfile_setup_parser(subparser):
|
||||
type=arguments.use_buildcache,
|
||||
default="package:auto,dependencies:auto",
|
||||
metavar="[{auto,only,never},][package:{auto,only,never},][dependencies:{auto,only,never}]",
|
||||
help="when using `only`, redundant build dependencies are pruned from the DAG\n\n"
|
||||
"this flag is passed on to the generated spack install commands",
|
||||
help="use `only` to prune redundant build dependencies\n"
|
||||
"option is also passed to generated spack install commands",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"-o",
|
||||
@@ -834,14 +864,14 @@ def env_depfile_setup_parser(subparser):
|
||||
"--generator",
|
||||
default="make",
|
||||
choices=("make",),
|
||||
help="specify the depfile type\n\ncurrently only make is supported",
|
||||
help="specify the depfile type (only supports `make`)",
|
||||
)
|
||||
subparser.add_argument(
|
||||
metavar="specs",
|
||||
dest="specs",
|
||||
nargs=argparse.REMAINDER,
|
||||
default=None,
|
||||
help="generate a depfile only for matching specs in the environment",
|
||||
help="limit the generated file to matching specs",
|
||||
)
|
||||
|
||||
|
||||
@@ -910,7 +940,12 @@ def setup_parser(subparser):
|
||||
setup_parser_cmd_name = "env_%s_setup_parser" % name
|
||||
setup_parser_cmd = globals()[setup_parser_cmd_name]
|
||||
|
||||
subsubparser = sp.add_parser(name, aliases=aliases, help=setup_parser_cmd.__doc__)
|
||||
subsubparser = sp.add_parser(
|
||||
name,
|
||||
aliases=aliases,
|
||||
description=setup_parser_cmd.__doc__,
|
||||
help=spack.cmd.first_line(setup_parser_cmd.__doc__),
|
||||
)
|
||||
setup_parser_cmd(subsubparser)
|
||||
|
||||
|
||||
|
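The last hunk above splits each subcommand docstring between argparse's description (full text, shown by the subcommand's own --help) and help (one line, shown in the parent command's subcommand table), which is what lets the longer docstrings added in this file stay out of the summary listing. A minimal standalone sketch of the same pattern; first_line here is a hypothetical stand-in for spack.cmd.first_line:

import argparse


def first_line(docstring: str) -> str:
    """Return the first non-empty line of a docstring (stand-in for spack.cmd.first_line)."""
    return docstring.lstrip().splitlines()[0]


def env_list_setup_parser(subparser):
    """list all managed environments

    a longer description that should appear only on `env list --help`
    """


parser = argparse.ArgumentParser(prog="env")
sp = parser.add_subparsers(metavar="SUBCOMMAND", dest="env_command")
subsubparser = sp.add_parser(
    "list",
    description=env_list_setup_parser.__doc__,  # full docstring on `env list --help`
    help=first_line(env_list_setup_parser.__doc__),  # one-liner in the subcommand table
)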
@@ -174,17 +174,17 @@ def query_arguments(args):
     if (args.missing or args.only_missing) and not args.only_deprecated:
         installed.append(InstallStatuses.MISSING)

-    known = any
+    predicate_fn = None
     if args.unknown:
-        known = False
+        predicate_fn = lambda x: not spack.repo.PATH.exists(x.spec.name)

-    explicit = any
+    explicit = None
     if args.explicit:
         explicit = True
     if args.implicit:
         explicit = False

-    q_args = {"installed": installed, "known": known, "explicit": explicit}
+    q_args = {"installed": installed, "predicate_fn": predicate_fn, "explicit": explicit}

     install_tree = args.install_tree
     upstreams = spack.config.get("upstreams", {})
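Replacing the `any` builtin-as-sentinel with `None` gives these query arguments a conventional tri-state: None means "don't filter", True/False select explicit or implicit installs. A small self-contained illustration of the same filtering idiom (hypothetical records, not Spack's actual data structures):

from typing import List, Optional

records = [
    {"name": "zlib", "explicit": True},
    {"name": "pkgconf", "explicit": False},
]


def filter_explicit(records: List[dict], explicit: Optional[bool] = None) -> List[dict]:
    # None disables the filter entirely; True/False match the flag exactly.
    return [r for r in records if explicit is None or r["explicit"] == explicit]


assert len(filter_explicit(records)) == 2        # no filtering
assert len(filter_explicit(records, True)) == 1  # only explicit installs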
@@ -41,7 +41,7 @@ def setup_parser(subparser):
         help="do not remove installed build-only dependencies of roots\n"
         "(default is to keep only link & run dependencies)",
     )
-    spack.cmd.common.arguments.add_common_arguments(subparser, ["yes_to_all"])
+    spack.cmd.common.arguments.add_common_arguments(subparser, ["yes_to_all", "constraint"])


 def roots_from_environments(args, active_env):
@@ -97,6 +97,12 @@ def gc(parser, args):
         root_hashes = None

     specs = spack.store.STORE.db.unused_specs(root_hashes=root_hashes, deptype=deptype)
+
+    # limit search to constraint specs if provided
+    if args.constraint:
+        hashes = set(spec.dag_hash() for spec in args.specs())
+        specs = [spec for spec in specs if spec.dag_hash() in hashes]
+
     if not specs:
         tty.msg("There are no unused specs. Spack's store is clean.")
         return
@@ -80,8 +80,8 @@ def find_matching_specs(specs, allow_multiple_matches=False):
             has_errors = True

         # No installed package matches the query
-        if len(matching) == 0 and spec is not any:
-            tty.die("{0} does not match any installed packages.".format(spec))
+        if len(matching) == 0 and spec is not None:
+            tty.die(f"{spec} does not match any installed packages.")

         specs_from_cli.extend(matching)

@@ -98,8 +98,9 @@ def do_mark(specs, explicit):
         specs (list): list of specs to be marked
         explicit (bool): whether to mark specs as explicitly installed
     """
-    for spec in specs:
-        spack.store.STORE.db.update_explicit(spec, explicit)
+    with spack.store.STORE.db.write_transaction():
+        for spec in specs:
+            spack.store.STORE.db.mark(spec, "explicit", explicit)


 def mark_specs(args, specs):
@@ -116,6 +117,6 @@ def mark(parser, args):
             " Use `spack mark --all` to mark ALL packages.",
         )

-    # [any] here handles the --all case by forcing all specs to be returned
-    specs = spack.cmd.parse_specs(args.specs) if args.specs else [any]
+    # [None] here handles the --all case by forcing all specs to be returned
+    specs = spack.cmd.parse_specs(args.specs) if args.specs else [None]
     mark_specs(args, specs)
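Wrapping the loop in a single write_transaction() means the database lock is acquired and the database file is re-read and re-written once for the whole batch, instead of once per spec as the removed update_explicit call did. The shape of that batching pattern, sketched with a hypothetical lock-holding class (not Spack's real Database):

import contextlib


class ToyDB:
    """Hypothetical database standing in for spack.store.STORE.db."""

    def __init__(self):
        self.data = {}

    @contextlib.contextmanager
    def write_transaction(self):
        # acquire lock + read from disk once (elided) ...
        yield
        # ... write to disk + release lock once (elided)

    def mark(self, key, field, value):
        self.data.setdefault(key, {})[field] = value


db = ToyDB()
with db.write_transaction():  # one lock/read/write for the whole batch
    for key in ("a", "b", "c"):
        db.mark(key, "explicit", True)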
@@ -378,7 +378,10 @@ def refresh(module_type, specs, args):
 def modules_cmd(parser, args, module_type, callbacks=callbacks):
     # Qualifiers to be used when querying the db for specs
     constraint_qualifiers = {
-        "refresh": {"installed": True, "known": lambda x: not spack.repo.PATH.exists(x)}
+        "refresh": {
+            "installed": True,
+            "predicate_fn": lambda x: spack.repo.PATH.exists(x.spec.name),
+        }
     }
     query_args = constraint_qualifiers.get(args.subparser_name, {})
@@ -33,8 +33,9 @@ def patch(parser, args):
         spack.config.set("config:checksum", False, scope="command_line")

     specs = spack.cmd.parse_specs(args.specs, concretize=False)
+    specs = spack.cmd.matching_specs_from_env(specs)
     for spec in specs:
-        _patch(spack.cmd.matching_spec_from_env(spec).package)
+        _patch(spec.package)


 def _patch_env(env: ev.Environment):
@@ -3,7 +3,6 @@
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)

-import argparse
 import re
 import sys

@@ -12,13 +11,12 @@

 import spack
 import spack.cmd
-import spack.cmd.common.arguments
+import spack.cmd.spec
 import spack.config
 import spack.environment
 import spack.hash_types as ht
 import spack.solver.asp as asp
-import spack.spec
 from spack.cmd.common import arguments

 description = "concretize a specs using an ASP solver"
 section = "developer"
@@ -41,42 +39,6 @@ def setup_parser(subparser):
         " solutions models found by asp program\n"
         " all all of the above",
     )

-    # Below are arguments w.r.t. spec display (like spack spec)
-    arguments.add_common_arguments(subparser, ["long", "very_long", "namespaces"])
-
-    install_status_group = subparser.add_mutually_exclusive_group()
-    arguments.add_common_arguments(install_status_group, ["install_status", "no_install_status"])
-
-    subparser.add_argument(
-        "-y",
-        "--yaml",
-        action="store_const",
-        dest="format",
-        default=None,
-        const="yaml",
-        help="print concrete spec as yaml",
-    )
-    subparser.add_argument(
-        "-j",
-        "--json",
-        action="store_const",
-        dest="format",
-        default=None,
-        const="json",
-        help="print concrete spec as json",
-    )
-    subparser.add_argument(
-        "-c",
-        "--cover",
-        action="store",
-        default="nodes",
-        choices=["nodes", "edges", "paths"],
-        help="how extensively to traverse the DAG (default: nodes)",
-    )
-    subparser.add_argument(
-        "-t", "--types", action="store_true", default=False, help="show dependency types"
-    )
     subparser.add_argument(
         "--timers",
         action="store_true",
@@ -86,9 +48,8 @@ def setup_parser(subparser):
     subparser.add_argument(
         "--stats", action="store_true", default=False, help="print out statistics from clingo"
     )
-    subparser.add_argument("specs", nargs=argparse.REMAINDER, help="specs of packages")

-    spack.cmd.common.arguments.add_concretizer_args(subparser)
+    spack.cmd.spec.setup_parser(subparser)


 def _process_result(result, show, required_format, kwargs):
@@ -164,11 +125,12 @@ def solve(parser, args):

     # If we have an active environment, pick the specs from there
     env = spack.environment.active_environment()
-    if env and args.specs:
-        msg = "cannot give explicit specs when an environment is active"
-        raise RuntimeError(msg)
-
-    specs = list(env.user_specs) if env else spack.cmd.parse_specs(args.specs)
+    if args.specs:
+        specs = spack.cmd.parse_specs(args.specs)
+    elif env:
+        specs = list(env.user_specs)
+    else:
+        tty.die("spack solve requires at least one spec or an active environment")

     solver = asp.Solver()
     output = sys.stdout if "asp" in show else None
@@ -96,26 +96,25 @@ def spec(parser, args):
     if args.install_status:
         tree_context = spack.store.STORE.db.read_transaction

     # Use command line specified specs, otherwise try to use environment specs.
+    env = ev.active_environment()
+
     if args.specs:
         input_specs = spack.cmd.parse_specs(args.specs)
         concretized_specs = spack.cmd.parse_specs(args.specs, concretize=True)
         specs = list(zip(input_specs, concretized_specs))
-    else:
-        env = ev.active_environment()
-        if env:
-            env.concretize()
-            specs = env.concretized_specs()
+    elif env:
+        env.concretize()
+        specs = env.concretized_specs()

-            if not args.format:
-                # environments are printed together in a combined tree() invocation,
-                # except when using --yaml or --json, which we print spec by spec below.
-                tree_kwargs["key"] = spack.traverse.by_dag_hash
-                tree_kwargs["hashes"] = args.long or args.very_long
-                print(spack.spec.tree([concrete for _, concrete in specs], **tree_kwargs))
-                return
-        else:
-            tty.die("spack spec requires at least one spec or an active environment")
+        if not args.format:
+            # environments are printed together in a combined tree() invocation,
+            # except when using --yaml or --json, which we print spec by spec below.
+            tree_kwargs["key"] = spack.traverse.by_dag_hash
+            tree_kwargs["hashes"] = args.long or args.very_long
+            print(spack.spec.tree([concrete for _, concrete in specs], **tree_kwargs))
+            return
+    else:
+        tty.die("spack spec requires at least one spec or an active environment")

     for input, output in specs:
         # With --yaml or --json, just print the raw specs to output
@@ -47,8 +47,8 @@ def stage(parser, args):
     if len(specs) > 1 and custom_path:
         tty.die("`--path` requires a single spec, but multiple were provided")

+    specs = spack.cmd.matching_specs_from_env(specs)
     for spec in specs:
-        spec = spack.cmd.matching_spec_from_env(spec)
         pkg = spec.package

         if custom_path:
@@ -165,7 +165,7 @@ def test_run(args):
     if args.fail_fast:
         spack.config.set("config:fail_fast", True, scope="command_line")

-    explicit = args.explicit or any
+    explicit = args.explicit or None
     explicit_str = "explicitly " if args.explicit else ""

     # Get specs to test
@@ -90,6 +90,7 @@ def find_matching_specs(
         env: optional active environment
         specs: list of specs to be matched against installed packages
         allow_multiple_matches: if True multiple matches are admitted
+        origin: origin of the spec

     Return:
         list: list of specs
@@ -98,7 +99,7 @@ def find_matching_specs(
     hashes = env.all_hashes() if env else None

     # List of specs that match expressions given via command line
-    specs_from_cli = []
+    specs_from_cli: List["spack.spec.Spec"] = []
     has_errors = False
     for spec in specs:
         install_query = [InstallStatuses.INSTALLED, InstallStatuses.DEPRECATED]
@@ -116,7 +117,7 @@ def find_matching_specs(
             has_errors = True

         # No installed package matches the query
-        if len(matching) == 0 and spec is not any:
+        if len(matching) == 0 and spec is not None:
             if env:
                 pkg_type = "packages in environment '%s'" % env.name
             else:
@@ -213,7 +214,7 @@ def get_uninstall_list(args, specs: List[spack.spec.Spec], env: Optional[ev.Environment]):

     # Gets the list of installed specs that match the ones given via cli
     # args.all takes care of the case where '-a' is given in the cli
-    matching_specs = find_matching_specs(env, specs, args.all)
+    matching_specs = find_matching_specs(env, specs, args.all, origin=args.origin)
     dependent_specs = installed_dependents(matching_specs)
     all_uninstall_specs = matching_specs + dependent_specs if args.dependents else matching_specs
     other_dependent_envs = dependent_environments(all_uninstall_specs, current_env=env)
@@ -301,6 +302,6 @@ def uninstall(parser, args):
             " Use `spack uninstall --all` to uninstall ALL packages.",
         )

-    # [any] here handles the --all case by forcing all specs to be returned
-    specs = spack.cmd.parse_specs(args.specs) if args.specs else [any]
+    # [None] here handles the --all case by forcing all specs to be returned
+    specs = spack.cmd.parse_specs(args.specs) if args.specs else [None]
     uninstall_specs(args, specs)
@@ -33,6 +33,8 @@
   YamlFilesystemView.

 """
+import sys
+
 import llnl.util.tty as tty
 from llnl.util.link_tree import MergeConflictError

@@ -178,7 +180,12 @@ def setup_parser(sp):


 def view(parser, args):
-    "Produce a view of a set of packages."
+    """Produce a view of a set of packages."""
+
+    if sys.platform == "win32" and args.action in ("hardlink", "hard"):
+        # Hard-linked views are not yet allowed on Windows.
+        # See https://github.com/spack/spack/pull/46335#discussion_r1757411915
+        tty.die("Hard linking is not supported on Windows. Please use symlinks or copy methods.")
+
     specs = spack.cmd.parse_specs(args.specs)
     path = args.path[0]
@@ -275,7 +275,7 @@ def __init__(
         operating_system,
         target,
         paths,
-        modules=None,
+        modules: Optional[List[str]] = None,
         alias=None,
         environment=None,
         extra_rpaths=None,
@@ -415,14 +415,19 @@ def implicit_rpaths(self) -> List[str]:
         return list(paths_containing_libs(link_dirs, all_required_libs))

     @property
-    def default_libc(self) -> Optional["spack.spec.Spec"]:
-        """Determine libc targeted by the compiler from link line"""
+    def default_dynamic_linker(self) -> Optional[str]:
+        """Determine default dynamic linker from compiler link line"""
         output = self.compiler_verbose_output

         if not output:
             return None

-        dynamic_linker = spack.util.libc.parse_dynamic_linker(output)
+        return spack.util.libc.parse_dynamic_linker(output)
+
+    @property
+    def default_libc(self) -> Optional["spack.spec.Spec"]:
+        """Determine libc targeted by the compiler from link line"""
+        dynamic_linker = self.default_dynamic_linker
+
         if not dynamic_linker:
             return None
@@ -92,6 +92,14 @@ def c11_flag(self):
         else:
             return "-std=c1x"

+    @property
+    def c18_flag(self):
+        # c18 supported since oneapi 2022, which is classic version 2021.5.0
+        if self.real_version < Version("21.5.0"):
+            raise UnsupportedCompilerFlag(self, "the C18 standard", "c18_flag", "< 21.5.0")
+        else:
+            return "-std=c18"
+
     @property
     def cc_pic_flag(self):
         return "-fPIC"
@@ -116,9 +124,8 @@ def setup_custom_environment(self, pkg, env):
         # Edge cases for Intel's oneAPI compilers when using the legacy classic compilers:
         # Always pass flags to disable deprecation warnings, since these warnings can
         # confuse tools that parse the output of compiler commands (e.g. version checks).
-        if self.cc and self.cc.endswith("icc") and self.real_version >= Version("2021"):
+        if self.real_version >= Version("2021") and self.real_version <= Version("2023"):
             env.append_flags("SPACK_ALWAYS_CFLAGS", "-diag-disable=10441")
-        if self.cxx and self.cxx.endswith("icpc") and self.real_version >= Version("2021"):
             env.append_flags("SPACK_ALWAYS_CXXFLAGS", "-diag-disable=10441")
-        if self.fc and self.fc.endswith("ifort") and self.real_version >= Version("2021"):
+        if self.real_version >= Version("2021") and self.real_version <= Version("2024"):
             env.append_flags("SPACK_ALWAYS_FFLAGS", "-diag-disable=10448")
@@ -293,6 +293,17 @@ def platform_toolset_ver(self):
         vs22_toolset = Version(toolset_ver) > Version("142")
         return toolset_ver if not vs22_toolset else "143"

+    @property
+    def visual_studio_version(self):
+        """The four digit Visual Studio version (i.e. 2019 or 2022)
+
+        Note: This differs from the msvc version or toolset version as
+        those properties track the compiler and build tools version
+        respectively, whereas this tracks the VS release associated
+        with a given MSVC compiler.
+        """
+        return re.search(r"[0-9]{4}", self.cc).group(0)
+
     def _compiler_version(self, compiler):
         """Returns version object for given compiler"""
         # ignore_errors below is true here due to ifx's
@@ -7,7 +7,9 @@
 from os.path import dirname, join

 from llnl.util import tty
+from llnl.util.filesystem import ancestor

+import spack.util.executable
 from spack.compiler import Compiler
 from spack.version import Version

@@ -116,6 +118,24 @@ def fc_pic_flag(self):
     def stdcxx_libs(self):
         return ("-cxxlib",)

+    @property
+    def prefix(self):
+        # OneAPI reports its install prefix when running ``--version``
+        # on the line ``InstalledDir: <prefix>/bin/compiler``.
+        cc = spack.util.executable.Executable(self.cc)
+        with self.compiler_environment():
+            oneapi_output = cc("--version", output=str, error=str)
+
+            for line in oneapi_output.splitlines():
+                if line.startswith("InstalledDir:"):
+                    oneapi_prefix = line.split(":")[1].strip()
+                    # Go from <prefix>/bin/compiler to <prefix>
+                    return ancestor(oneapi_prefix, 2)
+
+        raise RuntimeError(
+            "could not find install prefix of OneAPI from output:\n\t{}".format(oneapi_output)
+        )
+
     def setup_custom_environment(self, pkg, env):
         # workaround bug in icpx driver where it requires sycl-post-link is on the PATH
         # It is located in the same directory as the driver. Error message:
@@ -131,11 +151,14 @@ def setup_custom_environment(self, pkg, env):
         # Edge cases for Intel's oneAPI compilers when using the legacy classic compilers:
         # Always pass flags to disable deprecation warnings, since these warnings can
         # confuse tools that parse the output of compiler commands (e.g. version checks).
-        if self.cc and self.cc.endswith("icc") and self.real_version >= Version("2021"):
+        # This is really only needed for Fortran, since oneapi@ should be using either
+        # icx+icpx+ifx or icx+icpx+ifort. But to be on the safe side (some users may
+        # want to try to swap icpx against icpc, for example), and since the Intel LLVM
+        # compilers accept these diag-disable flags, we apply them for all compilers.
+        if self.real_version >= Version("2021") and self.real_version <= Version("2023"):
             env.append_flags("SPACK_ALWAYS_CFLAGS", "-diag-disable=10441")
-        if self.cxx and self.cxx.endswith("icpc") and self.real_version >= Version("2021"):
             env.append_flags("SPACK_ALWAYS_CXXFLAGS", "-diag-disable=10441")
-        if self.fc and self.fc.endswith("ifort") and self.real_version >= Version("2021"):
+        if self.real_version >= Version("2021") and self.real_version <= Version("2024"):
             env.append_flags("SPACK_ALWAYS_FFLAGS", "-diag-disable=10448")

         # 2024 release bumped the libsycl version because of an ABI
@@ -2,14 +2,20 @@
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
-"""
-(DEPRECATED) Used to contain the code for the original concretizer
-"""
+"""High-level functions to concretize list of specs"""
+import sys
+import time
 from contextlib import contextmanager
 from itertools import chain
+from typing import Iterable, Optional, Sequence, Tuple, Union
+
+import llnl.util.tty as tty

 import spack.compilers
 import spack.config
 import spack.error
+import spack.repo
+import spack.util.parallel
+from spack.spec import ArchSpec, CompilerSpec, Spec

 CHECK_COMPILER_EXISTENCE = True

@@ -30,67 +36,167 @@ def enable_compiler_existence_check():
     CHECK_COMPILER_EXISTENCE = saved


-def find_spec(spec, condition, default=None):
-    """Searches the dag from spec in an intelligent order and looks
-    for a spec that matches a condition"""
-    # First search parents, then search children
-    deptype = ("build", "link")
-    dagiter = chain(
-        spec.traverse(direction="parents", deptype=deptype, root=False),
-        spec.traverse(direction="children", deptype=deptype, root=False),
-    )
-    visited = set()
-    for relative in dagiter:
-        if condition(relative):
-            return relative
-        visited.add(id(relative))
-
-    # Then search all other relatives in the DAG *except* spec
-    for relative in spec.root.traverse(deptype="all"):
-        if relative is spec:
-            continue
-        if id(relative) in visited:
-            continue
-        if condition(relative):
-            return relative
-
-    # Finally search spec itself.
-    if condition(spec):
-        return spec
-
-    return default  # Nothing matched the condition; return default.
+SpecPair = Tuple[Spec, Spec]
+SpecLike = Union[Spec, str]
+TestsType = Union[bool, Iterable[str]]


-def concretize_specs_together(*abstract_specs, **kwargs):
+def concretize_specs_together(
+    abstract_specs: Sequence[SpecLike], tests: TestsType = False
+) -> Sequence[Spec]:
     """Given a number of specs as input, tries to concretize them together.

     Args:
-        tests (bool or list or set): False to run no tests, True to test
-            all packages, or a list of package names to run tests for some
-        *abstract_specs: abstract specs to be concretized, given either
-            as Specs or strings
-
-    Returns:
-        List of concretized specs
+        abstract_specs: abstract specs to be concretized
+        tests: list of package names for which to consider tests dependencies. If True, all nodes
+            will have test dependencies. If False, test dependencies will be disregarded.
     """
     import spack.solver.asp

     allow_deprecated = spack.config.get("config:deprecated", False)
     solver = spack.solver.asp.Solver()
-    result = solver.solve(
-        abstract_specs, tests=kwargs.get("tests", False), allow_deprecated=allow_deprecated
-    )
+    result = solver.solve(abstract_specs, tests=tests, allow_deprecated=allow_deprecated)
     return [s.copy() for s in result.specs]


+def concretize_together(
+    spec_list: Sequence[SpecPair], tests: TestsType = False
+) -> Sequence[SpecPair]:
+    """Given a number of specs as input, tries to concretize them together.
+
+    Args:
+        spec_list: list of tuples to concretize. First entry is abstract spec, second entry is
+            already concrete spec or None if not yet concretized
+        tests: list of package names for which to consider tests dependencies. If True, all nodes
+            will have test dependencies. If False, test dependencies will be disregarded.
+    """
+    to_concretize = [concrete if concrete else abstract for abstract, concrete in spec_list]
+    abstract_specs = [abstract for abstract, _ in spec_list]
+    concrete_specs = concretize_specs_together(to_concretize, tests=tests)
+    return list(zip(abstract_specs, concrete_specs))
+
+
+def concretize_together_when_possible(
+    spec_list: Sequence[SpecPair], tests: TestsType = False
+) -> Sequence[SpecPair]:
+    """Given a number of specs as input, tries to concretize them together to the extent possible.
+
+    See documentation for ``unify: when_possible`` concretization for the precise definition of
+    "to the extent possible".
+
+    Args:
+        spec_list: list of tuples to concretize. First entry is abstract spec, second entry is
+            already concrete spec or None if not yet concretized
+        tests: list of package names for which to consider tests dependencies. If True, all nodes
+            will have test dependencies. If False, test dependencies will be disregarded.
+    """
+    import spack.solver.asp
+
+    to_concretize = [concrete if concrete else abstract for abstract, concrete in spec_list]
+    old_concrete_to_abstract = {
+        concrete: abstract for (abstract, concrete) in spec_list if concrete
+    }
+
+    result_by_user_spec = {}
+    solver = spack.solver.asp.Solver()
+    allow_deprecated = spack.config.get("config:deprecated", False)
+    for result in solver.solve_in_rounds(
+        to_concretize, tests=tests, allow_deprecated=allow_deprecated
+    ):
+        result_by_user_spec.update(result.specs_by_input)
+
+    # If the "abstract" spec is a concrete spec from the previous concretization
+    # translate it back to an abstract spec. Otherwise, keep the abstract spec
+    return [
+        (old_concrete_to_abstract.get(abstract, abstract), concrete)
+        for abstract, concrete in sorted(result_by_user_spec.items())
+    ]
+
+
+def concretize_separately(
+    spec_list: Sequence[SpecPair], tests: TestsType = False
+) -> Sequence[SpecPair]:
+    """Concretizes the input specs separately from each other.
+
+    Args:
+        spec_list: list of tuples to concretize. First entry is abstract spec, second entry is
+            already concrete spec or None if not yet concretized
+        tests: list of package names for which to consider tests dependencies. If True, all nodes
+            will have test dependencies. If False, test dependencies will be disregarded.
+    """
+    import spack.bootstrap
+
+    to_concretize = [abstract for abstract, concrete in spec_list if not concrete]
+    args = [
+        (i, str(abstract), tests)
+        for i, abstract in enumerate(to_concretize)
+        if not abstract.concrete
+    ]
+    ret = [(i, abstract) for i, abstract in enumerate(to_concretize) if abstract.concrete]
+    # Ensure we don't try to bootstrap clingo in parallel
+    with spack.bootstrap.ensure_bootstrap_configuration():
+        spack.bootstrap.ensure_clingo_importable_or_raise()
+
+    # Ensure all the indexes have been built or updated, since
+    # otherwise the processes in the pool may timeout on waiting
+    # for a write lock. We do this indirectly by retrieving the
+    # provider index, which should in turn trigger the update of
+    # all the indexes if there's any need for that.
+    _ = spack.repo.PATH.provider_index
+
+    # Ensure we have compilers in compilers.yaml to avoid that
+    # processes try to write the config file in parallel
+    _ = spack.compilers.all_compilers_config(spack.config.CONFIG)
+
+    # Early return if there is nothing to do
+    if len(args) == 0:
+        # Still have to combine the things that were passed in as abstract with the things
+        # that were passed in as pairs
+        return [(abstract, concrete) for abstract, (_, concrete) in zip(to_concretize, ret)] + [
+            (abstract, concrete) for abstract, concrete in spec_list if concrete
+        ]
+
+    # Solve the environment in parallel on Linux
+    # TODO: support parallel concretization on macOS and Windows
+    num_procs = min(len(args), spack.config.determine_number_of_jobs(parallel=True))
+
+    for j, (i, concrete, duration) in enumerate(
+        spack.util.parallel.imap_unordered(
+            _concretize_task, args, processes=num_procs, debug=tty.is_debug(), maxtaskperchild=1
+        )
+    ):
+        ret.append((i, concrete))
+        percentage = (j + 1) / len(args) * 100
+        tty.verbose(
+            f"{duration:6.1f}s [{percentage:3.0f}%] {concrete.cformat('{hash:7}')} "
+            f"{to_concretize[i].colored_str}"
+        )
+        sys.stdout.flush()
+
+    # Add specs in original order
+    ret.sort(key=lambda x: x[0])
+
+    return [(abstract, concrete) for abstract, (_, concrete) in zip(to_concretize, ret)] + [
+        (abstract, concrete) for abstract, concrete in spec_list if concrete
+    ]
+
+
+def _concretize_task(packed_arguments: Tuple[int, str, TestsType]) -> Tuple[int, Spec, float]:
+    index, spec_str, tests = packed_arguments
+    with tty.SuppressOutput(msg_enabled=False):
+        start = time.time()
+        spec = Spec(spec_str).concretized(tests=tests)
+        return index, spec, time.time() - start
+
+
 class UnavailableCompilerVersionError(spack.error.SpackError):
     """Raised when there is no available compiler that satisfies a
     compiler spec."""

-    def __init__(self, compiler_spec, arch=None):
-        err_msg = "No compilers with spec {0} found".format(compiler_spec)
+    def __init__(self, compiler_spec: CompilerSpec, arch: Optional[ArchSpec] = None) -> None:
+        err_msg = f"No compilers with spec {compiler_spec} found"
         if arch:
-            err_msg += " for operating system {0} and target {1}.".format(arch.os, arch.target)
+            err_msg += f" for operating system {arch.os} and target {arch.target}."

         super().__init__(
             err_msg,
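Per the new type aliases, each entry in spec_list is an (abstract, concrete-or-None) pair, and the helpers return the same pairing with the second element filled in. A hedged usage sketch, assuming these functions are exposed as spack.concretize.* and run inside a `spack python` session (zlib/pkgconf are placeholder package names):

import spack.concretize
from spack.spec import Spec

# Entries are (abstract, concrete-or-None); None marks roots still to be solved.
pairs = [(Spec("zlib"), None), (Spec("pkgconf"), None)]

# Solve all roots in one ASP run; returns the same (abstract, concrete) pairing.
solved = spack.concretize.concretize_together(pairs)
for abstract, concrete in solved:
    print(abstract.name, "->", concrete.dag_hash(7))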
@@ -427,6 +427,10 @@ def __init__(self, *scopes: ConfigScope) -> None:
             self.push_scope(scope)
         self.format_updates: Dict[str, List[ConfigScope]] = collections.defaultdict(list)

+    def ensure_unwrapped(self) -> "Configuration":
+        """Ensure we unwrap this object from any dynamic wrapper (like Singleton)"""
+        return self
+
     @_config_mutator
     def push_scope(self, scope: ConfigScope) -> None:
         """Add a higher precedence scope to the Configuration."""
@@ -714,7 +718,7 @@ def print_section(self, section: str, blame: bool = False, *, scope=None) -> None:
 @contextlib.contextmanager
 def override(
     path_or_scope: Union[ConfigScope, str], value: Optional[Any] = None
-) -> Generator[Union[lang.Singleton, Configuration], None, None]:
+) -> Generator[Configuration, None, None]:
     """Simple way to override config settings within a context.

     Arguments:
@@ -752,13 +756,7 @@ def override(
     assert scope is overrides


-#: configuration scopes added on the command line set by ``spack.main.main()``
-COMMAND_LINE_SCOPES: List[str] = []
-
-
-def _add_platform_scope(
-    cfg: Union[Configuration, lang.Singleton], name: str, path: str, writable: bool = True
-) -> None:
+def _add_platform_scope(cfg: Configuration, name: str, path: str, writable: bool = True) -> None:
     """Add a platform-specific subdirectory for the current platform."""
     platform = spack.platforms.host().name
     scope = DirectoryConfigScope(
@@ -792,9 +790,7 @@ def config_paths_from_entry_points() -> List[Tuple[str, str]]:
     return config_paths


-def _add_command_line_scopes(
-    cfg: Union[Configuration, lang.Singleton], command_line_scopes: List[str]
-) -> None:
+def _add_command_line_scopes(cfg: Configuration, command_line_scopes: List[str]) -> None:
     """Add additional scopes from the --config-scope argument, either envs or dirs."""
     import spack.environment.environment as env  # circular import

@@ -864,18 +860,11 @@ def create() -> Configuration:
         # Each scope can have per-platfom overrides in subdirectories
         _add_platform_scope(cfg, name, path)

-    # add command-line scopes
-    _add_command_line_scopes(cfg, COMMAND_LINE_SCOPES)
-
-    # we make a special scope for spack commands so that they can
-    # override configuration options.
-    cfg.push_scope(InternalConfigScope("command_line"))
-
     return cfg


 #: This is the singleton configuration instance for Spack.
-CONFIG: Union[Configuration, lang.Singleton] = lang.Singleton(create)
+CONFIG: Configuration = lang.Singleton(create)  # type: ignore


 def add_from_file(filename: str, scope: Optional[str] = None) -> None:
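The ensure_unwrapped addition and the `CONFIG: Configuration = lang.Singleton(create)  # type: ignore` annotation address the same issue: the module-level singleton is a lazy proxy that forwards attribute access, so it behaves like a Configuration at runtime but is not one for the type checker. A toy version of the proxy idea (not Spack's real llnl.util.lang.Singleton):

class LazyProxy:
    """Toy lazy-initialization proxy standing in for llnl.util.lang.Singleton."""

    def __init__(self, factory):
        self._factory = factory
        self._instance = None

    def __getattr__(self, name):
        # Only called for attributes not found on the proxy itself,
        # so the first real access triggers construction.
        if self._instance is None:
            self._instance = self._factory()
        return getattr(self._instance, name)


class Configuration:
    def ensure_unwrapped(self) -> "Configuration":
        # Identity on the real object; through the proxy it forces
        # initialization and hands back the underlying instance.
        return self


CONFIG = LazyProxy(Configuration)
real = CONFIG.ensure_unwrapped()  # a Configuration, not the proxy
assert isinstance(real, Configuration)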
@@ -32,6 +32,7 @@
     Container,
     Dict,
     Generator,
+    Iterable,
     List,
     NamedTuple,
     Optional,
@@ -290,55 +291,6 @@ def __reduce__(self):
         return ForbiddenLock, tuple()


-_QUERY_DOCSTRING = """
-
-        Args:
-            query_spec: queries iterate through specs in the database and
-                return those that satisfy the supplied ``query_spec``. If
-                query_spec is `any`, This will match all specs in the
-                database. If it is a spec, we'll evaluate
-                ``spec.satisfies(query_spec)``
-
-            known (bool or None): Specs that are "known" are those
-                for which Spack can locate a ``package.py`` file -- i.e.,
-                Spack "knows" how to install them. Specs that are unknown may
-                represent packages that existed in a previous version of
-                Spack, but have since either changed their name or
-                been removed
-
-            installed (bool or InstallStatus or typing.Iterable or None):
-                if ``True``, includes only installed
-                specs in the search; if ``False`` only missing specs, and if
-                ``any``, all specs in database. If an InstallStatus or iterable
-                of InstallStatus, returns specs whose install status
-                (installed, deprecated, or missing) matches (one of) the
-                InstallStatus. (default: True)
-
-            explicit (bool or None): A spec that was installed
-                following a specific user request is marked as explicit. If
-                instead it was pulled-in as a dependency of a user requested
-                spec it's considered implicit.
-
-            start_date (datetime.datetime or None): filters the query
-                discarding specs that have been installed before ``start_date``.
-
-            end_date (datetime.datetime or None): filters the query discarding
-                specs that have been installed after ``end_date``.
-
-            hashes (Container): list or set of hashes that we can use to
-                restrict the search
-
-            in_buildcache (bool or None): Specs that are marked in
-                this database as part of an associated binary cache are
-                ``in_buildcache``. All other specs are not. This field is used
-                for querying mirror indices. Default is ``any``.
-
-        Returns:
-            list of specs that match the query
-
-    """


 class LockConfiguration(NamedTuple):
     """Data class to configure locks in Database objects

@@ -604,6 +556,9 @@ def _path(self, spec: "spack.spec.Spec") -> pathlib.Path:
         return self.dir / f"{spec.name}-{spec.dag_hash()}"


+SelectType = Callable[[InstallRecord], bool]
+
+
 class Database:
     #: Fields written for each install record
     record_fields: Tuple[str, ...] = DEFAULT_INSTALL_RECORD_FIELDS
@@ -1245,7 +1200,7 @@ def _add(
             self._data[key].explicit = explicit

     @_autospec
-    def add(self, spec: "spack.spec.Spec", *, explicit: bool = False) -> None:
+    def add(self, spec: "spack.spec.Spec", *, explicit: bool = False, allow_missing=False) -> None:
         """Add spec at path to database, locking and reading DB to sync.

         ``add()`` will lock and read from the DB on disk.
@@ -1254,7 +1209,7 @@ def add(self, spec: "spack.spec.Spec", *, explicit: bool = False) -> None:
         # TODO: ensure that spec is concrete?
         # Entire add is transactional.
         with self.write_transaction():
-            self._add(spec, explicit=explicit)
+            self._add(spec, explicit=explicit, allow_missing=allow_missing)

     def _get_matching_spec_key(self, spec: "spack.spec.Spec", **kwargs) -> str:
         """Get the exact spec OR get a single spec that matches."""
@@ -1381,7 +1336,7 @@ def _deprecate(self, spec: "spack.spec.Spec", deprecator: "spack.spec.Spec") -> None:
         self._data[spec_key] = spec_rec

     @_autospec
-    def mark(self, spec: "spack.spec.Spec", key, value) -> None:
+    def mark(self, spec: "spack.spec.Spec", key: str, value: Any) -> None:
         """Mark an arbitrary record on a spec."""
         with self.write_transaction():
             return self._mark(spec, key, value)
@@ -1525,62 +1480,51 @@ def get_by_hash(self, dag_hash, default=None, installed=any):

     def _query(
         self,
-        query_spec=any,
-        known=any,
-        installed=True,
-        explicit=any,
-        start_date=None,
-        end_date=None,
-        hashes=None,
-        in_buildcache=any,
-        origin=None,
-    ):
-        """Run a query on the database."""
+        query_spec: Optional[Union[str, "spack.spec.Spec"]] = None,
+        *,
+        predicate_fn: Optional[SelectType] = None,
+        installed: Union[bool, InstallStatus, List[InstallStatus]] = True,
+        explicit: Optional[bool] = None,
+        start_date: Optional[datetime.datetime] = None,
+        end_date: Optional[datetime.datetime] = None,
+        hashes: Optional[Iterable[str]] = None,
+        in_buildcache: Optional[bool] = None,
+        origin: Optional[str] = None,
+    ) -> List["spack.spec.Spec"]:

-        # TODO: Specs are a lot like queries. Should there be a
-        # TODO: wildcard spec object, and should specs have attributes
-        # TODO: like installed and known that can be queried? Or are
-        # TODO: these really special cases that only belong here?
+        # Restrict the set of records over which we iterate first
+        matching_hashes = self._data
+        if hashes is not None:
+            matching_hashes = {h: self._data[h] for h in hashes if h in self._data}

-        if query_spec is not any:
-            if not isinstance(query_spec, spack.spec.Spec):
-                query_spec = spack.spec.Spec(query_spec)
+        if isinstance(query_spec, str):
+            query_spec = spack.spec.Spec(query_spec)

-            # Just look up concrete specs with hashes; no fancy search.
-            if query_spec.concrete:
-                # TODO: handling of hashes restriction is not particularly elegant.
-                hash_key = query_spec.dag_hash()
-                if hash_key in self._data and (not hashes or hash_key in hashes):
-                    return [self._data[hash_key].spec]
-                else:
-                    return []
+        if query_spec is not None and query_spec.concrete:
+            hash_key = query_spec.dag_hash()
+            if hash_key not in matching_hashes:
+                return []
+            matching_hashes = {hash_key: matching_hashes[hash_key]}

         # Abstract specs require more work -- currently we test
         # against everything.
         results = []
         start_date = start_date or datetime.datetime.min
         end_date = end_date or datetime.datetime.max

         # save specs whose name doesn't match for last, to avoid a virtual check
         deferred = []

-        for key, rec in self._data.items():
-            if hashes is not None and rec.spec.dag_hash() not in hashes:
-                continue
-
+        for rec in matching_hashes.values():
             if origin and not (origin == rec.origin):
                 continue

             if not rec.install_type_matches(installed):
                 continue

-            if in_buildcache is not any and rec.in_buildcache != in_buildcache:
+            if in_buildcache is not None and rec.in_buildcache != in_buildcache:
                 continue

-            if explicit is not any and rec.explicit != explicit:
+            if explicit is not None and rec.explicit != explicit:
                 continue

-            if known is not any and known(rec.spec.name):
+            if predicate_fn is not None and not predicate_fn(rec):
                 continue

             if start_date or end_date:
@@ -1588,7 +1532,7 @@ def _query(
                 if not (start_date < inst_date < end_date):
                     continue

-            if query_spec is any:
+            if query_spec is None or query_spec.concrete:
                 results.append(rec.spec)
                 continue

@@ -1606,36 +1550,118 @@ def _query(
         # If we did fine something, the query spec can't be virtual b/c we matched an actual
         # package installation, so skip the virtual check entirely. If we *didn't* find anything,
         # check all the deferred specs *if* the query is virtual.
-        if not results and query_spec is not any and deferred and query_spec.virtual:
+        if not results and query_spec is not None and deferred and query_spec.virtual:
             results = [spec for spec in deferred if spec.satisfies(query_spec)]

         return results

-    if _query.__doc__ is None:
-        _query.__doc__ = ""
-    _query.__doc__ += _QUERY_DOCSTRING
-
-    def query_local(self, *args, **kwargs):
-        """Query only the local Spack database.
-
-        This function doesn't guarantee any sorting of the returned
-        data for performance reason, since comparing specs for __lt__
-        may be an expensive operation.
-        """
+    def query_local(
+        self,
+        query_spec: Optional[Union[str, "spack.spec.Spec"]] = None,
+        *,
+        predicate_fn: Optional[SelectType] = None,
+        installed: Union[bool, InstallStatus, List[InstallStatus]] = True,
+        explicit: Optional[bool] = None,
+        start_date: Optional[datetime.datetime] = None,
+        end_date: Optional[datetime.datetime] = None,
+        hashes: Optional[List[str]] = None,
+        in_buildcache: Optional[bool] = None,
+        origin: Optional[str] = None,
+    ) -> List["spack.spec.Spec"]:
+        """Queries the local Spack database.
+
+        This function doesn't guarantee any sorting of the returned data for performance reason,
+        since comparing specs for __lt__ may be an expensive operation.
+
+        Args:
+            query_spec: if query_spec is ``None``, match all specs in the database.
+                If it is a spec, return all specs matching ``spec.satisfies(query_spec)``.
+
+            predicate_fn: optional predicate taking an InstallRecord as argument, and returning
+                whether that record is selected for the query. It can be used to craft criteria
+                that need some data for selection not provided by the Database itself.
+
+            installed: if ``True``, includes only installed specs in the search. If ``False`` only
+                missing specs, and if ``any``, all specs in database. If an InstallStatus or
+                iterable of InstallStatus, returns specs whose install status matches at least
+                one of the InstallStatus.
+
+            explicit: a spec that was installed following a specific user request is marked as
+                explicit. If instead it was pulled-in as a dependency of a user requested spec
+                it's considered implicit.
+
+            start_date: if set considers only specs installed from the starting date.
+
+            end_date: if set considers only specs installed until the ending date.
+
+            in_buildcache: specs that are marked in this database as part of an associated binary
+                cache are ``in_buildcache``. All other specs are not. This field is used for
+                querying mirror indices. By default, it does not check this status.
+
+            hashes: list of hashes used to restrict the search
+
+            origin: origin of the spec
+        """
         with self.read_transaction():
-            return self._query(*args, **kwargs)
+            return self._query(
+                query_spec,
+                predicate_fn=predicate_fn,
+                installed=installed,
+                explicit=explicit,
+                start_date=start_date,
+                end_date=end_date,
+                hashes=hashes,
+                in_buildcache=in_buildcache,
+                origin=origin,
+            )

-    if query_local.__doc__ is None:
-        query_local.__doc__ = ""
-    query_local.__doc__ += _QUERY_DOCSTRING
-
-    def query(self, *args, **kwargs):
-        """Query the Spack database including all upstream databases.
-
-        Additional Arguments:
-            install_tree (str): query 'all' (default), 'local', 'upstream', or upstream path
-        """
+    def query(
+        self,
+        query_spec: Optional[Union[str, "spack.spec.Spec"]] = None,
+        *,
+        predicate_fn: Optional[SelectType] = None,
+        installed: Union[bool, InstallStatus, List[InstallStatus]] = True,
+        explicit: Optional[bool] = None,
+        start_date: Optional[datetime.datetime] = None,
+        end_date: Optional[datetime.datetime] = None,
+        in_buildcache: Optional[bool] = None,
+        hashes: Optional[List[str]] = None,
+        origin: Optional[str] = None,
+        install_tree: str = "all",
+    ):
+        """Queries the Spack database including all upstream databases.
+
+        Args:
+            query_spec: if query_spec is ``None``, match all specs in the database.
+                If it is a spec, return all specs matching ``spec.satisfies(query_spec)``.
+
+            predicate_fn: optional predicate taking an InstallRecord as argument, and returning
+                whether that record is selected for the query. It can be used to craft criteria
+                that need some data for selection not provided by the Database itself.
+
+            installed: if ``True``, includes only installed specs in the search. If ``False`` only
+                missing specs, and if ``any``, all specs in database. If an InstallStatus or
+                iterable of InstallStatus, returns specs whose install status matches at least
+                one of the InstallStatus.
+
+            explicit: a spec that was installed following a specific user request is marked as
+                explicit. If instead it was pulled-in as a dependency of a user requested spec
+                it's considered implicit.
+
+            start_date: if set considers only specs installed from the starting date.
+
+            end_date: if set considers only specs installed until the ending date.
+
+            in_buildcache: specs that are marked in this database as part of an associated binary
+                cache are ``in_buildcache``. All other specs are not. This field is used for
+                querying mirror indices. By default, it does not check this status.
+
+            hashes: list of hashes used to restrict the search
+
+            install_tree: query 'all' (default), 'local', 'upstream', or upstream path
+
+            origin: origin of the spec
+        """
-        install_tree = kwargs.pop("install_tree", "all")
         valid_trees = ["all", "upstream", "local", self.root] + [u.root for u in self.upstream_dbs]
         if install_tree not in valid_trees:
             msg = "Invalid install_tree argument to Database.query()\n"
@@ -1651,28 +1677,54 @@ def query(self, *args, **kwargs):
             # queries for upstream DBs need to *not* lock - we may not
             # have permissions to do this and the upstream DBs won't know about
             # us anyway (so e.g. they should never uninstall specs)
-            upstream_results.extend(upstream_db._query(*args, **kwargs) or [])
+            upstream_results.extend(
+                upstream_db._query(
+                    query_spec,
+                    predicate_fn=predicate_fn,
+                    installed=installed,
+                    explicit=explicit,
+                    start_date=start_date,
+                    end_date=end_date,
+                    hashes=hashes,
+                    in_buildcache=in_buildcache,
+                    origin=origin,
+                )
+                or []
+            )

-        local_results = []
+        local_results: Set["spack.spec.Spec"] = set()
         if install_tree in ("all", "local") or self.root == install_tree:
-            local_results = set(self.query_local(*args, **kwargs))
+            local_results = set(
+                self.query_local(
+                    query_spec,
+                    predicate_fn=predicate_fn,
+                    installed=installed,
+                    explicit=explicit,
+                    start_date=start_date,
+                    end_date=end_date,
+                    hashes=hashes,
+                    in_buildcache=in_buildcache,
+                    origin=origin,
+                )
+            )

         results = list(local_results) + list(x for x in upstream_results if x not in local_results)

         return sorted(results)

-    if query.__doc__ is None:
-        query.__doc__ = ""
-    query.__doc__ += _QUERY_DOCSTRING
-
-    def query_one(self, query_spec, known=any, installed=True):
+    def query_one(
+        self,
+        query_spec: Optional[Union[str, "spack.spec.Spec"]],
+        predicate_fn: Optional[SelectType] = None,
+        installed: Union[bool, InstallStatus, List[InstallStatus]] = True,
+    ) -> Optional["spack.spec.Spec"]:
         """Query for exactly one spec that matches the query spec.

-        Raises an assertion error if more than one spec matches the
-        query. Returns None if no installed package matches.
+        Returns None if no installed package matches.
+
+        Raises:
+            AssertionError: if more than one spec matches the query.
         """
-        concrete_specs = self.query(query_spec, known=known, installed=installed)
+        concrete_specs = self.query(query_spec, predicate_fn=predicate_fn, installed=installed)
         assert len(concrete_specs) <= 1
         return concrete_specs[0] if concrete_specs else None

@@ -1719,24 +1771,6 @@ def root(key, record):
             if id(rec.spec) not in needed and rec.installed
         ]

-    def update_explicit(self, spec, explicit):
-        """
-        Update the spec's explicit state in the database.
-
-        Args:
-            spec (spack.spec.Spec): the spec whose install record is being updated
-            explicit (bool): ``True`` if the package was requested explicitly
-                by the user, ``False`` if it was pulled in as a dependency of
-                an explicit package.
-        """
-        rec = self.get_record(spec)
-        if explicit != rec.explicit:
-            with self.write_transaction():
-                message = "{s.name}@{s.version} : marking the package {0}"
-                status = "explicit" if explicit else "implicit"
-                tty.debug(message.format(status, s=spec))
-                rec.explicit = explicit
-

 class NoUpstreamVisitor:
     """Gives edges to upstream specs, but does follow edges from upstream specs."""
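With the `known` callback gone, callers express "unknown packages" (and any other record-level criterion) through predicate_fn, which receives the full InstallRecord rather than just a package name. A hedged sketch of the call sites this enables, mirroring the `spack find --unknown` and module-refresh hunks above (assumes a `spack python` session):

import spack.repo
import spack.store

db = spack.store.STORE.db

# Old-style `known=False` query: records whose package recipe is gone.
unknown = db.query(predicate_fn=lambda rec: not spack.repo.PATH.exists(rec.spec.name))

# The predicate sees the whole InstallRecord, so criteria can combine record
# fields with external data, e.g. keep only records Spack can still rebuild:
rebuildable = db.query(predicate_fn=lambda rec: spack.repo.PATH.exists(rec.spec.name))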
@@ -11,6 +11,7 @@
|
||||
import os.path
|
||||
import re
|
||||
import sys
|
||||
import traceback
|
||||
import warnings
|
||||
from typing import Dict, Iterable, List, Optional, Set, Tuple, Type
|
||||
|
||||
@@ -18,6 +19,7 @@
|
||||
import llnl.util.lang
|
||||
import llnl.util.tty
|
||||
|
||||
import spack.error
|
||||
import spack.spec
|
||||
import spack.util.elf as elf_utils
|
||||
import spack.util.environment
|
||||
@@ -66,6 +68,21 @@ def file_identifier(path):
|
||||
return s.st_dev, s.st_ino
|
||||
|
||||
|
||||
def dedupe_paths(paths: List[str]) -> List[str]:
|
||||
"""Deduplicate paths based on inode and device number. In case the list contains first a
|
||||
symlink and then the directory it points to, the symlink is replaced with the directory path.
|
||||
This ensures that we pick for example ``/usr/bin`` over ``/bin`` if the latter is a symlink to
|
||||
the former`."""
|
||||
seen: Dict[Tuple[int, int], str] = {}
|
||||
for path in paths:
|
||||
identifier = file_identifier(path)
|
||||
if identifier not in seen:
|
||||
seen[identifier] = path
|
||||
elif not os.path.islink(path):
|
||||
seen[identifier] = path
|
||||
return list(seen.values())
|
||||
|
||||
|
||||
def executables_in_path(path_hints: List[str]) -> Dict[str, str]:
|
||||
"""Get the paths of all executables available from the current PATH.
|
||||
|
||||
@@ -82,8 +99,7 @@ def executables_in_path(path_hints: List[str]) -> Dict[str, str]:
|
||||
"""
|
||||
search_paths = llnl.util.filesystem.search_paths_for_executables(*path_hints)
|
||||
# Make use we don't doubly list /usr/lib and /lib etc
|
||||
search_paths = list(llnl.util.lang.dedupe(search_paths, key=file_identifier))
|
||||
return path_to_dict(search_paths)
|
||||
return path_to_dict(dedupe_paths(search_paths))
|
||||
|
||||
|
||||
def accept_elf(path, host_compat):
|
||||
@@ -144,7 +160,7 @@ def libraries_in_ld_and_system_library_path(
     search_paths = list(filter(os.path.isdir, search_paths))

     # Make sure we don't doubly list /usr/lib and /lib etc
-    search_paths = list(llnl.util.lang.dedupe(search_paths, key=file_identifier))
+    search_paths = dedupe_paths(search_paths)

     try:
         host_compat = elf_utils.get_elf_compat(sys.executable)
@@ -260,8 +276,12 @@ def detect_specs(
             )
         except Exception as e:
             specs = []
+            if spack.error.SHOW_BACKTRACE:
+                details = traceback.format_exc()
+            else:
+                details = f"[{e.__class__.__name__}: {e}]"
             warnings.warn(
-                f'error detecting "{pkg.name}" from prefix {candidate_path} [{str(e)}]'
+                f'error detecting "{pkg.name}" from prefix {candidate_path}: {details}'
             )

         if not specs:
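The same flag-gated detail pattern in isolation; `SHOW_BACKTRACE` here is a local stand-in for `spack.error.SHOW_BACKTRACE`:

import traceback
import warnings

SHOW_BACKTRACE = False  # stand-in; Spack flips this when --backtrace is passed

def warn_detection_failure(pkg_name: str, prefix: str, e: Exception) -> None:
    # Full traceback only on request, a compact one-liner otherwise.
    details = traceback.format_exc() if SHOW_BACKTRACE else f"[{e.__class__.__name__}: {e}]"
    warnings.warn(f'error detecting "{pkg_name}" from prefix {prefix}: {details}')

try:
    raise ValueError("not a real compiler")
except Exception as exc:
    warn_detection_failure("gcc", "/usr", exc)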
@@ -435,9 +455,9 @@ def by_path(
             llnl.util.tty.debug(
                 f"[EXTERNAL DETECTION] Skipping {pkg_name}: timeout reached"
             )
-        except Exception as e:
+        except Exception:
             llnl.util.tty.debug(
-                f"[EXTERNAL DETECTION] Skipping {pkg_name}: exception occured {e}"
+                f"[EXTERNAL DETECTION] Skipping {pkg_name}: {traceback.format_exc()}"
             )

     return result
@@ -10,6 +10,7 @@
 import llnl.util.lang

 import spack.error
+import spack.repo
 import spack.spec

 #: Names of possible directives. This list is mostly populated using the @directive decorator.

@@ -63,7 +64,7 @@ def __init__(cls, name, bases, attr_dict):
         # The instance is being initialized: if it is a package we must ensure
         # that the directives are called to set it up.

-        if "spack.pkg" in cls.__module__:
+        if cls.__module__.startswith(spack.repo.ROOT_PYTHON_NAMESPACE):
             # Ensure the presence of the dictionaries associated with the directives.
             # All dictionaries are defaultdicts that create lists for missing keys.
             for d in DirectiveMeta._directive_dict_names:
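Why an anchored prefix test is safer than a substring test, shown with made-up module names (the real constant lives in `spack.repo.ROOT_PYTHON_NAMESPACE`):

ROOT = "spack.pkg"  # assumed value, for illustration only

modules = ["spack.pkg.builtin.zlib", "vendor.spack.pkg.mirror", "spack.pkgX.thing"]
substring = [m for m in modules if "spack.pkg" in m]         # matches all three
anchored = [m for m in modules if m.startswith(ROOT + ".")]  # only the first
print(substring, anchored, sep="\n")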
@@ -473,6 +473,7 @@
     active_environment,
     all_environment_names,
     all_environments,
+    as_env_dir,
     create,
     create_in_dir,
     deactivate,
@@ -480,6 +481,7 @@
     default_view_name,
     display_specs,
     environment_dir_from_name,
+    environment_from_name_or_dir,
     exists,
     initialize_environment_dir,
     installed_specs,
@@ -507,6 +509,7 @@
     "active_environment",
     "all_environment_names",
     "all_environments",
+    "as_env_dir",
     "create",
     "create_in_dir",
     "deactivate",
@@ -514,6 +517,7 @@
     "default_view_name",
     "display_specs",
     "environment_dir_from_name",
+    "environment_from_name_or_dir",
     "exists",
     "initialize_environment_dir",
     "installed_specs",
@@ -9,11 +9,13 @@

 import os
 import re
+import shlex
 from enum import Enum
 from typing import List, Optional

 import spack.deptypes as dt
 import spack.environment.environment as ev
+import spack.paths
 import spack.spec
 import spack.traverse as traverse

@@ -226,6 +228,7 @@ def to_dict(self):
             "install_deps_target": self._target("install-deps"),
             "any_hash_target": self._target("%"),
             "jobserver_support": self.jobserver_support,
+            "spack_script": shlex.quote(spack.paths.spack_script),
             "adjacency_list": self.make_adjacency_list,
             "phony_convenience_targets": " ".join(self.phony_convenience_targets),
             "pkg_ids_variable": self.pkg_identifier_variable,
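The new `spack_script` entry is shell-quoted before it lands in generated Makefile recipes. The effect of `shlex.quote`, on a hypothetical path:

import shlex

spack_script = "/opt/my spack/bin/spack"  # hypothetical install path with a space
print(shlex.quote(spack_script))          # '/opt/my spack/bin/spack'
print(shlex.quote("/usr/bin/spack"))      # /usr/bin/spack (safe strings pass through)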
@@ -11,12 +11,10 @@
 import re
 import shutil
 import stat
-import sys
-import time
 import urllib.parse
 import urllib.request
 import warnings
-from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
+from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union

 import llnl.util.filesystem as fs
 import llnl.util.tty as tty
@@ -26,7 +24,6 @@

 import spack
 import spack.caches
-import spack.compilers
 import spack.concretize
 import spack.config
 import spack.deptypes as dt
@@ -45,7 +42,6 @@
 import spack.util.environment
 import spack.util.hash
 import spack.util.lock as lk
-import spack.util.parallel
 import spack.util.path
 import spack.util.spack_json as sjson
 import spack.util.spack_yaml as syaml
@@ -57,6 +53,8 @@
 from spack.spec_list import SpecList
 from spack.util.path import substitute_path_variables

+SpecPair = spack.concretize.SpecPair
+
 #: environment variable used to indicate the active environment
 spack_env_var = "SPACK_ENV"
@@ -277,6 +275,22 @@ def is_env_dir(path):
     return os.path.isdir(path) and os.path.exists(os.path.join(path, manifest_name))


+def as_env_dir(name_or_dir):
+    """Translate an environment name or directory to the environment directory"""
+    if is_env_dir(name_or_dir):
+        return name_or_dir
+    else:
+        validate_env_name(name_or_dir)
+        if not exists(name_or_dir):
+            raise SpackEnvironmentError("no such environment '%s'" % name_or_dir)
+        return root(name_or_dir)
+
+
+def environment_from_name_or_dir(name_or_dir):
+    """Get an environment from the supplied name or directory."""
+    return Environment(as_env_dir(name_or_dir))
+
+
 def read(name):
     """Get an environment with the supplied name."""
     validate_env_name(name)
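The shape of the new `as_env_dir` helper, reduced to a self-contained sketch (the registry dict stands in for Spack's named-environment lookup):

import os

def as_dir(name_or_dir: str, named: dict) -> str:
    # Accept either a directory path or a registered name; normalize to a path.
    if os.path.isdir(name_or_dir):
        return name_or_dir
    if name_or_dir not in named:
        raise ValueError(f"no such environment '{name_or_dir}'")
    return named[name_or_dir]

print(as_dir("/tmp", {}))              # already a directory: returned as-is
print(as_dir("dev", {"dev": "/tmp"}))  # a name: resolved through the registry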
@@ -1159,6 +1173,8 @@ def clear(self, re_read=False):
         # things that cannot be recreated from file
         self.new_specs = []  # write packages for these on write()

+        self.manifest.clear()
+
     @property
     def active(self):
         """True if this environment is currently active."""
@@ -1492,7 +1508,7 @@ def deconcretize(self, spec: spack.spec.Spec, concrete: bool = True):

     def _get_specs_to_concretize(
         self,
-    ) -> Tuple[Set[spack.spec.Spec], Set[spack.spec.Spec], List[spack.spec.Spec]]:
+    ) -> Tuple[List[spack.spec.Spec], List[spack.spec.Spec], List[SpecPair]]:
         """Compute specs to concretize for unify:true and unify:when_possible.

         This includes new user specs and any already concretized specs.

@@ -1502,23 +1518,20 @@ def _get_specs_to_concretize(
         """
         # Exit early if the set of concretized specs is the set of user specs
-        new_user_specs = set(self.user_specs) - set(self.concretized_user_specs)
-        kept_user_specs = set(self.user_specs) & set(self.concretized_user_specs)
+        new_user_specs = list(set(self.user_specs) - set(self.concretized_user_specs))
+        kept_user_specs = list(set(self.user_specs) & set(self.concretized_user_specs))
+        kept_user_specs += self.included_user_specs
         if not new_user_specs:
             return new_user_specs, kept_user_specs, []

-        concrete_specs_to_keep = [
-            concrete
-            for abstract, concrete in self.concretized_specs()
-            if abstract in kept_user_specs
-        ]
-
-        specs_to_concretize = list(new_user_specs) + concrete_specs_to_keep
+        specs_to_concretize = [(s, None) for s in new_user_specs] + [
+            (abstract, concrete)
+            for abstract, concrete in self.concretized_specs()
+            if abstract in kept_user_specs
+        ]
         return new_user_specs, kept_user_specs, specs_to_concretize
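The `(abstract, concrete)` pair shape this return type standardizes on, with strings standing in for `Spec` objects:

from typing import List, Optional, Tuple

SpecPair = Tuple[str, Optional[str]]  # Spack pairs Spec objects; strings suffice here

new_user_specs = ["zlib", "cmake"]
kept_pairs: List[SpecPair] = [("openssl", "openssl@3.3")]  # already concretized
# New specs carry None until the solver assigns them a concrete counterpart.
specs_to_concretize: List[SpecPair] = [(s, None) for s in new_user_specs] + kept_pairs
print(specs_to_concretize)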
-    def _concretize_together_where_possible(
-        self, tests: bool = False
-    ) -> List[Tuple[spack.spec.Spec, spack.spec.Spec]]:
+    def _concretize_together_where_possible(self, tests: bool = False) -> Sequence[SpecPair]:
         # Avoid cyclic dependency
         import spack.solver.asp

@@ -1527,36 +1540,26 @@ def _concretize_together_where_possible(
         if not new_user_specs:
             return []

-        old_concrete_to_abstract = {
-            concrete: abstract for (abstract, concrete) in self.concretized_specs()
-        }
-
         self.concretized_user_specs = []
         self.concretized_order = []
         self.specs_by_hash = {}

-        result_by_user_spec = {}
-        solver = spack.solver.asp.Solver()
-        allow_deprecated = spack.config.get("config:deprecated", False)
-        for result in solver.solve_in_rounds(
-            specs_to_concretize, tests=tests, allow_deprecated=allow_deprecated
-        ):
-            result_by_user_spec.update(result.specs_by_input)
-
-        result = []
-        for abstract, concrete in sorted(result_by_user_spec.items()):
-            # If the "abstract" spec is a concrete spec from the previous concretization
-            # translate it back to an abstract spec. Otherwise, keep the abstract spec
-            abstract = old_concrete_to_abstract.get(abstract, abstract)
-            # Return only the new specs
-            if abstract in new_user_specs:
-                result.append((abstract, concrete))
-            self._add_concrete_spec(abstract, concrete)
-
-        return result
+        ret = []
+        result = spack.concretize.concretize_together_when_possible(
+            specs_to_concretize, tests=tests
+        )
+        for abstract, concrete in result:
+            # Only add to the environment if it's from this environment (not included in)
+            if abstract in self.user_specs:
+                self._add_concrete_spec(abstract, concrete)
+            # Return only the new specs
+            if abstract in new_user_specs:
+                ret.append((abstract, concrete))
+
+        return ret

-    def _concretize_together(
-        self, tests: bool = False
-    ) -> List[Tuple[spack.spec.Spec, spack.spec.Spec]]:
+    def _concretize_together(self, tests: bool = False) -> Sequence[SpecPair]:
         """Concretization strategy that concretizes all the specs
         in the same DAG.
         """
@@ -1570,8 +1573,8 @@ def _concretize_together(
         self.specs_by_hash = {}

         try:
-            concrete_specs: List[spack.spec.Spec] = spack.concretize.concretize_specs_together(
-                *specs_to_concretize, tests=tests
+            concretized_specs = spack.concretize.concretize_together(
+                specs_to_concretize, tests=tests
             )
         except spack.error.UnsatisfiableSpecError as e:
             # "Enhance" the error message for multiple root specs, suggest a less strict
@@ -1589,14 +1592,13 @@ def _concretize_together(
             )
             raise

-        # set() | set() does not preserve ordering, even though sets are ordered
-        ordered_user_specs = list(new_user_specs) + list(kept_user_specs)
-        concretized_specs = [x for x in zip(ordered_user_specs, concrete_specs)]
         for abstract, concrete in concretized_specs:
-            self._add_concrete_spec(abstract, concrete)
+            # Don't add if it's just included
+            if abstract in self.user_specs:
+                self._add_concrete_spec(abstract, concrete)

-        # zip truncates the longer list, which is exactly what we want here
-        return list(zip(new_user_specs, concrete_specs))
+        # Return the portion of the return value that is new
+        return concretized_specs[: len(new_user_specs)]
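Why the final slice is equivalent to the old `zip` truncation, assuming (as the construction above arranges) that results come back in input order with the new specs first:

new_user_specs = ["zlib", "cmake"]
# hypothetical solver output, ordered as submitted: new specs first, kept after
concretized_specs = [("zlib", "zlib@1.3.1"), ("cmake", "cmake@3.30"), ("openssl", "openssl@3.3")]
assert concretized_specs[: len(new_user_specs)] == list(
    zip(new_user_specs, [c for _, c in concretized_specs])
)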
     def _concretize_separately(self, tests=False):
         """Concretization strategy that concretizes separately one
@@ -1618,71 +1620,16 @@ def _concretize_separately(self, tests=False):
             concrete = old_specs_by_hash[h]
             self._add_concrete_spec(s, concrete, new=False)

-        # Concretize any new user specs that we haven't concretized yet
-        args, root_specs, i = [], [], 0
-        for uspec in self.user_specs:
-            if uspec not in old_concretized_user_specs:
-                root_specs.append(uspec)
-                args.append((i, str(uspec), tests))
-                i += 1
+        to_concretize = [
+            (root, None) for root in self.user_specs if root not in old_concretized_user_specs
+        ]
+        concretized_specs = spack.concretize.concretize_separately(to_concretize, tests=tests)

-        # Ensure we don't try to bootstrap clingo in parallel
-        with spack.bootstrap.ensure_bootstrap_configuration():
-            spack.bootstrap.ensure_clingo_importable_or_raise()
-
-        # Ensure all the indexes have been built or updated, since
-        # otherwise the processes in the pool may timeout on waiting
-        # for a write lock. We do this indirectly by retrieving the
-        # provider index, which should in turn trigger the update of
-        # all the indexes if there's any need for that.
-        _ = spack.repo.PATH.provider_index
-
-        # Ensure we have compilers in compilers.yaml to avoid that
-        # processes try to write the config file in parallel
-        _ = spack.compilers.all_compilers_config(spack.config.CONFIG)
-
-        # Early return if there is nothing to do
-        if len(args) == 0:
-            return []
-
-        # Solve the environment in parallel on Linux
-        start = time.time()
-        num_procs = min(len(args), spack.config.determine_number_of_jobs(parallel=True))
-
-        # TODO: support parallel concretization on macOS and Windows
-        msg = "Starting concretization"
-        if sys.platform not in ("darwin", "win32") and num_procs > 1:
-            msg += f" pool with {num_procs} processes"
-        tty.msg(msg)
-
-        batch = []
-        for j, (i, concrete, duration) in enumerate(
-            spack.util.parallel.imap_unordered(
-                _concretize_task,
-                args,
-                processes=num_procs,
-                debug=tty.is_debug(),
-                maxtaskperchild=1,
-            )
-        ):
-            batch.append((i, concrete))
-            percentage = (j + 1) / len(args) * 100
-            tty.verbose(
-                f"{duration:6.1f}s [{percentage:3.0f}%] {concrete.cformat('{hash:7}')} "
-                f"{root_specs[i].colored_str}"
-            )
-            sys.stdout.flush()
-
-        # Add specs in original order
-        batch.sort(key=lambda x: x[0])
-        by_hash = {}  # for attaching information on test dependencies
-        for root, (_, concrete) in zip(root_specs, batch):
-            self._add_concrete_spec(root, concrete)
+        by_hash = {}
+        for abstract, concrete in concretized_specs:
+            self._add_concrete_spec(abstract, concrete)
             by_hash[concrete.dag_hash()] = concrete

-        finish = time.time()
-        tty.msg(f"Environment concretized in {finish - start:.2f} seconds")
-
         # Unify the specs objects, so we get correct references to all parents
         self._read_lockfile_dict(self._to_lockfile_dict())
@@ -1702,11 +1649,7 @@ def _concretize_separately(self, tests=False):
                     test_dependency.copy(), depflag=dt.TEST, virtuals=current_edge.virtuals
                 )

-        results = [
-            (abstract, self.specs_by_hash[h])
-            for abstract, h in zip(self.concretized_user_specs, self.concretized_order)
-        ]
-        return results
+        return concretized_specs

     @property
     def default_view(self):
@@ -1954,17 +1897,16 @@ def install_specs(self, specs: Optional[List[Spec]] = None, **install_args):
         specs = specs if specs is not None else roots

         # Extend the set of specs to overwrite with modified dev specs and their parents
-        overwrite: Set[str] = set()
-        overwrite.update(install_args.get("overwrite", []), self._dev_specs_that_need_overwrite())
-        install_args["overwrite"] = overwrite
+        install_args["overwrite"] = {
+            *install_args.get("overwrite", ()),
+            *self._dev_specs_that_need_overwrite(),
+        }

-        explicit: Set[str] = set()
-        explicit.update(
-            install_args.get("explicit", []),
-            (s.dag_hash() for s in specs),
-            (s.dag_hash() for s in roots),
-        )
-        install_args["explicit"] = explicit
+        # Only environment roots are marked explicit
+        install_args["explicit"] = {
+            *install_args.get("explicit", ()),
+            *(s.dag_hash() for s in roots),
+        }

         PackageInstaller([spec.package for spec in specs], **install_args).install()
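The set-literal unpacking idiom used above for `overwrite` and `explicit`, in isolation: one expression merges a possibly-absent caller-provided iterable with newly computed members.

install_args = {"overwrite": ["aaa111"]}            # caller-provided hashes (may be absent)
dev_specs_needing_overwrite = ["bbb222", "aaa111"]  # hypothetical DAG hashes
install_args["overwrite"] = {*install_args.get("overwrite", ()), *dev_specs_needing_overwrite}
print(install_args["overwrite"])  # {'aaa111', 'bbb222'} -- duplicates collapse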
@@ -2163,6 +2105,13 @@ def _concrete_specs_dict(self):
             # Assumes no legacy formats, since this was just created.
             spec_dict[ht.dag_hash.name] = s.dag_hash()
             concrete_specs[s.dag_hash()] = spec_dict
+
+            if s.build_spec is not s:
+                for d in s.build_spec.traverse():
+                    build_spec_dict = d.node_dict_with_hashes(hash=ht.dag_hash)
+                    build_spec_dict[ht.dag_hash.name] = d.dag_hash()
+                    concrete_specs[d.dag_hash()] = build_spec_dict
+
         return concrete_specs

     def _concrete_roots_dict(self):
@@ -2322,7 +2271,7 @@ def filter_specs(self, reader, json_specs_by_hash, order_concretized):
             specs_by_hash[lockfile_key] = spec

         # Second pass: For each spec, get its dependencies from the node dict
-        # and add them to the spec
+        # and add them to the spec, including build specs
         for lockfile_key, node_dict in json_specs_by_hash.items():
             name, data = reader.name_and_data(node_dict)
             for _, dep_hash, deptypes, _, virtuals in reader.dependencies_from_node_dict(data):
@@ -2330,6 +2279,10 @@ def filter_specs(self, reader, json_specs_by_hash, order_concretized):
                     specs_by_hash[dep_hash], depflag=dt.canonicalize(deptypes), virtuals=virtuals
                 )

+            if "build_spec" in node_dict:
+                _, bhash, _ = reader.extract_build_spec_info_from_node_dict(node_dict)
+                specs_by_hash[lockfile_key]._build_spec = specs_by_hash[bhash]
+
         # Traverse the root specs one at a time in the order they appear.
         # The first time we see each DAG hash, that's the one we want to
         # keep. This is only required as long as we support older lockfile
@@ -2503,14 +2456,6 @@ def display_specs(specs):
     print(tree_string)


-def _concretize_task(packed_arguments) -> Tuple[int, Spec, float]:
-    index, spec_str, tests = packed_arguments
-    with tty.SuppressOutput(msg_enabled=False):
-        start = time.time()
-        spec = Spec(spec_str).concretized(tests=tests)
-        return index, spec, time.time() - start
-
-
 def make_repo_path(root):
     """Make a RepoPath from the repo subdirectories in an environment."""
     path = spack.repo.RepoPath(cache=spack.caches.MISC_CACHE)
@@ -2789,6 +2734,11 @@ def remove_user_spec(self, user_spec: str) -> None:
             raise SpackEnvironmentError(msg) from e
         self.changed = True

+    def clear(self) -> None:
+        """Clear all user specs from the list of root specs"""
+        self.configuration["specs"] = []
+        self.changed = True
+
     def override_user_spec(self, user_spec: str, idx: int) -> None:
         """Overrides the user spec at index idx with the one passed as input.
@@ -48,8 +48,6 @@ def activate_header(env, shell, prompt=None, view: Optional[str] = None):
         cmds += 'set "SPACK_ENV=%s"\n' % env.path
         if view:
             cmds += 'set "SPACK_ENV_VIEW=%s"\n' % view
-        # TODO: despacktivate
-        # TODO: prompt
     elif shell == "pwsh":
         cmds += "$Env:SPACK_ENV='%s'\n" % env.path
         if view:
@@ -12,6 +12,9 @@
 #: this is module-scoped because it needs to be set very early
 debug = 0

+#: whether to show a backtrace when an error is printed, enabled with --backtrace.
+SHOW_BACKTRACE = False
+

 class SpackError(Exception):
     """This is the superclass for all Spack errors.
@@ -33,6 +33,7 @@
 from llnl.util.tty.color import colorize

 import spack.config
+import spack.directory_layout
 import spack.paths
 import spack.projections
 import spack.relocate
@@ -50,7 +51,7 @@
 _projections_path = ".spack/projections.yaml"


-LinkCallbackType = Callable[[str, str, "FilesystemView", Optional["spack.spec.Spec"]], None]
+LinkCallbackType = Callable[[str, str, "FilesystemView", Optional[spack.spec.Spec]], None]


 def view_symlink(src: str, dst: str, *args, **kwargs) -> None:
@@ -62,7 +63,7 @@ def view_hardlink(src: str, dst: str, *args, **kwargs) -> None:


 def view_copy(
-    src: str, dst: str, view: "FilesystemView", spec: Optional["spack.spec.Spec"] = None
+    src: str, dst: str, view: "FilesystemView", spec: Optional[spack.spec.Spec] = None
 ) -> None:
     """
     Copy a file from src to dst.
@@ -100,10 +101,12 @@ def view_copy(

     spack.relocate.relocate_text(files=[dst], prefixes=prefix_to_projection)

-    try:
-        os.chown(dst, src_stat.st_uid, src_stat.st_gid)
-    except OSError:
-        tty.debug(f"Can't change the permissions for {dst}")
+    # The os module on Windows does not have a chown function.
+    if sys.platform != "win32":
+        try:
+            os.chown(dst, src_stat.st_uid, src_stat.st_gid)
+        except OSError:
+            tty.debug(f"Can't change the permissions for {dst}")


 #: supported string values for `link_type` in an env, mapped to canonical values
@@ -158,7 +161,7 @@ class FilesystemView:
     def __init__(
         self,
         root: str,
-        layout: "spack.directory_layout.DirectoryLayout",
+        layout: spack.directory_layout.DirectoryLayout,
         *,
         projections: Optional[Dict] = None,
         ignore_conflicts: bool = False,
@@ -180,7 +183,10 @@ def __init__(

         # Setup link function to include view
         self.link_type = link_type
-        self.link = ft.partial(function_for_link_type(link_type), view=self)
+        self._link = function_for_link_type(link_type)
+
+    def link(self, src: str, dst: str, spec: Optional[spack.spec.Spec] = None) -> None:
+        self._link(src, dst, self, spec)

     def add_specs(self, *specs, **kwargs):
         """
@@ -281,7 +287,7 @@ class YamlFilesystemView(FilesystemView):
     def __init__(
         self,
         root: str,
-        layout: "spack.directory_layout.DirectoryLayout",
+        layout: spack.directory_layout.DirectoryLayout,
         *,
         projections: Optional[Dict] = None,
         ignore_conflicts: bool = False,
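The `functools.partial` attribute becomes a real method above. A reduced sketch of the two styles (names are illustrative, not Spack's; the stated motivation is an assumption):

def symlink_fn(src: str, dst: str, view: "View", spec=None) -> None:
    print(f"{view.name}: {src} -> {dst}")

class View:
    def __init__(self, name: str):
        self.name = name
        # Previously: self.link = functools.partial(symlink_fn, view=self),
        # an opaque instance attribute. Storing the raw callback and wrapping
        # it in a real method keeps the signature visible and overridable.
        self._link = symlink_fn

    def link(self, src: str, dst: str, spec=None) -> None:
        self._link(src, dst, self, spec)

View("default").link("a.txt", "b.txt")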
@@ -21,43 +21,40 @@
 features.
 """
 import importlib
-
-from llnl.util.lang import ensure_last, list_modules
-
-import spack.paths
+import types
+from typing import List, Optional


 class _HookRunner:
-    #: Stores all hooks on first call, shared among
-    #: all HookRunner objects
-    _hooks = None
+    #: Order in which hooks are executed
+    HOOK_ORDER = [
+        "spack.hooks.module_file_generation",
+        "spack.hooks.licensing",
+        "spack.hooks.sbang",
+        "spack.hooks.windows_runtime_linkage",
+        "spack.hooks.drop_redundant_rpaths",
+        "spack.hooks.absolutify_elf_sonames",
+        "spack.hooks.permissions_setters",
+        # after all mutations to the install prefix, write metadata
+        "spack.hooks.write_install_manifest",
+        # after all metadata is written
+        "spack.hooks.autopush",
+    ]
+
+    #: Contains all hook modules after first call, shared among all HookRunner objects
+    _hooks: Optional[List[types.ModuleType]] = None

     def __init__(self, hook_name):
         self.hook_name = hook_name

-    @classmethod
-    def _populate_hooks(cls):
-        # Lazily populate the list of hooks
-        cls._hooks = []
-
-        relative_names = list(list_modules(spack.paths.hooks_path))
-
-        # Ensure that write_install_manifest comes last
-        ensure_last(relative_names, "absolutify_elf_sonames", "write_install_manifest")
-
-        for name in relative_names:
-            module_name = __name__ + "." + name
-            module_obj = importlib.import_module(module_name)
-            cls._hooks.append((module_name, module_obj))
-
     @property
-    def hooks(self):
+    def hooks(self) -> List[types.ModuleType]:
         if not self._hooks:
-            self._populate_hooks()
+            self._hooks = [importlib.import_module(module_name) for module_name in self.HOOK_ORDER]
         return self._hooks

     def __call__(self, *args, **kwargs):
-        for _, module in self.hooks:
+        for module in self.hooks:
             if hasattr(module, self.hook_name):
                 hook = getattr(module, self.hook_name)
                 if hasattr(hook, "__call__"):
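A compressed sketch of the new fixed-order dispatch, with standard-library module names standing in for the `spack.hooks.*` entries:

import importlib
import types
from typing import List, Optional

HOOK_ORDER = ["json", "csv"]  # stand-ins; Spack lists its spack.hooks.* modules here

_hooks: Optional[List[types.ModuleType]] = None

def run_hook(hook_name: str, *args, **kwargs) -> None:
    global _hooks
    if _hooks is None:
        # import lazily, once, in the declared order
        _hooks = [importlib.import_module(name) for name in HOOK_ORDER]
    for module in _hooks:
        hook = getattr(module, hook_name, None)
        if callable(hook):
            hook(*args, **kwargs)

run_hook("dumps", {})  # json.dumps({}) runs; csv defines no `dumps`, so it is skipped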
Some files were not shown because too many files have changed in this diff.