Compare commits
922 commits: per-instan... → e4s-21.05
@@ -8,4 +8,4 @@ share/spack/dotkit/*
share/spack/lmod/*
share/spack/modules/*
lib/spack/spack/test/*
var/spack/cache/*
.github/workflows/linux_build_tests.yaml (vendored, 4 changes)
@@ -42,14 +42,14 @@ jobs:
package:
- lz4 # MakefilePackage
- mpich~fortran # AutotoolsPackage
- tut # WafPackage
- 'tut%gcc@:10.99.99' # WafPackage
- py-setuptools # PythonPackage
- openjpeg # CMakePackage
- r-rcpp # RPackage
- ruby-rake # RubyPackage
steps:
- uses: actions/checkout@v2
- uses: actions/cache@v2.1.4
- uses: actions/cache@v2.1.5
with:
path: ~/.ccache
key: ccache-build-${{ matrix.package }}
.github/workflows/unit_tests.yaml (vendored, 77 changes)
@@ -293,30 +293,61 @@ jobs:
clingo-cffi:
needs: [ validate, style, documentation, changes ]
runs-on: ubuntu-latest
container: spack/github-actions:clingo-cffi
steps:
- name: Run unit tests
run: |
whoami && echo PWD=$PWD && echo HOME=$HOME && echo SPACK_TEST_SOLVER=$SPACK_TEST_SOLVER
python3 -c "import clingo; print(hasattr(clingo.Symbol, '_rep'), clingo.__version__)"
git clone https://github.com/spack/spack.git && cd spack
git fetch origin ${{ github.ref }}:test-branch
git checkout test-branch
. share/spack/setup-env.sh
spack compiler find
spack solve mpileaks%gcc
if [ "${{ needs.changes.outputs.with_coverage }}" == "true" ]
then
coverage run $(which spack) unit-test -v -x
coverage combine
coverage xml
else
$(which spack) unit-test -m "not maybeslow" -k "package_sanity"
fi
- uses: codecov/codecov-action@v1
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,clingo
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install System packages
run: |
sudo apt-get -y update
# Needed for unit tests
sudo apt-get install -y coreutils gfortran graphviz gnupg2 mercurial
sudo apt-get install -y ninja-build patchelf
# Needed for kcov
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
- name: Install kcov for bash script coverage
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env:
KCOV_VERSION: 34
run: |
KCOV_ROOT=$(mktemp -d)
wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
mkdir -p ${KCOV_ROOT}/build
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools codecov coverage clingo
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Run unit tests (full suite with coverage)
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
env:
COVERAGE: true
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
coverage combine
coverage xml
- name: Run unit tests (reduced suite without coverage)
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
env:
ONLY_PACKAGES: true
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@v1
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
with:
flags: unittests,linux,clingo
# Run unit tests on MacOS
build:
needs: [ validate, style, documentation, changes ]
.mailmap (3 changes)
@@ -3,7 +3,8 @@ Adam Moody <moody20@llnl.gov> Adam T. Moody
Alfredo Gimenez <gimenez1@llnl.gov> Alfredo Gimenez <alfredo.gimenez@gmail.com>
Alfredo Gimenez <gimenez1@llnl.gov> Alfredo Adolfo Gimenez <alfredo.gimenez@gmail.com>
Andrew Williams <williamsa89@cardiff.ac.uk> Andrew Williams <andrew@alshain.org.uk>
Axel Huebl <a.huebl@hzdr.de> Axel Huebl <axel.huebl@plasma.ninja>
Axel Huebl <axelhuebl@lbl.gov> Axel Huebl <a.huebl@hzdr.de>
Axel Huebl <axelhuebl@lbl.gov> Axel Huebl <axel.huebl@plasma.ninja>
Ben Boeckel <ben.boeckel@kitware.com> Ben Boeckel <mathstuf@gmail.com>
Ben Boeckel <ben.boeckel@kitware.com> Ben Boeckel <mathstuf@users.noreply.github.com>
Benedikt Hegner <hegner@cern.ch> Benedikt Hegner <benedikt.hegner@cern.ch>
@@ -1,6 +1,6 @@
#!/bin/sh
#
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# sbang project developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -142,11 +142,13 @@ config:
locks: true

# The maximum number of jobs to use when running `make` in parallel,
# always limited by the number of cores available. For instance:
# - If set to 16 on a 4 cores machine `spack install` will run `make -j4`
# - If set to 16 on a 18 cores machine `spack install` will run `make -j16`
# If not set, Spack will use all available cores up to 16.
# The maximum number of jobs to use for the build system (e.g. `make`), when
# the -j flag is not given on the command line. Defaults to 16 when not set.
# Note that the maximum number of jobs is limited by the number of cores
# available, taking thread affinity into account when supported. For instance:
# - With `build_jobs: 16` and 4 cores available `spack install` will run `make -j4`
# - With `build_jobs: 16` and 32 cores available `spack install` will run `make -j16`
# - With `build_jobs: 2` and 4 cores available `spack install -j6` will run `make -j6`
# build_jobs: 16
etc/spack/defaults/cray/modules.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
# -------------------------------------------------------------------------
# This is the default configuration for Spack's module file generation.
#
# Settings here are versioned with Spack and are intended to provide
# sensible defaults out of the box. Spack maintainers should edit this
# file to keep it current.
#
# Users can override these settings by editing the following files.
#
# Per-spack-instance settings (overrides defaults):
#   $SPACK_ROOT/etc/spack/modules.yaml
#
# Per-user settings (overrides default and site settings):
#   ~/.spack/modules.yaml
# -------------------------------------------------------------------------
modules:
  prefix_inspections:
    lib:
      - LD_LIBRARY_PATH
    lib64:
      - LD_LIBRARY_PATH
lib/spack/docs/analyze.rst (new file, 162 lines)
@@ -0,0 +1,162 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)

.. _analyze:

=======
Analyze
=======

The analyze command is a front-end to various tools that let us analyze
package installations. Each analyzer is a module for a different kind
of analysis that can be done on a package installation, including (but not
limited to) binary, log, or text analysis. Thus, the analyze command group
allows you to take an existing package install, choose an analyzer,
and extract some output for the package using it.

-----------------
Analyzer Metadata
-----------------

For all analyzers, we write to an ``analyzers`` folder in ``~/.spack``, or the
value that you specify in your spack config at ``config:analyzers_dir``.
For example, here we see the results of running an analysis on zlib:

.. code-block:: console

   $ tree ~/.spack/analyzers/
   └── linux-ubuntu20.04-skylake
       └── gcc-9.3.0
           └── zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2
               ├── environment_variables
               │   └── spack-analyzer-environment-variables.json
               ├── install_files
               │   └── spack-analyzer-install-files.json
               └── libabigail
                   └── spack-analyzer-libabigail-libz.so.1.2.11.xml

This means that you can always find analyzer output in this folder, and it
is organized with the same logic as the package install it was run for.
If you want to customize this top level folder, simply provide the ``--path``
argument to ``spack analyze run``. The nested organization will be maintained
within your custom root.

-----------------
Listing Analyzers
-----------------

If you aren't familiar with Spack's analyzers, you can quickly list those that
are available:

.. code-block:: console

   $ spack analyze list-analyzers
   install_files : install file listing read from install_manifest.json
   environment_variables : environment variables parsed from spack-build-env.txt
   config_args : config args loaded from spack-configure-args.txt
   abigail : Application Binary Interface (ABI) features for objects

In the above, the first three are fairly simple - parsing metadata files from
a package install directory to save

-------------------
Analyzing a Package
-------------------

The analyze command, akin to install, will accept a package spec to perform
an analysis for. The package must be installed. Let's walk through an example
with zlib. We first ask to analyze it. However, since we have more than one
install, we are asked to disambiguate:

.. code-block:: console

   $ spack analyze run zlib
   ==> Error: zlib matches multiple packages.
     Matching packages:
       fz2bs56 zlib@1.2.11%gcc@7.5.0 arch=linux-ubuntu18.04-skylake
       sl7m27m zlib@1.2.11%gcc@9.3.0 arch=linux-ubuntu20.04-skylake
     Use a more specific spec.

We can then specify the spec version that we want to analyze:

.. code-block:: console

   $ spack analyze run zlib/fz2bs56

If you don't provide any specific analyzer names, by default all analyzers
(shown in the ``list-analyzers`` subcommand list) will be run. If an analyzer does not
have any result, it will be skipped. For example, here is a result running for
zlib:

.. code-block:: console

   $ ls ~/.spack/analyzers/linux-ubuntu20.04-skylake/gcc-9.3.0/zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2/
   spack-analyzer-environment-variables.json
   spack-analyzer-install-files.json
   spack-analyzer-libabigail-libz.so.1.2.11.xml

If you want to run a specific analyzer, ask for it with ``--analyzer``. Here we run
spack analyze on libabigail (already installed) using libabigail:

.. code-block:: console

   $ spack analyze run --analyzer abigail libabigail

.. _analyze_monitoring:

----------------------
Monitoring An Analysis
----------------------

For any kind of analysis, you can
use a `spack monitor <https://github.com/spack/spack-monitor>`_ "Spackmon"
as a server to upload the same run metadata to. You can
follow the instructions in the `spack monitor documentation <https://spack-monitor.readthedocs.org>`_
to first create a server along with a username and token for yourself.
You can then use this guide to interact with the server.

You should first export your spack monitor token and username to the environment:

.. code-block:: console

   $ export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438
   $ export SPACKMON_USER=spacky

By default, the host for your server is expected to be at ``http://127.0.0.1``
with a prefix of ``ms1``, and if this is the case, you can simply add the
``--monitor`` flag to the install command:

.. code-block:: console

   $ spack analyze run --monitor wget

If you need to customize the host or the prefix, you can do that as well:

.. code-block:: console

   $ spack analyze run --monitor --monitor-prefix monitor --monitor-host https://monitor-service.io wget

If your server doesn't have authentication, you can skip it:

.. code-block:: console

   $ spack analyze run --monitor --monitor-disable-auth wget

Regardless of your choice, when you run analyze on an installed package (whether
it was installed with ``--monitor`` or not), you'll see the results generating as they did
before, and a message that the monitor server was pinged:

.. code-block:: console

   $ spack analyze --monitor wget
   ...
   ==> Sending result for wget bin/wget to monitor.
@@ -27,12 +27,18 @@ It is recommended that the following be put in your ``.bashrc`` file:

If you do not see colorized output when using ``less -R`` it is because color
is being disabled in the piped output. In this case, tell spack to force
colorized output.
colorized output with a flag

.. code-block:: console

   $ spack --color always | less -R

or an environment variable

.. code-block:: console

   $ SPACK_COLOR=always spack | less -R

--------------------------
Listing available packages
--------------------------

@@ -963,7 +969,7 @@ Variants are named options associated with a particular package. They are
optional, as each package must provide default values for each variant it
makes available. Variants can be specified using
a flexible parameter syntax ``name=<value>``. For example,
``spack install libelf debug=True`` will install libelf built with debug
``spack install mercury debug=True`` will install mercury built with debug
flags. The names of particular variants available for a package depend on
what was provided by the package author. ``spack info <package>`` will
provide information on what build variants are available.

@@ -971,11 +977,11 @@ provide information on what build variants are available.
For compatibility with earlier versions, variants which happen to be
boolean in nature can be specified by a syntax that represents turning
options on and off. For example, in the previous spec we could have
supplied ``libelf +debug`` with the same effect of enabling the debug
supplied ``mercury +debug`` with the same effect of enabling the debug
compile time option for the libelf package.

Depending on the package a variant may have any default value. For
``libelf`` here, ``debug`` is ``False`` by default, and we turned it on
``mercury`` here, ``debug`` is ``False`` by default, and we turned it on
with ``debug=True`` or ``+debug``. If a variant is ``True`` by default
you can turn it off by either adding ``-name`` or ``~name`` to the spec.
@@ -155,7 +155,7 @@ version, this can be done like so:

   @property
   def force_autoreconf(self):
       return self.version == Version('1.2.3'):
       return self.version == Version('1.2.3')

^^^^^^^^^^^^^^^^^^^^^^^
Finding configure flags
@@ -9,7 +9,7 @@
Custom Build Systems
--------------------

While the build systems listed above should meet your needs for the
While the built-in build systems should meet your needs for the
vast majority of packages, some packages provide custom build scripts.
This guide is intended for the following use cases:

@@ -31,7 +31,7 @@ installation. Both of these packages require custom build systems.
Base class
^^^^^^^^^^

If your package does not belong to any of the aforementioned build
If your package does not belong to any of the built-in build
systems that Spack already supports, you should inherit from the
``Package`` base class. ``Package`` is a simple base class with a
single phase: ``install``. If your package is simple, you may be able

@@ -168,7 +168,8 @@ if and only if this flag is set, we would use the following line:
Testing
^^^^^^^

Let's put everything together and add unit tests to our package.
Let's put everything together and add unit tests to be optionally run
during the installation of our package.
In the ``perl`` package, we can see:

.. code-block:: python

@@ -182,12 +183,6 @@ As you can guess, this runs ``make test`` *after* building the package,
if and only if testing is requested. Again, this is not specific to
custom build systems, it can be added to existing build systems as well.

Ideally, every package in Spack will have some sort of test to ensure
that it was built correctly. It is up to the package authors to make
sure this happens. If you are adding a package for some software and
the developers list commands to test the installation, please add these
tests to your ``package.py``.

.. warning::

   The order of decorators matters. The following ordering:

@@ -207,3 +202,12 @@ tests to your ``package.py``.
the tests will always be run regardless of whether or not
``--test=root`` is requested. See https://github.com/spack/spack/issues/3833
for more information

Ideally, every package in Spack will have some sort of test to ensure
that it was built correctly. It is up to the package authors to make
sure this happens. If you are adding a package for some software and
the developers list commands to test the installation, please add these
tests to your ``package.py``.

For more information on other forms of package testing, refer to
:ref:`Checking an installation <checking_an_installation>`.
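To make the decorator ordering discussed in this hunk concrete, here is a minimal, hypothetical ``package.py`` sketch; the package class and its ``make('test')`` command are invented for illustration, and only the ``run_after``/``on_package_attributes`` pattern comes from the documentation above:

.. code-block:: python

   class Foo(Package):
       """Hypothetical custom-build-system package, shown only to
       illustrate optional install-time tests."""

       # ... versions, dependencies, and install() omitted ...

       # Correct ordering: run_after on top, on_package_attributes below,
       # so the check runs only when `spack install --test=root` asks for it.
       @run_after('install')
       @on_package_attributes(run_tests=True)
       def check_install(self):
           make('test')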
@@ -38,21 +38,25 @@ Intel no longer releases new versions of Parallel Studio, which can be
used in Spack via the :ref:<intelpackage>. All of its components can
now be found in oneAPI.

Example
=======
Examples
========

We start with a simple example that will be sufficient for most
users. Install the oneAPI compilers::
Building a Package With icx
---------------------------

In this example, we build patchelf with ``icc`` and ``icx``. The
compilers are installed with spack.

Install the oneAPI compilers::

   spack install intel-oneapi-compilers

Add the oneAPI compilers to the set of compilers that Spack can use::
Add the compilers to your ``compilers.yaml`` so spack can use them::

   spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin/intel64
   spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin

This adds the compilers to your ``compilers.yaml``. Verify that the
compilers are available::
Verify that the compilers are available::

   spack compiler list

@@ -72,9 +76,11 @@ To build with with ``icx``, do ::

   spack install patchelf%oneapi

In addition to compilers, oneAPI contains many libraries. The ``hdf5``
package works with any compatible MPI implementation. To build
``hdf5`` with Intel oneAPI MPI do::
Using oneAPI MPI to Satisfy a Virtual Dependence
------------------------------------------------------

The ``hdf5`` package works with any compatible MPI implementation. To
build ``hdf5`` with Intel oneAPI MPI do::

   spack install hdf5 +mpi ^intel-oneapi-mpi

@@ -95,11 +101,23 @@ To use the compilers, add some information about the installation to

   spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin

Adapt the paths above if you did not install the tools in the default
location. After adding the compilers, using them in Spack will be
exactly the same as if you had installed the
``intel-oneapi-compilers`` package. Another option is to manually add
the configuration to ``compilers.yaml`` as described in :ref:`Compiler
configuration <compiler-config>`.
location. After adding the compilers, using them is the same
as if you had installed the ``intel-oneapi-compilers`` package.
Another option is to manually add the configuration to
``compilers.yaml`` as described in :ref:`Compiler configuration
<compiler-config>`.

Libraries
---------

If you want Spack to use MKL that you have installed without Spack in
the default location, then add the following to
``~/.spack/packages.yaml``, adjusting the version as appropriate::

   intel-oneapi-mkl:
     externals:
     - spec: intel-oneapi-mkl@2021.1.1
       prefix: /opt/intel/oneapi/

Using oneAPI Tools Installed by Spack
@@ -561,43 +561,29 @@ follow `the next section <intel-install-libs_>`_ instead.

   modules: []
   spec: intel@18.0.3
   paths:
     cc: stub
     cxx: stub
     f77: stub
     fc: stub
     cc: /usr/bin/true
     cxx: /usr/bin/true
     f77: /usr/bin/true
     fc: /usr/bin/true

Replace ``18.0.3`` with the version that you determined in the preceding
step. The contents under ``paths:`` do not matter yet.
step. The exact contents under ``paths:`` do not matter yet, but the paths must exist.

You are right to ask: "Why on earth is that necessary?" [fn8]_.
The answer lies in Spack striving for strict compiler consistency.
Consider what happens without such a pre-declared compiler stub:
Say, you ask Spack to install a particular version
``intel-parallel-studio@edition.V``. Spack will apply an unrelated compiler
spec to concretize and install your request, resulting in
``intel-parallel-studio@edition.V %X``. That compiler ``%X`` is not going to
be the version that this new package itself provides. Rather, it would
typically be ``%gcc@...`` in a default Spack installation or possibly indeed
``%intel@...``, but at a version that precedes ``V``.
This temporary stub is required such that the ``intel-parallel-studio`` package
can be installed for the ``intel`` compiler (which the package itself is going
to provide after the installation) rather than an arbitrary system compiler.
The paths given in ``cc``, ``cxx``, ``f77``, ``fc`` must exist, but will
never be used to build anything during the installation of ``intel-parallel-studio``.

The problem comes to the fore as soon as you try to use any virtual ``mkl``
or ``mpi`` packages that you would expect to now be provided by
``intel-parallel-studio@edition.V``. Spack will indeed see those virtual
packages, but only as being tied to the compiler that the package
``intel-parallel-studio@edition.V`` was concretized with *at installation*.
If you were to install a client package with the new compilers now available
to you, you would naturally run ``spack install foo +mkl %intel@V``, yet
Spack will either complain about ``mkl%intel@V`` being missing (because it
only knows about ``mkl%X``) or it will go and attempt to install *another
instance* of ``intel-parallel-studio@edition.V %intel@V`` so as to match the
compiler spec ``%intel@V`` that you gave for your client package ``foo``.
This will be unexpected and will quickly get annoying because each
reinstallation takes up time and extra disk space.
The reason for this stub is that ``intel-parallel-studio`` also provides the
``mpi`` and ``mkl`` packages and when concretizing a spec, Spack ensures
strong consistency of the used compiler across all dependencies: [fn8]_.
Installing a package ``foo +mkl %intel`` will make Spack look for a package
``mkl %intel``, which can be provided by ``intel-parallel-studio+mkl %intel``,
but not by ``intel-parallel-studio+mkl %gcc``.

To escape this trap, put the compiler stub declaration shown here in place,
then use that pre-declared compiler spec to install the actual package, as
shown next. This approach works because during installation only the
package's own self-sufficient installer will be used, not any compiler.
Failure to do so may result in additional installations of ``mkl``, ``intel-mpi`` or
even ``intel-parallel-studio`` as dependencies for other packages.

.. _`verify-compiler-anticipated`:

@@ -648,11 +634,25 @@ follow `the next section <intel-install-libs_>`_ instead.
want to use the ``intel64`` variant. The ``icpc`` and ``ifort`` compilers
will be located in the same directory as ``icc``.

* Use the ``modules:`` and/or ``cflags:`` tokens to specify a suitable accompanying
* Make sure to specify ``modules: ['intel-parallel-studio-cluster2018.3-intel-18.0.3-HASH']``
  (with ``HASH`` being the short hash as displayed when running
  ``spack find -l intel-parallel-studio@cluster.2018.3`` and the versions adapted accordingly)
  to ensure that the correct and complete environment for the Intel compilers gets
  loaded when running them. With modern versions of the Intel compiler you may otherwise see
  issues about missing libraries. Please also note that module name must exactly match
  the name as returned by ``module avail`` (and shown in the example above).

* Use the ``modules:`` and/or ``cflags:`` tokens to further specify a suitable accompanying
  ``gcc`` version to help pacify picky client packages that ask for C++
  standards more recent than supported by your system-provided ``gcc`` and its
  ``libstdc++.so``.

* If you specified a custom variant (for example ``+vtune``) you may want to add this as your
  preferred variant in the packages configuration for the ``intel-parallel-studio`` package
  as described in :ref:`concretization-preferences`. Otherwise you will have to specify
  the variant every time ``intel-parallel-studio`` is being used as ``mkl``, ``fftw`` or ``mpi``
  implementation to avoid pulling in a different variant.

* To set the Intel compilers for default use in Spack, instead of the usual ``%gcc``,
  follow section `Selecting Intel compilers`_.
@@ -121,11 +121,15 @@ override the ``meson_args`` method like so:

.. code-block:: python

   def meson_args(self):
       return ['--default-library=both']
       return ['--warnlevel=3']

This method can be used to pass flags as well as variables.

Note that the ``MesonPackage`` base class already defines variants for
``buildtype``, ``default_library`` and ``strip``, which are mapped to default
Meson arguments, meaning that you don't have to specify these.

^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
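Since the ``meson_args`` hunk above notes that the method can pass variables as well as flags, here is a small hedged sketch; the specific options are illustrative and not taken from this diff:

.. code-block:: python

   def meson_args(self):
       # A plain flag plus a -D variable; adjust both to whatever
       # options the project's meson_options.txt actually defines.
       return ['--warnlevel=3', '-Dplatforms=auto']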
@@ -627,7 +627,8 @@ adds:
Testing
^^^^^^^

``PythonPackage`` provides a couple of options for testing packages.
``PythonPackage`` provides a couple of options for testing packages
both during and after the installation process.

""""""""""""
Import tests

@@ -696,16 +697,20 @@ libraries. Make sure not to add modules/packages containing the word
"test", as these likely won't end up in the installation directory,
or may require test dependencies like pytest to be installed.

These tests can be triggered by running ``spack install --test=root``
or by running ``spack test run`` after the installation has finished.
Import tests can be run during the installation using ``spack install
--test=root`` or at any time after the installation using
``spack test run``.

""""""""""
Unit tests
""""""""""

The package you want to install may come with additional unit tests.
You can add additional build-time or install-time tests by adding
additional testing functions. For example, ``py-numpy`` adds:
The package may have its own unit or regression tests. Spack can
run these tests during the installation by adding phase-appropriate
test methods.

For example, ``py-numpy`` adds the following as a check to run
after the ``install`` phase:

.. code-block:: python

@@ -716,7 +721,13 @@ additional testing functions. For example, ``py-numpy`` adds:
   python('-c', 'import numpy; numpy.test("full", verbose=2)')

These tests can be triggered by running ``spack install --test=root``.
when testing is enabled during the installation (i.e., ``spack install
--test=root``).

.. note::

   Additional information is available on :ref:`install phase tests
   <install_phase-tests>`.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setup file in a sub-directory
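As a companion to the ``py-numpy`` snippet referenced above, here is a hypothetical ``PythonPackage`` sketch combining an import test with an install-time check; the package name and test call are invented for illustration:

.. code-block:: python

   class PyMypkg(PythonPackage):
       """Hypothetical package illustrating PythonPackage test hooks."""

       # Modules tried with `spack install --test=root` or `spack test run`.
       import_modules = ['mypkg']

       @run_after('install')
       @on_package_attributes(run_tests=True)
       def install_test(self):
           # Run the project's own test suite against the installed module.
           python('-c', 'import mypkg; mypkg.test()')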
@@ -202,21 +202,23 @@ of builds.

Unless overridden in a package or on the command line, Spack builds all
packages in parallel. The default parallelism is equal to the number of
cores on your machine, up to 16. Parallelism cannot exceed the number of
cores available on the host. For a build system that uses Makefiles, this
means running:
cores available to the process, up to 16 (the default of ``build_jobs``).
For a build system that uses Makefiles, this ``spack install`` runs:

- ``make -j<build_jobs>``, when ``build_jobs`` is less than the number of
  cores on the machine
  cores available
- ``make -j<ncores>``, when ``build_jobs`` is greater or equal to the
  number of cores on the machine
  number of cores available

If you work on a shared login node or have a strict ulimit, it may be
necessary to set the default to a lower value. By setting ``build_jobs``
to 4, for example, commands like ``spack install`` will run ``make -j4``
instead of hogging every core.
instead of hogging every core. To build all software in serial,
set ``build_jobs`` to 1.

To build all software in serial, set ``build_jobs`` to 1.
Note that specifying the number of jobs on the command line always takes
priority, so that ``spack install -j<n>`` always runs `make -j<n>`, even
when that exceeds the number of cores available.

--------------------
``ccache``
@@ -106,11 +106,21 @@ with a high level view of Spack's directory structure:

external/ <- external libs included in Spack distro
llnl/ <- some general-use libraries

spack/ <- spack module; contains Python code
cmd/ <- each file in here is a spack subcommand
compilers/ <- compiler description files
test/ <- unit test modules
util/ <- common code
spack/ <- spack module; contains Python code
analyzers/ <- modules to run analysis on installed packages
build_systems/ <- modules for different build systems
cmd/ <- each file in here is a spack subcommand
compilers/ <- compiler description files
container/ <- module for spack containerize
hooks/ <- hook modules to run at different points
modules/ <- modules for lmod, tcl, etc.
operating_systems/ <- operating system modules
platforms/ <- different spack platforms
reporters/ <- reporters like cdash, junit
schema/ <- schemas to validate data structures
solver/ <- the spack solver
test/ <- unit test modules
util/ <- common code

Spack is designed so that it could live within a `standard UNIX
directory hierarchy <http://linux.die.net/man/7/hier>`_, so ``lib``,

@@ -251,6 +261,22 @@ Unit tests
This is a fake package hierarchy used to mock up packages for
Spack's test suite.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Research and Monitoring Modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

:mod:`spack.monitor`
  Contains :class:`SpackMonitor <spack.monitor.SpackMonitor>`. This is accessed
  from the ``spack install`` and ``spack analyze`` commands to send build
  and package metadata up to a `Spack Monitor <https://github.com/spack/spack-monitor>`_ server.

:mod:`spack.analyzers`
  A module folder with a :class:`AnalyzerBase <spack.analyzers.analyzer_base.AnalyzerBase>`
  that provides base functions to run, save, and (optionally) upload analysis
  results to a `Spack Monitor <https://github.com/spack/spack-monitor>`_ server.

^^^^^^^^^^^^^
Other Modules
^^^^^^^^^^^^^

@@ -299,6 +325,235 @@ Conceptually, packages are overloaded. They contain:
Stage objects
-------------
.. _writing-analyzers:
|
||||
|
||||
-----------------
|
||||
Writing analyzers
|
||||
-----------------
|
||||
|
||||
To write an analyzer, you should add a new python file to the
|
||||
analyzers module directory at ``lib/spack/spack/analyzers``.
|
||||
Your analyzer should be a subclass of the :class:`AnalyzerBase <spack.analyzers.analyzer_base.AnalyzerBase>`. For example, if you want
|
||||
to add an analyzer class ``Myanalyzer`` you would write it to
|
||||
``spack/analyzers/myanalyzer.py`` and import and
|
||||
use the base as follows:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from .analyzer_base import AnalyzerBase
|
||||
|
||||
class Myanalyzer(AnalyzerBase):
|
||||
|
||||
|
||||
Note that the class name is your module file name, all lowercase
|
||||
except for the first capital letter. You can look at other analyzers in
|
||||
the analyzers directory for examples. The rest of this guide covers the basic functions you need.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Analyzer Output Directory
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
By default, when you run ``spack analyze run``, an analyzer output directory will
|
||||
be created in your spack user directory in your ``$HOME``. We write output here
|
||||
because the install directory might not always be writable.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
~/.spack/
|
||||
analyzers
|
||||
|
||||
Result files will be written here, organized in subfolders in the same structure
|
||||
as the package, with each analyzer owning its own subfolder. For example:
|
||||
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ tree ~/.spack/analyzers/
|
||||
/home/spackuser/.spack/analyzers/
|
||||
└── linux-ubuntu20.04-skylake
|
||||
└── gcc-9.3.0
|
||||
└── zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2
|
||||
├── environment_variables
|
||||
│ └── spack-analyzer-environment-variables.json
|
||||
├── install_files
|
||||
│ └── spack-analyzer-install-files.json
|
||||
└── libabigail
|
||||
└── lib
|
||||
└── spack-analyzer-libabigail-libz.so.1.2.11.xml
|
||||
|
||||
|
||||
Notice that for the libabigail analyzer, since results are generated per object,
|
||||
we honor the object's folder in case there are equivalently named files in
|
||||
different folders. The result files are typically written as json so they can be easily read and uploaded in a future interaction with a monitor.
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^
|
||||
Analyzer Metadata
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
Your analyzer is required to have the class attributes ``name``, ``outfile``,
|
||||
and ``description``. These are printed to the user when they use the subcommand
|
||||
``spack analyze list-analyzers``. Here is an example.
|
||||
As we mentioned above, note that this analyzer would live in a module named
|
||||
``libabigail.py`` in the analyzers folder so that the class can be discovered.
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class Libabigail(AnalyzerBase):
|
||||
|
||||
name = "libabigail"
|
||||
outfile = "spack-analyzer-libabigail.json"
|
||||
description = "Application Binary Interface (ABI) features for objects"
|
||||
|
||||
|
||||
This means that the name and output file should be unique for your analyzer.
|
||||
Note that "all" cannot be the name of an analyzer, as this key is used to indicate
|
||||
that the user wants to run all analyzers.
|
||||
|
||||
.. _analyzer_run_function:
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
An analyzer run Function
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The core of an analyzer is its ``run()`` function, which should accept no
|
||||
arguments. You can assume your analyzer has the package spec of interest at ``self.spec``
|
||||
and it's up to the run function to generate whatever analysis data you need,
|
||||
and then return the object with the analyzer name as the key. The result data
|
||||
should be a list of objects, each with a name, ``analyzer_name``, ``install_file``,
|
||||
and one of ``value`` or ``binary_value``. The install file should be a relative
|
||||
path, not an absolute path. For example, let's say we extract a metric called
|
||||
``metric`` for ``bin/wget`` using our analyzer ``thebest-analyzer``.
|
||||
We might have data that looks like this:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
result = {"name": "metric", "analyzer_name": "thebest-analyzer", "value": "1", "install_file": "bin/wget"}
|
||||
|
||||
|
||||
We'd then return it as follows. Note that the key is the analyzer name at ``self.name``.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
return {self.name: result}
|
||||
|
||||
This will save the complete result to the analyzer metadata folder, as described
|
||||
previously. If you want support for adding a different kind of metadata (e.g.,
|
||||
not associated with an install file) then the monitor server would need to be updated
|
||||
to support this first.
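Putting the pieces above together, a minimal ``run()`` might look like the
following sketch, which simply mirrors the result and return examples shown
earlier (the metric name, value, and install file are purely illustrative):

.. code-block:: python

   def run(self):
       # A real analyzer would inspect files under self.spec.prefix here to
       # compute its values; this result is hard-coded for illustration.
       result = {"name": "metric", "analyzer_name": self.name,
                 "value": "1", "install_file": "bin/wget"}

       # The key of the returned object is the analyzer name.
       return {self.name: result}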
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
An analyzer init Function
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you don't need any extra dependencies or checks, you can skip defining an analyzer
|
||||
init function, as the base class will handle it. Typically, it will accept
|
||||
a spec, and an optional output directory (if the user does not want the default
|
||||
metadata folder for analyzer results). The analyzer init function should call
|
||||
its parent init, and then do any extra checks or validation that are required to
|
||||
work. For example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def __init__(self, spec, dirname=None):
|
||||
super(Myanalyzer, self).__init__(spec, dirname)
|
||||
|
||||
# install extra dependencies, do extra preparation and checks here
|
||||
|
||||
|
||||
At the end of the init, you will have available to you:
|
||||
|
||||
- **self.spec**: the spec object
|
||||
- **self.dirname**: an optional directory name the user has provided at init to save results to
|
||||
- **self.output_dir**: the analyzer metadata directory, where we save by default
|
||||
- **self.meta_dir**: the path to the package metadata directory (.spack) if you need it
|
||||
|
||||
You can then proceed to write your analyzer.
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Saving Analyzer Results
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The analyzer will have ``save_result`` called with the generated result object
|
||||
to save it to the filesystem and, if the user has added the ``--monitor`` flag,
|
||||
to upload it to a monitor server. If your result follows an accepted result
|
||||
format and you don't need to parse it further, you don't need to add this
|
||||
function to your class. However, if your result data is large or otherwise
|
||||
needs additional parsing, you can define it. If you define the function, it
|
||||
is useful to know about the ``output_dir`` property, which you can join
|
||||
with the relative path of your output file of choice:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
outfile = os.path.join(self.output_dir, "my-output-file.txt")
|
||||
|
||||
|
||||
The directory will be provided by the ``output_dir`` property but it won't exist,
|
||||
so you should create it:
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# Create the output directory
|
||||
if not os.path.exists(self._output_dir):
|
||||
os.makedirs(self._output_dir)
|
||||
|
||||
|
||||
If you are generating results that match to specific files in the package
|
||||
install directory, you should try to maintain those paths in the case that
|
||||
there are equivalently named files in different directories that would
|
||||
overwrite one another. As an example of an analyzer with a custom save,
|
||||
the Libabigail analyzer saves ``*.xml`` files to the analyzer metadata
|
||||
folder in ``run()``, as they are either binaries or xml (text) files that would
|
||||
usually be too big to pass in one request. For this reason, the files
|
||||
are saved during ``run()`` and the filenames added to the result object,
|
||||
and then when the result object is passed back into ``save_result()``,
|
||||
we skip saving to the filesystem, and instead read the file and send
|
||||
each one (separately) to the monitor:
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def save_result(self, result, monitor=None, overwrite=False):
|
||||
"""ABI results are saved to individual files, so each one needs to be
|
||||
read and uploaded. Result here should be the lookup generated in run(),
|
||||
the key is the analyzer name, and each value is the result file.
|
||||
We currently upload the entire xml as text because libabigail can't
|
||||
easily read gzipped xml, but this will be updated when it can.
|
||||
"""
|
||||
if not monitor:
|
||||
return
|
||||
|
||||
name = self.spec.package.name
|
||||
|
||||
for obj, filename in result.get(self.name, {}).items():
|
||||
|
||||
# Don't include the prefix
|
||||
rel_path = obj.replace(self.spec.prefix + os.path.sep, "")
|
||||
|
||||
# We've already saved the results to file during run
|
||||
content = spack.monitor.read_file(filename)
|
||||
|
||||
# A result needs an analyzer, value or binary_value, and name
|
||||
data = {"value": content, "install_file": rel_path, "name": "abidw-xml"}
|
||||
tty.info("Sending result for %s %s to monitor." % (name, rel_path))
|
||||
monitor.send_analyze_metadata(self.spec.package, {"libabigail": [data]})
|
||||
|
||||
|
||||
|
||||
Notice that this function, if you define it, requires a result object (generated by
|
||||
``run()``), a monitor (if you want to send results), and a boolean ``overwrite`` used
|
||||
to check if a result exists first, and not write to it if the result exists and
|
||||
``overwrite`` is False. Also notice that because we already saved these files to the
analyzer metadata folder during ``run()``, we return early if a monitor is not defined;
this function only serves to send results to the monitor. If you haven't saved anything
to the analyzer metadata folder
|
||||
yet, you might want to do that here. You should also use ``tty.info`` to give
|
||||
the user a message of "Writing result to $DIRNAME."
|
||||
|
||||
|
||||
.. _writing-commands:
|
||||
|
||||
----------------
|
||||
@@ -345,6 +600,183 @@ Whenever you add/remove/rename a command or flags for an existing command,
|
||||
make sure to update Spack's `Bash tab completion script
|
||||
<https://github.com/adamjstewart/spack/blob/develop/share/spack/spack-completion.bash>`_.
|
||||
|
||||
|
||||
-------------
|
||||
Writing Hooks
|
||||
-------------
|
||||
|
||||
A hook is a callback that makes it easy to design functions that run
|
||||
for different events. We do this by way of defining hook types, and then
|
||||
inserting them at different places in the spack code base. Whenever a hook
|
||||
type triggers by way of a function call, we find all the hooks of that type,
|
||||
and run them.
|
||||
|
||||
Spack defines hooks by way of a module at ``lib/spack/spack/hooks`` where we can define
|
||||
types of hooks in the ``__init__.py``, and then python files in that folder
|
||||
can use hook functions. The files are automatically parsed, so if you write
|
||||
a new file for some integration (e.g., ``lib/spack/spack/hooks/myintegration.py``),
|
||||
you can then write hook functions in that file that will be automatically detected,
|
||||
and run whenever your hook is called. This section will cover the basic kind
|
||||
of hooks, and how to write them.
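As a minimal sketch, a hypothetical integration file could define a callback
for one of the existing hook types described below (the file name and message
are illustrative):

.. code-block:: python

   # lib/spack/spack/hooks/myintegration.py
   def post_install(spec):
       """Automatically detected and run after each package install."""
       print('myintegration: finished installing {0}'.format(spec.name))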
|
||||
|
||||
^^^^^^^^^^^^^^
|
||||
Types of Hooks
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
The following hooks are currently implemented to make it easy for you,
|
||||
the developer, to add hooks at different stages of a spack install or similar.
|
||||
If there is a hook that you would like but is missing, you can propose adding a new one.
|
||||
|
||||
"""""""""""""""""""""
|
||||
``pre_install(spec)``
|
||||
"""""""""""""""""""""
|
||||
|
||||
A ``pre_install`` hook is run within an install subprocess, directly before
|
||||
the install starts. It expects a single argument of a spec, and is run in
|
||||
a multiprocessing subprocess. Note that if you see ``pre_install`` functions associated with packages, these are not hooks
|
||||
as we have defined them here, but rather callback functions associated with
|
||||
a package install.
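For example, a minimal ``pre_install`` callback (purely illustrative) only
needs to accept the spec:

.. code-block:: python

   def pre_install(spec):
       """Runs in the install subprocess right before the install begins."""
       print('pre_install: about to install {0}'.format(spec.name))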
|
||||
|
||||
|
||||
""""""""""""""""""""""
|
||||
``post_install(spec)``
|
||||
""""""""""""""""""""""
|
||||
|
||||
A ``post_install`` hook is run within an install subprocess, directly after
|
||||
the install finishes, but before the build stage is removed. If you
|
||||
write one of these hooks, you should expect it to accept a spec as the only
|
||||
argument. This is run in a multiprocessing subprocess. This ``post_install`` is
|
||||
also seen in packages, but in that context it is not related to the hooks described
|
||||
here.
|
||||
|
||||
|
||||
""""""""""""""""""""""""""
|
||||
``on_install_start(spec)``
|
||||
""""""""""""""""""""""""""
|
||||
|
||||
This hook is run at the beginning of ``lib/spack/spack/installer.py``,
|
||||
in the install function of a ``PackageInstaller``,
|
||||
and importantly is not part of a build process, but runs before it. This is when
|
||||
we have just grabbed the task and are preparing to install. If you
|
||||
write a hook of this type, you should provide the spec to it.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def on_install_start(spec):
|
||||
"""On start of an install, we want to...
|
||||
"""
|
||||
print('on_install_start')
|
||||
|
||||
|
||||
""""""""""""""""""""""""""""
|
||||
``on_install_success(spec)``
|
||||
""""""""""""""""""""""""""""
|
||||
|
||||
This hook is run on a successful install, and is also run inside the build
|
||||
process, akin to ``post_install``. The main difference is that this hook
|
||||
is run outside of the context of the stage directory, meaning after the
|
||||
build stage has been removed and the user is alerted that the install was
|
||||
successful. If you need to write a hook that is run on success of a particular
|
||||
phase, you should use ``on_phase_success``.
|
||||
|
||||
""""""""""""""""""""""""""""
|
||||
``on_install_failure(spec)``
|
||||
""""""""""""""""""""""""""""
|
||||
|
||||
This hook is run given an install failure that happens outside of the build
|
||||
subprocess, but somewhere in ``installer.py`` when something else goes wrong.
|
||||
If you need to write a hook that is relevant to a failure within a build
|
||||
process, you would want to instead use ``on_phase_failure``.
|
||||
|
||||
|
||||
"""""""""""""""""""""""""""""""""""""""""""""""
|
||||
``on_phase_success(pkg, phase_name, log_file)``
|
||||
"""""""""""""""""""""""""""""""""""""""""""""""
|
||||
|
||||
This hook is run within the install subprocess, and specifically when a phase
|
||||
successfully finishes. Since we are interested in the package, the name of
|
||||
the phase, and any output from it, we require:
|
||||
|
||||
- **pkg**: the package variable, which also has the attached spec at ``pkg.spec``
|
||||
- **phase_name**: the name of the phase that was successful (e.g., configure)
|
||||
- **log_file**: the path to the file with output, in case you need to inspect or otherwise interact with it.
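A hypothetical callback illustrating this signature might look like:

.. code-block:: python

   def on_phase_success(pkg, phase_name, log_file):
       """Run in the install subprocess after a phase succeeds."""
       print('{0} finished phase {1}; output is in {2}'.format(
           pkg.name, phase_name, log_file))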
|
||||
|
||||
"""""""""""""""""""""""""""""""""""""""""""""
|
||||
``on_phase_error(pkg, phase_name, log_file)``
|
||||
"""""""""""""""""""""""""""""""""""""""""""""
|
||||
|
||||
In the case of an error during a phase, we might want to trigger some event
|
||||
with a hook, and this is the purpose of this particular hook. Akin to
|
||||
``on_phase_success``, we require the same variables: the package that failed,
|
||||
the name of the phase, and the log file where we might find errors.
|
||||
|
||||
"""""""""""""""""""""""""""""""""
|
||||
``on_analyzer_save(pkg, result)``
|
||||
"""""""""""""""""""""""""""""""""
|
||||
|
||||
After an analyzer has saved some result for a package, this hook is called,
|
||||
and it provides the package that we just ran the analysis for, along with
|
||||
the loaded result. Typically, a result is structured to have the name
|
||||
of the analyzer as key, and the result object that is defined in detail in
|
||||
:ref:`analyzer_run_function`.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def on_analyzer_save(pkg, result):
|
||||
"""given a package and a result...
|
||||
"""
|
||||
print('Do something extra with a package analysis result here')
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
Adding a New Hook Type
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Adding a new hook type is very simple! In ``lib/spack/spack/hooks/__init__.py``
|
||||
you can simply create a new ``HookRunner`` that is named to match your new hook.
|
||||
For example, let's say you want to add a new hook called ``post_log_write``
|
||||
to trigger after anything is written to a logger. You would add it as follows:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# pre/post install and run by the install subprocess
|
||||
pre_install = HookRunner('pre_install')
|
||||
post_install = HookRunner('post_install')
|
||||
|
||||
# hooks related to logging
|
||||
post_log_write = HookRunner('post_log_write') # <- here is my new hook!
|
||||
|
||||
|
||||
You then need to decide what arguments your hook would expect. Since this is
|
||||
related to logging, let's say that you want a message and level. That means
|
||||
that when you add a python file to the ``lib/spack/spack/hooks``
|
||||
folder, you can define one or more callbacks intended to be triggered by this hook. You might
|
||||
use the new hook as follows:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def post_log_write(message, level):
|
||||
"""Do something custom with the messsage and level every time we write
|
||||
to the log
|
||||
"""
|
||||
print('running post_log_write!')
|
||||
|
||||
|
||||
To use the hook, we would call it as follows somewhere in the logic to do logging.
|
||||
In this example, we use it outside of a logger that is already defined:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import spack.hooks
|
||||
|
||||
# We do something here to generate a logger and message
|
||||
spack.hooks.post_log_write(message, logger.level)
|
||||
|
||||
|
||||
This is not to say that this would be the best way to implement an integration
|
||||
with the logger (you'd probably want to write a custom logger, or you could
|
||||
have the hook defined within the logger), but it serves as an example of writing a hook.
|
||||
|
||||
----------
|
||||
Unit tests
|
||||
----------
|
||||
@@ -402,7 +834,7 @@ you can specify the interpreter with ``-i``:
|
||||
.. code-block:: console
|
||||
|
||||
$ spack python -i ipython
|
||||
Python 3.8.3 (default, May 19 2020, 18:47:26)
|
||||
Python 3.8.3 (default, May 19 2020, 18:47:26)
|
||||
Type 'copyright', 'credits' or 'license' for more information
|
||||
IPython 7.17.0 -- An enhanced Interactive Python. Type '?' for help.
|
||||
|
||||
@@ -430,7 +862,7 @@ or a file:
|
||||
$ spack python ~/test_fetching.py
|
||||
$ spack python -i ipython ~/test_fetching.py
|
||||
|
||||
just like you would with the normal ``python`` command.
|
||||
just like you would with the normal ``python`` command.
|
||||
|
||||
|
||||
.. _cmd-spack-url:
|
||||
@@ -575,8 +1007,10 @@ develop onto release branches. This is typically done by cherry-picking
|
||||
bugfix commits off of ``develop``.
|
||||
|
||||
To avoid version churn for users of a release series, minor releases
|
||||
should **not** make changes that would change the concretization of
|
||||
**should not** make changes that would change the concretization of
|
||||
packages. They should generally only contain fixes to the Spack core.
|
||||
However, sometimes priorities are such that new functionality needs to
|
||||
be added to a minor release.
|
||||
|
||||
Both major and minor releases are tagged. After each release, we merge
|
||||
the release branch back into ``develop`` so that the version bump and any
|
||||
@@ -585,50 +1019,51 @@ convenience, we also tag the latest release as ``releases/latest``,
|
||||
so that users can easily check it out to get the latest
|
||||
stable version. See :ref:`merging-releases` for more details.
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Scheduling work for releases
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
We schedule work for releases by creating `GitHub projects
|
||||
<https://github.com/spack/spack/projects>`_. At any time, there may be
|
||||
several open release projects. For example, here are two releases (from
|
||||
several open release projects. For example, below are two releases (from
|
||||
some past version of the page linked above):
|
||||
|
||||
.. image:: images/projects.png
|
||||
|
||||
Here, there's one release in progress for ``0.15.1`` and another for
|
||||
This image shows one release in progress for ``0.15.1`` and another for
|
||||
``0.16.0``. Each of these releases has a project board containing issues
|
||||
and pull requests. GitHub shows a status bar with completed work in
|
||||
green, work in progress in purple, and work not started yet in gray, so
|
||||
it's fairly easy to see progress.
|
||||
|
||||
Spack's project boards are not firm commitments, and we move work between
|
||||
Spack's project boards are not firm commitments so we move work between
|
||||
releases frequently. If we need to make a release and some tasks are not
|
||||
yet done, we will simply move them to next minor or major release, rather
|
||||
yet done, we will simply move them to the next minor or major release, rather
|
||||
than delaying the release to complete them.
|
||||
|
||||
For more on using GitHub project boards, see `GitHub's documentation
|
||||
<https://docs.github.com/en/github/managing-your-work-on-github/about-project-boards>`_.
|
||||
|
||||
|
||||
.. _major-releases:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
Making Major Releases
|
||||
Making major releases
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Assuming you've already created a project board and completed the work
|
||||
for a major release, the steps to make the release are as follows:
|
||||
Assuming a project board has already been created and all required work
|
||||
completed, the steps to make the major release are:
|
||||
|
||||
#. Create two new project boards:
|
||||
|
||||
* One for the next major release
|
||||
* One for the next point release
|
||||
|
||||
#. Move any tasks that aren't done yet to one of the new project boards.
|
||||
Small bugfixes should go to the next point release. Major features,
|
||||
refactors, and changes that could affect concretization should go in
|
||||
the next major release.
|
||||
#. Move any optional tasks that are not done to one of the new project boards.
|
||||
|
||||
In general, small bugfixes should go to the next point release. Major
|
||||
features, refactors, and changes that could affect concretization should
|
||||
go in the next major release.
|
||||
|
||||
#. Create a branch for the release, based on ``develop``:
|
||||
|
||||
@@ -640,11 +1075,14 @@ for a major release, the steps to make the release are as follows:
|
||||
``releases/vX.Y``. That is, you should create a ``releases/vX.Y``
|
||||
branch if you are preparing the ``X.Y.0`` release.
|
||||
|
||||
#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.13.0
|
||||
#. Bump the version in ``lib/spack/spack/__init__.py``.
|
||||
|
||||
See `this example from 0.13.0
|
||||
<https://github.com/spack/spack/commit/8eeb64096c98b8a43d1c587f13ece743c864fba9>`_
|
||||
|
||||
#. Update ``CHANGELOG.md`` with major highlights in bullet form. Use
|
||||
proper markdown formatting, like `this example from 0.15.0
|
||||
#. Update ``CHANGELOG.md`` with major highlights in bullet form.
|
||||
|
||||
Use proper markdown formatting, like `this example from 0.15.0
|
||||
<https://github.com/spack/spack/commit/d4bf70d9882fcfe88507e9cb444331d7dd7ba71c>`_.
|
||||
|
||||
#. Push the release branch to GitHub.
|
||||
@@ -668,33 +1106,33 @@ for a major release, the steps to make the release are as follows:
|
||||
.. _point-releases:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
Making Point Releases
|
||||
Making point releases
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
This assumes you've already created a project board for a point release
|
||||
and completed the work to be done for the release. To make a point
|
||||
release:
|
||||
Assuming a project board has already been created and all required work
|
||||
completed, the steps to make the point release are:
|
||||
|
||||
#. Create one new project board for the next point release.
|
||||
#. Create a new project board for the next point release.
|
||||
|
||||
#. Move any cards that aren't done yet to the next project board.
|
||||
#. Move any optional tasks that are not done to the next project board.
|
||||
|
||||
#. Check out the release branch (it should already exist). For the
|
||||
``X.Y.Z`` release, the release branch is called ``releases/vX.Y``. For
|
||||
``v0.15.1``, you would check out ``releases/v0.15``:
|
||||
#. Check out the release branch (it should already exist).
|
||||
|
||||
For the ``X.Y.Z`` release, the release branch is called ``releases/vX.Y``.
|
||||
For ``v0.15.1``, you would check out ``releases/v0.15``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ git checkout releases/v0.15
|
||||
|
||||
#. Cherry-pick each pull request in the ``Done`` column of the release
|
||||
project onto the release branch.
|
||||
project board onto the release branch.
|
||||
|
||||
This is **usually** fairly simple since we squash the commits from the
|
||||
vast majority of pull requests, which means there is only one commit
|
||||
vast majority of pull requests. That means there is only one commit
|
||||
per pull request to cherry-pick. For example, `this pull request
|
||||
<https://github.com/spack/spack/pull/15777>`_ has three commits, but
|
||||
the were squashed into a single commit on merge. You can see the
|
||||
they were squashed into a single commit on merge. You can see the
|
||||
commit that was created here:
|
||||
|
||||
.. image:: images/pr-commit.png
|
||||
@@ -706,9 +1144,8 @@ release:
|
||||
|
||||
$ git cherry-pick 7e46da7
|
||||
|
||||
For pull requests that were rebased, you'll need to cherry-pick each
|
||||
rebased commit individually. There have not been any rebased PRs like
|
||||
this in recent point releases.
|
||||
For pull requests that were rebased (or not squashed), you'll need to
|
||||
cherry-pick each associated commit individually.
|
||||
|
||||
.. warning::
|
||||
|
||||
@@ -721,30 +1158,35 @@ release:
|
||||
cherry-picked all the commits in order. This generally means there
|
||||
is some other intervening pull request that the one you're trying
|
||||
to pick depends on. In these cases, you'll need to make a judgment
|
||||
call:
|
||||
call regarding those pull requests. Consider the number of affected
|
||||
files and/or the resulting differences.
|
||||
|
||||
1. If the dependency is small, you might just cherry-pick it, too.
|
||||
If you do this, add it to the release board.
|
||||
1. If the dependency changes are small, you might just cherry-pick it,
|
||||
too. If you do this, add the task to the release board.
|
||||
|
||||
2. If it is large, then you may decide that this fix is not worth
|
||||
including in a point release, in which case you should remove it
|
||||
from the release project.
|
||||
2. If the changes are large, then you may decide that this fix is not
|
||||
worth including in a point release, in which case you should remove
|
||||
the task from the release project.
|
||||
|
||||
3. You can always decide to manually back-port the fix to the release
|
||||
branch if neither of the above options makes sense, but this can
|
||||
require a lot of work. It's seldom the right choice.
|
||||
|
||||
#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.14.1
|
||||
#. Bump the version in ``lib/spack/spack/__init__.py``.
|
||||
|
||||
See `this example from 0.14.1
|
||||
<https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
|
||||
|
||||
#. Update ``CHANGELOG.md`` with a list of bugfixes. This is typically just a
|
||||
summary of the commits you cherry-picked onto the release branch. See
|
||||
`the changelog from 0.14.1
|
||||
#. Update ``CHANGELOG.md`` with a list of the changes.
|
||||
|
||||
This is typically a summary of the commits you cherry-picked onto the
|
||||
release branch. See `the changelog from 0.14.1
|
||||
<https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
|
||||
|
||||
#. Push the release branch to GitHub.
|
||||
|
||||
#. Make sure CI passes on the release branch, including:
|
||||
|
||||
* Regular unit tests
|
||||
* Build tests
|
||||
* The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_
|
||||
@@ -767,23 +1209,26 @@ release:
|
||||
Publishing a release on GitHub
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
#. Go to `github.com/spack/spack/releases
|
||||
<https://github.com/spack/spack/releases>`_ and click ``Draft a new
|
||||
release``. Set the following:
|
||||
#. Create the release in GitHub.
|
||||
|
||||
* ``Tag version`` should start with ``v`` and contain *all three*
|
||||
parts of the version, .g. ``v0.15.1``. This is the name of the tag
|
||||
that will be created.
|
||||
* Go to
|
||||
`github.com/spack/spack/releases <https://github.com/spack/spack/releases>`_
|
||||
and click ``Draft a new release``.
|
||||
|
||||
* ``Target`` should be the ``releases/vX.Y`` branch (e.g., ``releases/v0.15``).
|
||||
* Set ``Tag version`` to the name of the tag that will be created.
|
||||
|
||||
* ``Release title`` should be ``vX.Y.Z`` (To match the tag, e.g., ``v0.15.1``).
|
||||
The name should start with ``v`` and contain *all three*
|
||||
parts of the version (e.g. ``v0.15.0`` or ``v0.15.1``).
|
||||
|
||||
* For the text, paste the latest release markdown from your ``CHANGELOG.md``.
|
||||
* Set ``Target`` to the ``releases/vX.Y`` branch (e.g., ``releases/v0.15``).
|
||||
|
||||
You can save the draft and keep coming back to this as you prepare the release.
|
||||
* Set ``Release title`` to ``vX.Y.Z`` to match the tag (e.g., ``v0.15.1``).
|
||||
|
||||
#. When you are done, click ``Publish release``.
|
||||
* Paste the latest release markdown from your ``CHANGELOG.md`` file as the text.
|
||||
|
||||
* Save the draft so you can keep coming back to it as you prepare the release.
|
||||
|
||||
#. When you are ready to finalize the release, click ``Publish release``.
|
||||
|
||||
#. Immediately after publishing, go back to
|
||||
`github.com/spack/spack/releases
|
||||
@@ -791,22 +1236,26 @@ Publishing a release on GitHub
|
||||
auto-generated ``.tar.gz`` file for the release. It's the ``Source
|
||||
code (tar.gz)`` link.
|
||||
|
||||
#. Click ``Edit`` on the release you just did and attach the downloaded
|
||||
#. Click ``Edit`` on the release you just made and attach the downloaded
|
||||
release tarball as a binary. This does two things:
|
||||
|
||||
#. Makes sure that the hash of our releases doesn't change over time.
|
||||
GitHub sometimes annoyingly changes they way they generate
|
||||
tarballs, and then hashes can change if you rely on the
|
||||
#. Makes sure that the hash of our releases does not change over time.
|
||||
|
||||
GitHub sometimes annoyingly changes the way they generate tarballs
|
||||
that can result in the hashes changing if you rely on the
|
||||
auto-generated tarball links.
|
||||
|
||||
#. Gets us download counts on releases visible through the GitHub
|
||||
API. GitHub tracks downloads of artifacts, but *not* the source
|
||||
#. Gets download counts on releases visible through the GitHub API.
|
||||
|
||||
GitHub tracks downloads of artifacts, but *not* the source
|
||||
links. See the `releases
|
||||
page <https://api.github.com/repos/spack/spack/releases>`_ and search
|
||||
for ``download_count`` to see this.
|
||||
|
||||
#. Go to `readthedocs.org <https://readthedocs.org/projects/spack>`_ and activate
|
||||
the release tag. This builds the documentation and makes the released version
|
||||
#. Go to `readthedocs.org <https://readthedocs.org/projects/spack>`_ and
|
||||
activate the release tag.
|
||||
|
||||
This builds the documentation and makes the released version
|
||||
selectable in the versions menu.
|
||||
|
||||
|
||||
@@ -820,23 +1269,23 @@ If the new release is the **highest** Spack release yet, you should
|
||||
also tag it as ``releases/latest``. For example, suppose the highest
|
||||
release is currently ``0.15.3``:
|
||||
|
||||
* If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
|
||||
it with ``releases/latest``, as these are higher than ``0.15.3``.
|
||||
* If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
|
||||
it with ``releases/latest``, as these are higher than ``0.15.3``.
|
||||
|
||||
* If you are making a new release of an **older** major version of
|
||||
Spack, e.g. ``0.14.4``, then you should not tag it as
|
||||
``releases/latest`` (as there are newer major versions).
|
||||
* If you are making a new release of an **older** major version of
|
||||
Spack, e.g. ``0.14.4``, then you should not tag it as
|
||||
``releases/latest`` (as there are newer major versions).
|
||||
|
||||
To tag ``releases/latest``, do this:
|
||||
To tag ``releases/latest``, do this:
|
||||
|
||||
.. code-block:: console
|
||||
.. code-block:: console
|
||||
|
||||
$ git checkout releases/vX.Y # vX.Y is the new release's branch
|
||||
$ git tag --force releases/latest
|
||||
$ git push --tags
|
||||
$ git checkout releases/vX.Y # vX.Y is the new release's branch
|
||||
$ git tag --force releases/latest
|
||||
$ git push --force --tags
|
||||
|
||||
The ``--force`` argument makes ``git`` overwrite the existing
|
||||
``releases/latest`` tag with the new one.
|
||||
The ``--force`` argument to ``git tag`` makes ``git`` overwrite the existing
|
||||
``releases/latest`` tag with the new one.
|
||||
|
||||
We also merge each release that we tag as ``releases/latest`` into ``develop``.
|
||||
Make sure to do this with a merge commit:
|
||||
@@ -844,17 +1293,17 @@ Make sure to do this with a merge commit:
|
||||
.. code-block:: console
|
||||
|
||||
$ git checkout develop
|
||||
$ git merge --no-ff vX.Y.Z # vX.Y.Z is the new release's tag
|
||||
$ git merge --no-ff -s ours vX.Y.Z # vX.Y.Z is the new release's tag
|
||||
$ git push
|
||||
|
||||
We merge back to ``develop`` because it:
|
||||
|
||||
* updates the version and ``CHANGELOG.md`` on ``develop``.
|
||||
* updates the version and ``CHANGELOG.md`` on ``develop``; and
|
||||
* ensures that your release tag is reachable from the head of
|
||||
``develop``
|
||||
``develop``.
|
||||
|
||||
We *must* use a real merge commit (via the ``--no-ff`` option) because it
|
||||
ensures that the release tag is reachable from the tip of ``develop``.
|
||||
We *must* use a real merge commit (via the ``--no-ff`` option) to
|
||||
ensure that the release tag is reachable from the tip of ``develop``.
|
||||
This is necessary for ``spack -V`` to work properly -- it uses ``git
|
||||
describe --tags`` to find the last reachable tag in the repository and
|
||||
reports how far we are from it. For example:
|
||||
@@ -872,6 +1321,7 @@ the release is complete and tagged. If you do it before you've tagged the
|
||||
release and later decide you want to tag some later commit, you'll need
|
||||
to merge again.
|
||||
|
||||
|
||||
.. _announcing-releases:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
@@ -882,20 +1332,40 @@ We announce releases in all of the major Spack communication channels.
|
||||
Publishing the release takes care of GitHub. The remaining channels are
|
||||
Twitter, Slack, and the mailing list. Here are the steps:
|
||||
|
||||
#. Make a tweet to announce the release. It should link to the release's
|
||||
page on GitHub. You can base it on `this example tweet
|
||||
<https://twitter.com/spackpm/status/1231761858182307840>`_.
|
||||
#. Announce the release on Twitter.
|
||||
|
||||
#. Ping ``@channel`` in ``#general`` on Slack (`spackpm.slack.com
|
||||
<https://spackpm.slack.com>`_) with a link to the tweet. The tweet
|
||||
will be shown inline so that you do not have to retype your release
|
||||
announcement.
|
||||
* Compose the tweet on the ``@spackpm`` account per the
|
||||
``spack-twitter`` slack channel.
|
||||
|
||||
#. Email the Spack mailing list to let them know about the release. As
|
||||
with the tweet, you likely want to link to the release's page on
|
||||
GitHub. It's also helpful to include some information directly in the
|
||||
email. You can base yours on this `example email
|
||||
<https://groups.google.com/forum/#!topic/spack/WT4CT9i_X4s>`_.
|
||||
* Be sure to include a link to the release's page on GitHub.
|
||||
|
||||
Once you've announced the release, congratulations, you're done! You've
|
||||
finished making the release!
|
||||
You can base the tweet on `this
|
||||
example <https://twitter.com/spackpm/status/1231761858182307840>`_.
|
||||
|
||||
#. Announce the release on Slack.
|
||||
|
||||
* Compose a message in the ``#general`` Slack channel
|
||||
(`spackpm.slack.com <https://spackpm.slack.com>`_).
|
||||
|
||||
* Preface the message with ``@channel`` to notify even those
|
||||
people not currently logged in.
|
||||
|
||||
* Be sure to include a link to the tweet above.
|
||||
|
||||
The tweet will be shown inline so that you do not have to retype
|
||||
your release announcement.
|
||||
|
||||
#. Announce the release on the Spack mailing list.
|
||||
|
||||
* Compose an email to the Spack mailing list.
|
||||
|
||||
* Be sure to include a link to the release's page on GitHub.
|
||||
|
||||
* It is also helpful to include some information directly in the
|
||||
email.
|
||||
|
||||
You can base your announcement on this `example
|
||||
email <https://groups.google.com/forum/#!topic/spack/WT4CT9i_X4s>`_.
|
||||
|
||||
Once you've completed the above steps, congratulations, you're done!
|
||||
You've finished making the release!
|
||||
|
@@ -248,9 +248,9 @@ Users can add abstract specs to an Environment using the ``spack add``
|
||||
command. The most important component of an Environment is a list of
|
||||
abstract specs.
|
||||
|
||||
Adding a spec adds to the manifest (the ``spack.yaml`` file) and to
|
||||
the roots of the Environment, but does not affect the concrete specs
|
||||
in the lockfile, nor does it install the spec.
|
||||
Adding a spec adds to the manifest (the ``spack.yaml`` file), which is
|
||||
used to define the roots of the Environment, but does not affect the
|
||||
concrete specs in the lockfile, nor does it install the spec.
|
||||
|
||||
The ``spack add`` command is environment aware. It adds to the
|
||||
currently active environment. All environment aware commands can also
|
||||
@@ -356,6 +356,18 @@ command also stores a Spack repo containing the ``package.py`` file
|
||||
used at install time for each package in the ``repos/`` directory in
|
||||
the Environment.
|
||||
|
||||
The ``--no-add`` option can be used in a concrete environment to tell
|
||||
spack to install specs already present in the environment but not to
|
||||
add any new root specs to the environment. For root specs provided
|
||||
to ``spack install`` on the command line, ``--no-add`` is the default,
|
||||
while for dependency specs, on the other hand, it is optional. In other
|
||||
words, if there is an unambiguous match in the active concrete environment
|
||||
for a root spec provided to ``spack install`` on the command line, spack
|
||||
does not require you to specify the ``--no-add`` option to prevent the spec
|
||||
from being added again. At the same time, a spec that already exists in the
|
||||
environment, but only as a dependency, will be added to the environment as a
|
||||
root spec without the ``--no-add`` option.
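For example, assuming an active concrete environment that already contains
``hdf5`` as a root spec and ``zlib`` only as a dependency (package names are
illustrative), the behavior described above looks like this:

.. code-block:: console

   # hdf5 is already a root: it is installed but not added again,
   # even without --no-add
   $ spack install hdf5

   # zlib is present only as a dependency: pass --no-add to install it
   # without also promoting it to a root spec
   $ spack install --no-add zlib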
|
||||
|
||||
^^^^^^^
|
||||
Loading
|
||||
^^^^^^^
|
||||
|
@@ -19,7 +19,7 @@ before Spack is run:
|
||||
#. Python 2 (2.6 or 2.7) or 3 (3.5 - 3.9) to run Spack
|
||||
#. A C/C++ compiler for building
|
||||
#. The ``make`` executable for building
|
||||
#. The ``tar``, ``gzip``, ``bzip2``, ``xz`` and optionally ``zstd``
|
||||
#. The ``tar``, ``gzip``, ``unzip``, ``bzip2``, ``xz`` and optionally ``zstd``
|
||||
executables for extracting source code
|
||||
#. The ``patch`` command to apply patches
|
||||
#. The ``git`` and ``curl`` commands for fetching
|
||||
|
@@ -67,6 +67,7 @@ or refer to the full manual below.
|
||||
build_settings
|
||||
environments
|
||||
containers
|
||||
monitoring
|
||||
mirrors
|
||||
module_file_support
|
||||
repositories
|
||||
@@ -77,6 +78,12 @@ or refer to the full manual below.
|
||||
extensions
|
||||
pipelines
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Research
|
||||
|
||||
analyze
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Contributing
|
||||
|
@@ -159,6 +159,27 @@ can supply a file with specs in it, one per line:
|
||||
This is useful if there is a specific suite of software managed by
|
||||
your site.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
Mirror environment
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
To create a mirror of all packages required by a concrete environment, activate the environment and call ``spack mirror create -a``.
|
||||
This is especially useful to create a mirror of an environment concretized on another machine.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
[remote] $ spack env create myenv
|
||||
[remote] $ spack env activate myenv
|
||||
[remote] $ spack add ...
|
||||
[remote] $ spack concretize
|
||||
|
||||
$ sftp remote:/spack/var/environment/myenv/spack.lock
|
||||
$ spack env create myenv spack.lock
|
||||
$ spack env activate myenv
|
||||
$ spack mirror create -a
|
||||
|
||||
|
||||
|
||||
.. _cmd-spack-mirror-add:
|
||||
|
||||
--------------------
|
||||
|
@@ -130,9 +130,8 @@ list of environment modifications.
|
||||
to the corresponding environment variables:
|
||||
|
||||
================== =================================
|
||||
LIBRARY_PATH ``self.prefix/rlib/R/lib``
|
||||
LD_LIBRARY_PATH ``self.prefix/rlib/R/lib``
|
||||
CPATH ``self.prefix/rlib/R/include``
|
||||
PKG_CONFIG_PATH ``self.prefix/rlib/pkgconfig``
|
||||
================== =================================
|
||||
|
||||
with the following snippet:
|
||||
|
104
lib/spack/docs/monitoring.rst
Normal file
@@ -0,0 +1,104 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
.. _monitoring:
|
||||
|
||||
==========
|
||||
Monitoring
|
||||
==========
|
||||
|
||||
You can use a `spack monitor <https://github.com/spack/spack-monitor>`_ "Spackmon"
|
||||
server to store a database of your packages, builds, and associated metadata
|
||||
for provenance, research, or some other kind of development. You should
|
||||
follow the instructions in the `spack monitor documentation <https://spack-monitor.readthedocs.org>`_
|
||||
to first create a server along with a username and token for yourself.
|
||||
You can then use this guide to interact with the server.
|
||||
|
||||
-------------------
|
||||
Analysis Monitoring
|
||||
-------------------
|
||||
|
||||
To read about how to monitor an analysis (meaning you want to send analysis results
|
||||
to a server) see :ref:`analyze_monitoring`.
|
||||
|
||||
---------------------
|
||||
Monitoring An Install
|
||||
---------------------
|
||||
|
||||
Since an install is typically when you build packages, we logically want
|
||||
to tell spack to monitor during this step. Let's start with an example
|
||||
where we want to monitor the install of hdf5. Unless you have disabled authentication
|
||||
for the server, we first want to export our spack monitor token and username to the environment:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438
|
||||
$ export SPACKMON_USER=spacky
|
||||
|
||||
|
||||
By default, the host for your server is expected to be at ``http://127.0.0.1``
|
||||
with a prefix of ``ms1``, and if this is the case, you can simply add the
|
||||
``--monitor`` flag to the install command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install --monitor hdf5
|
||||
|
||||
|
||||
If you need to customize the host or the prefix, you can do that as well:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install --monitor --monitor-prefix monitor --monitor-host https://monitor-service.io hdf5
|
||||
|
||||
|
||||
As a precaution, the spack client exits early if you have not provided
|
||||
authentication credentials. For example, if you run the command above without
|
||||
exporting your username or token, you'll see:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
==> Error: You are required to export SPACKMON_TOKEN and SPACKMON_USER
|
||||
|
||||
This extra check is to ensure that we don't start any builds,
|
||||
and then discover that you forgot to export your token. However, if
|
||||
your monitoring server has authentication disabled, you can tell this to
|
||||
the client to skip this step:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install --monitor --monitor-disable-auth hdf5
|
||||
|
||||
If the service is not running, you'll cleanly exit early; the install will
|
||||
not continue if you've asked it to monitor and there is no service.
|
||||
For example, here is what you'll see if the monitoring service is not running:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
[Errno 111] Connection refused
|
||||
|
||||
|
||||
If you want to continue builds (and stop monitoring) you can set the ``--monitor-keep-going``
|
||||
flag.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install --monitor --monitor-keep-going hdf5
|
||||
|
||||
This could mean that if a request fails, you only have partial or no data
|
||||
added to your monitoring database. This setting will not be applied to the
|
||||
first request to check if the server is running, but to subsequent requests.
|
||||
If you don't have a monitor server running and you want to build, simply
|
||||
don't provide the ``--monitor`` flag! Finally, if you want to provide one or
|
||||
more tags to your build, you can do:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# Add one tag, "pizza"
|
||||
$ spack install --monitor --monitor-tags pizza hdf5
|
||||
|
||||
# Add two tags, "pizza" and "pasta"
|
||||
$ spack install --monitor --monitor-tags pizza,pasta hdf5
|
||||
|
@@ -1206,7 +1206,7 @@ Variants
|
||||
|
||||
Many software packages can be configured to enable optional
|
||||
features, which often come at the expense of additional dependencies or
|
||||
longer build-times. To be flexible enough and support a wide variety of
|
||||
longer build times. To be flexible enough and support a wide variety of
|
||||
use cases, Spack allows package authors to expose to the end-user the ability to choose
|
||||
which features should be activated in a package at the time it is installed.
|
||||
The mechanism to be employed is the :py:func:`spack.directives.variant` directive.
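For example, a package author might expose an optional feature with a single
directive (a minimal sketch; the variant name and description are illustrative):

.. code-block:: python

   class Foo(Package):
       """Example package exposing an optional feature."""

       # Users can toggle this at install time, e.g. ``spack install foo ~shared``
       variant('shared', default=True, description='Build shared libraries')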
|
||||
@@ -3911,186 +3911,337 @@ using MPI wrappers will work, even on even on a Cray:
|
||||
|
||||
This is because on Cray, ``spec['mpi'].mpicc`` is just ``spack_cc``.
|
||||
|
||||
.. _sanity-checks:
|
||||
.. _checking_an_installation:
|
||||
|
||||
------------------------
|
||||
Checking an installation
|
||||
------------------------
|
||||
|
||||
By default, Spack assumes that a build has failed if nothing is
|
||||
written to the install prefix, and that it has succeeded if anything
|
||||
(a file, a directory, etc.) is written to the install prefix after
|
||||
``install()`` completes.
|
||||
A package that *appears* to install successfully may not actually be
|
||||
installed correctly or continue to work indefinitely.
|
||||
There are a number of possible points of failure so Spack provides
|
||||
features for checking the software along the way.
|
||||
|
||||
Consider a simple autotools build like this:
|
||||
Failures can occur during and after the installation process. The
|
||||
build may start but the software may not end up fully installed. The
|
||||
installed software may not work at all or as expected. The software
|
||||
may work after being installed but, due to changes on the system,
|
||||
may stop working days, weeks, or months after being installed.
|
||||
|
||||
.. code-block:: python
|
||||
This section describes Spack's support for checks that can be performed
|
||||
during and after its installation. The former checks are referred to as
|
||||
``build-time tests`` and the latter as ``stand-alone (or smoke) tests``.
|
||||
|
||||
def install(self, spec, prefix):
|
||||
configure("--prefix={0}".format(prefix))
|
||||
make()
|
||||
make("install")
|
||||
|
||||
If you are using using standard autotools or CMake, ``configure`` and
|
||||
``make`` will not write anything to the install prefix. Only ``make
|
||||
install`` writes the files, and only once the build is already
|
||||
complete.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
``sanity_check_is_file`` and ``sanity_check_is_dir``
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Unfortunately, many builds of scientific
|
||||
software modify the install prefix *before* ``make install``. Builds
|
||||
like this can falsely report that they were successfully installed if
|
||||
an error occurs before the install is complete but after files have
|
||||
been written to the ``prefix``.
|
||||
|
||||
You can optionally specify *sanity checks* to deal with this problem.
|
||||
Add properties like this to your package:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class MyPackage(Package):
|
||||
...
|
||||
|
||||
sanity_check_is_file = ['include/libelf.h']
|
||||
sanity_check_is_dir = [lib]
|
||||
|
||||
def install(self, spec, prefix):
|
||||
configure("--prefix=" + prefix)
|
||||
make()
|
||||
make("install")
|
||||
|
||||
Now, after ``install()`` runs, Spack will check whether
|
||||
``$prefix/include/libelf.h`` exists and is a file, and whether
|
||||
``$prefix/lib`` exists and is a directory. If the checks fail, then
|
||||
the build will fail and the install prefix will be removed. If they
|
||||
succeed, Spack considers the build successful and keeps the prefix in
|
||||
place.
|
||||
.. _build_time-tests:
|
||||
|
||||
^^^^^^^^^^^^^^^^
|
||||
Build-time tests
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
Sometimes packages finish to build "correctly" and issues with their run-time
|
||||
behavior are discovered only at a later stage, maybe after a full software stack
|
||||
relying on them has already been built. To avoid situations of that kind it's possible
|
||||
to write build-time tests that will be executed only if the option ``--run-tests``
|
||||
of ``spack install`` has been activated.
|
||||
Spack infers the status of a build based on the contents of the install
|
||||
prefix. Success is assumed if anything (e.g., a file, directory) is
|
||||
written after ``install()`` completes. Otherwise, the build is assumed
|
||||
to have failed. However, the presence of install prefix contents
|
||||
is not a sufficient indicator of success.
|
||||
|
||||
The proper way to write these tests is relying on two decorators that come with
|
||||
any base class listed in :ref:`installation_procedure`.
|
||||
Consider a simple autotools build using the following commands:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: console
|
||||
|
||||
@run_after('build')
|
||||
@on_package_attributes(run_tests=True)
|
||||
def check_build(self):
|
||||
# Custom implementation goes here
|
||||
pass
|
||||
$ ./configure --prefix=/path/to/installation/prefix
|
||||
$ make
|
||||
$ make install
|
||||
|
||||
The first decorator ``run_after('build')`` schedules this
|
||||
function to be invoked after the ``build`` phase has been executed, while the
|
||||
second one makes the invocation conditional on the fact that ``self.run_tests == True``.
|
||||
It is also possible to schedule a function to be invoked *before* a given phase
|
||||
using the ``run_before`` decorator.
|
||||
Standard Autotools and CMake do not write anything to the prefix from
|
||||
the ``configure`` and ``make`` commands. Files are only written from
|
||||
the ``make install`` after the build completes.
|
||||
|
||||
.. note::
|
||||
|
||||
Default implementations for build-time tests
|
||||
If you want to learn more about ``Autotools`` and ``CMake`` packages
|
||||
in Spack, refer to :ref:`AutotoolsPackage <autotoolspackage>` and
|
||||
:ref:`CMakePackage <cmakepackage>`, respectively.
|
||||
|
||||
Packages that are built using specific build systems may already have a
|
||||
default implementation for build-time tests. For instance :py:class:`~.AutotoolsPackage`
|
||||
based packages will try to invoke ``make test`` and ``make check`` if
|
||||
Spack is asked to run tests.
|
||||
More information on each class is available in the the :py:mod:`~.spack.build_systems`
|
||||
documentation.
|
||||
What can you do to check that the build is progressing satisfactorily?
|
||||
If there are specific files and/or directories expected of a successful
|
||||
installation, you can add basic, fast ``sanity checks``. You can also add
|
||||
checks to be performed after one or more installation phases.
|
||||
|
||||
.. _sanity-checks:
|
||||
|
||||
""""""""""""""""""""
|
||||
Adding sanity checks
|
||||
""""""""""""""""""""
|
||||
|
||||
Unfortunately, many builds of scientific software modify the installation
|
||||
prefix **before** ``make install``. Builds like this can falsely report
|
||||
success when an error occurs before the installation is complete. Simple
|
||||
sanity checks can be used to identify files and/or directories that are
|
||||
required of a successful installation. Spack checks for the presence of
|
||||
the files and directories after ``install()`` runs.
|
||||
|
||||
If any of the listed files or directories are missing, then the build will
|
||||
fail and the install prefix will be removed. If they all exist, then Spack
|
||||
considers the build successful from a sanity check perspective and keeps
|
||||
the prefix in place.
|
||||
|
||||
For example, the sanity checks for the ``reframe`` package below specify
|
||||
that eight paths must exist within the installation prefix after the
|
||||
``install`` method completes.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class Reframe(Package):
|
||||
...
|
||||
|
||||
# sanity check
|
||||
sanity_check_is_file = [join_path('bin', 'reframe')]
|
||||
sanity_check_is_dir = ['bin', 'config', 'docs', 'reframe', 'tutorials',
|
||||
'unittests', 'cscs-checks']
|
||||
|
||||
Spack will then ensure the installation created the **file**:
|
||||
|
||||
* ``self.prefix/bin/reframe``
|
||||
|
||||
It will also check for the existence of the following **directories**:
|
||||
|
||||
* ``self.prefix/bin``
|
||||
* ``self.prefix/config``
|
||||
* ``self.prefix/docs``
|
||||
* ``self.prefix/reframe``
|
||||
* ``self.prefix/tutorials``
|
||||
* ``self.prefix/unittests``
|
||||
* ``self.prefix/cscs-checks``
|
||||
|
||||
.. note::
|
||||
|
||||
You **MUST** use ``sanity_check_is_file`` to specify required
|
||||
files and ``sanity_check_is_dir`` for required directories.
|
||||
|
||||
.. _install_phase-tests:

"""""""""""""""""""""""""""""""
Adding installation phase tests
"""""""""""""""""""""""""""""""

Sometimes packages appear to build "correctly" only to have run-time
behavior issues discovered at a later stage, such as after a full
software stack relying on them has been built. Checks can be performed
at different phases of the package installation to possibly avoid
these types of problems. Some checks are built-in to different build
systems, while others will need to be added to the package.

Built-in installation phase tests are provided by packages inheriting
from select :ref:`build systems <build-systems>`, where naming conventions
are used to identify typical test identifiers for those systems. In
general, you won't need to add anything to your package to take advantage
of these tests if your software's build system complies with the convention;
otherwise, you'll want or need to override the post-phase method to perform
other checks.

.. list-table:: Built-in installation phase tests
   :header-rows: 1

   * - Build System Class
     - Post-Build Phase Method (Runs)
     - Post-Install Phase Method (Runs)
   * - `AutotoolsPackage <build_systems/autotoolspackage>`
     - ``check`` (``make test``, ``make check``)
     - ``installcheck`` (``make installcheck``)
   * - `CMakePackage <build_systems/cmakepackage>`
     - ``check`` (``make check``, ``make test``)
     - Not applicable
   * - `MakefilePackage <build_systems/makefilepackage>`
     - ``check`` (``make test``, ``make check``)
     - ``installcheck`` (``make installcheck``)
   * - `MesonPackage <build_systems/mesonpackage>`
     - ``check`` (``make test``, ``make check``)
     - Not applicable
   * - `PerlPackage <build_systems/perlpackage>`
     - ``check`` (``make test``)
     - Not applicable
   * - `PythonPackage <build_systems/pythonpackage>`
     - Not applicable
     - ``test`` (module imports)
   * - `QMakePackage <build_systems/qmakepackage>`
     - ``check`` (``make check``)
     - Not applicable
   * - `SConsPackage <build_systems/sconspackage>`
     - ``build_test`` (must be overridden)
     - Not applicable
   * - `SIPPackage <build_systems/sippackage>`
     - Not applicable
     - ``test`` (module imports)

For example, the ``Libelf`` package inherits from ``AutotoolsPackage``
and its ``Makefile`` has a standard ``check`` target. So Spack will
automatically run ``make check`` after the ``build`` phase when it
is installed using the ``--test`` option, such as:

.. code-block:: console

   $ spack install --test=root libelf

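If your software's build system does not follow the convention, you can
override the built-in method listed in the table above. Below is a minimal
sketch, assuming a hypothetical project whose ``Makefile`` provides a
``smoke`` target instead of the standard ``check`` target:

.. code-block:: python

   class MyAutotoolsPackage(AutotoolsPackage):
       ...

       # Replace the built-in post-build check with the project's
       # nonstandard 'smoke' target (the target name is hypothetical).
       def check(self):
           with working_dir(self.build_directory):
               make('smoke')
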
In addition to overriding any built-in build system installation
phase tests, you can write your own install phase tests. You will
need to use two decorators for each phase test method:

* ``run_after``
* ``on_package_attributes``

The first decorator tells Spack when in the installation process to
run your test method; namely *after* the provided installation phase.
The second decorator tells Spack to only run the checks when the
``--test`` option is provided on the command line.

.. note::

   Be sure to place the directives above your test method in the order
   ``run_after`` *then* ``on_package_attributes``.

.. note::

   You also want to be sure the package supports the phase you use
   in the ``run_after`` directive. For example, ``PackageBase`` only
   supports the ``install`` phase while the ``AutotoolsPackage`` and
   ``MakefilePackage`` support both ``install`` and ``build`` phases.

Assuming both ``build`` and ``install`` phases are available to you,
you could add additional checks to be performed after each of those
phases based on the skeleton provided below.

.. code-block:: python

   class YourMakefilePackage(MakefilePackage):
       ...

       @run_after('build')
       @on_package_attributes(run_tests=True)
       def check_build(self):
           # Add your custom post-build phase tests
           pass

       @run_after('install')
       @on_package_attributes(run_tests=True)
       def check_install(self):
           # Add your custom post-install phase tests
           pass

.. note::

   You could also schedule work to be done **before** a given phase
   using the ``run_before`` decorator.

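Here is a minimal sketch of such a pre-install check (the binary name is
hypothetical, and ``os`` is assumed to be imported at the top of the
package file):

.. code-block:: python

   class YourMakefilePackage(MakefilePackage):
       ...

       @run_before('install')
       @on_package_attributes(run_tests=True)
       def check_build_artifacts(self):
           # Hypothetical pre-install check: fail early if the build
           # did not produce the expected binary.
           assert os.path.isfile(join_path(self.build_directory, 'your_binary'))
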
By way of a concrete example, the ``reframe`` package mentioned
previously has a simple installation phase check that runs the
installed executable. The check is implemented as follows:

.. code-block:: python

   class Reframe(Package):
       ...

       # check if we can run reframe
       @run_after('install')
       @on_package_attributes(run_tests=True)
       def check_list(self):
           with working_dir(self.stage.source_path):
               reframe = Executable(join_path(self.prefix, 'bin', 'reframe'))
               reframe('-l')

.. warning::

   The API for adding tests is not yet considered stable and may change
   drastically in future releases.

.. _cmd-spack-test:

^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Stand-alone (or smoke) tests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

While build-time tests are integrated with the installation process,
stand-alone tests are independent of that process. Consequently, such
tests can be performed days, even weeks, after the software is installed.

Stand-alone tests are checks that should run relatively quickly -- as
in on the order of at most a few minutes -- and ideally execute all
aspects of the installed software, or at least key functionality.

.. note::

   Execution speed is important because these tests are intended
   to quickly assess whether the installed software works on the
   system.

   Failing stand-alone tests indicate that there is no reason to
   proceed with more resource-intensive tests.

   Passing stand-alone (or smoke) tests can lead to more thorough
   testing, such as extensive unit or regression tests, or tests
   that run at scale. Spack support for more thorough testing is
   a work in progress.

Stand-alone tests have their own test stage directory, which can be
configured. These tests can compile or build software with the compiler
used to build the package. They can use files cached from the build for
testing the installation. Custom files, such as source, data, or expected
outputs can be added for use in these tests.

""""""""""""""""""""""""""""""""""""
|
||||
Configuring the test stage directory
|
||||
""""""""""""""""""""""""""""""""""""
|
||||
|
||||
Stand-alone tests rely on a stage directory for building, running,
|
||||
and tracking results.
|
||||
The default directory, ``~/.spack/test``, is defined in
|
||||
:ref:`etc/spack/defaults/config.yaml <config-yaml>`.
|
||||
You can configure the location in the high-level ``config`` by adding
|
||||
or changing the ``test_stage`` path in the appropriate ``config.yaml``
|
||||
file such that:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
test_stage: /path/to/stage
|
||||
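
If you prefer not to edit the configuration file by hand, the same setting
can also be added from the command line with ``spack config add``; a minimal
sketch, assuming a Spack version that provides that subcommand (the path is
only an example):

.. code-block:: console

   $ spack config add "config:test_stage:/path/to/stage"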

The package can access this path **during test processing** using
``self.test_suite.stage``.

.. note::

   The test stage path is established for the entire suite. That
   means it is the root directory for all specs being tested
   with the same ``spack test run`` command. Each spec gets its
   own stage subdirectory.

"""""""""""""""""""""""""
|
||||
Enabling test compilation
|
||||
"""""""""""""""""""""""""
|
||||
|
||||
Some tests may require access to the compiler with which the package
|
||||
was built, especially to test library-only packages. You must enable
|
||||
loading the package's compiler configuration by setting the attribute
|
||||
``test_requires_compiler`` to ``True``. Doing so makes the compiler
|
||||
available in the test environment through the canonical environment
|
||||
variables (``CC``, ``CXX``, ``FC``, ``F77``).
|
||||
Some stand-alone tests will require access to the compiler with which
|
||||
the package was built, especially for library-only packages. You must
|
||||
enable loading the package's compiler configuration by setting the
|
||||
``test_requires_compiler`` property to ``True`` for your package.
|
||||
For example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class MyPackage(Package):
|
||||
...
|
||||
|
||||
test_requires_compiler = True
|
||||
|
||||
Setting this property to ``True`` makes the compiler available in the
|
||||
test environment through the canonical environment variables (e.g.,
|
||||
``CC``, ``CXX``, ``FC``, ``F77``).
|
||||
|
||||
.. note::
|
||||
|
||||
We recommend adding the property at the top of the package with the
|
||||
other attributes, such as ``homepage`` and ``url``.
|
||||
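
As a sketch of how this property can be used, the hypothetical test below
compiles and runs a cached ``demo.c`` with the compiler exposed through
``CC``. The file name, link flags, and expected output are illustrative
assumptions, not part of any real package:

.. code-block:: python

   import os

   class MyLibraryPackage(Package):
       ...

       test_requires_compiler = True

       def test(self):
           # $CC is set to the package's compiler because of the property above
           src = join_path(self.install_test_root, 'examples', 'demo.c')
           self.run_test(os.environ['CC'],
                         options=['-o', 'demo', src,
                                  '-I{0}'.format(self.prefix.include),
                                  '-L{0}'.format(self.prefix.lib), '-lmylib'],
                         purpose='test: compiling and linking demo.c')
           self.run_test('./demo', expected=['Hello'],
                         purpose='test: running demo')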

.. _cache_extra_test_sources:

"""""""""""""""""""""""
Adding build-time files
"""""""""""""""""""""""

.. note::

   We highly recommend re-using build-time tests and input files
   for testing installed software. Because these files reside within
   the software's repository, they are easier to keep synchronized
   with the software than custom install test files maintained with
   the Spack package.

You can use the ``cache_extra_test_sources`` method to copy directories
and/or files from the build stage directory to the package's installation
directory.

The signature for ``cache_extra_test_sources`` is:

.. code-block:: python

   def cache_extra_test_sources(self, srcs):

where ``srcs`` is a string or a list of strings corresponding to
the paths for the files and/or subdirectories, relative to the staged
source, that are to be copied to the corresponding path relative to
``self.install_test_root``. All of the contents within each subdirectory
will also be copied.

For example, a package method for copying everything in the ``tests``
subdirectory plus the ``foo.c`` and ``bar.c`` files from ``examples``
can be implemented as shown below.

.. note::

   The ``run_after`` directive ensures associated files are copied
   **after** the package is installed by the build process.

.. code-block:: python

   class MyPackage(Package):
       ...

       @run_after('install')
       def copy_test_sources(self):
           srcs = ['tests',
                   join_path('examples', 'foo.c'),
                   join_path('examples', 'bar.c')]
           self.cache_extra_test_sources(srcs)

In this case, the method copies the associated files from the build
stage **after** the software is installed to the package's metadata
directory. The result is that the following directory and files will be
available for use in stand-alone tests:

* ``join_path(self.install_test_root, 'tests')`` along with its files and subdirectories
* ``join_path(self.install_test_root, 'examples', 'foo.c')``
* ``join_path(self.install_test_root, 'examples', 'bar.c')``

.. note::

   While source and input files are generally recommended, binaries
   **may** also be cached by the build process for install testing.
   Only you, as the package writer or maintainer, know whether these
   would be appropriate for stand-alone tests.

.. note::

   You are free to use a method name that is more suitable for
   your package.

.. _cache_custom_files:

"""""""""""""""""""
Adding custom files
"""""""""""""""""""

Examples include:

- expected test output

These extra files should be added to the ``test`` subdirectory of the
package in the Spack repository. Spack will automatically copy any files
in that directory to the test staging directory during stand-alone testing.

The ``test`` method can access those files from the
``self.test_suite.current_test_data_dir`` directory.

.. _expected_test_output_from_file:

"""""""""""""""""""""""""""""""""""
Reading expected output from a file
"""""""""""""""""""""""""""""""""""

The helper function ``get_escaped_text_output`` is available for packages
to retrieve and properly format the text from a file that contains the
output that is expected when an executable is run using ``self.run_test``.

The signature for ``get_escaped_text_output`` is:

.. code-block:: python

   def get_escaped_text_output(filename):

where ``filename`` is the path to the file containing the expected output.

The ``filename`` for a :ref:`custom file <cache_custom_files>` can be
accessed and used as illustrated by a simplified version of an ``sqlite``
package check:

.. code-block:: python

   class Sqlite(AutotoolsPackage):
       ...

       def test(self):
           test_data_dir = self.test_suite.current_test_data_dir
           db_filename = test_data_dir.join('packages.db')
           ..

           expected = get_escaped_text_output(test_data_dir.join('dump.out'))
           self.run_test('sqlite3',
                         [db_filename, '.dump'],
                         expected,
                         installed=True,
                         purpose='test: checking dump output',
                         skip_missing=False)

Expected outputs do not have to be stored with the Spack package.
Maintaining them with the source is actually preferable.

Suppose a package's source has ``examples/foo.c`` and ``examples/foo.out``
files that are copied for stand-alone test purposes using
:ref:`cache_extra_test_sources <cache_extra_test_sources>` and the
``run_test`` method builds the executable ``examples/foo``. The package
can retrieve the expected output from ``examples/foo.out`` using:

.. code-block:: python

   class MyFooPackage(Package):
       ...

       def test(self):
           ..
           filename = join_path(self.install_test_root, 'examples', 'foo.out')
           expected = get_escaped_text_output(filename)
           ..

Alternatively, suppose ``MyFooPackage`` installs tests in ``share/tests``
and their outputs in ``share/tests/outputs``. The expected output for
``foo``, assuming it is still called ``foo.out``, can be retrieved as
follows:

.. code-block:: python

   class MyFooPackage(Package):
       ...

       def test(self):
           ..
           filename = join_path(self.prefix.share.tests.outputs, 'foo.out')
           expected = get_escaped_text_output(filename)
           ..

""""""""""""""""""""""""
|
||||
Adding stand-alone tests
|
||||
""""""""""""""""""""""""
|
||||
|
||||
Stand-alone tests are defined in the package's ``test`` method. The
|
||||
default ``test`` method is a no-op so you'll want to override it to
|
||||
implement the tests.
|
||||
|
||||
.. note::
|
||||
|
||||
Any package method named ``test`` is automatically executed by
|
||||
Spack when the ``spack test run`` command is performed.
|
||||
|
||||
For example, the ``MyPackage`` package below provides a skeleton for
|
||||
the test method.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class MyPackage(Package):
|
||||
...
|
||||
|
||||
def test(self):
|
||||
# TODO: Add quick checks of the installed software
|
||||
pass
|
||||
|
||||
Stand-alone tests run in an environment that provides access to the
|
||||
package and all of its dependencies, including ``test``-type
|
||||
dependencies.
|
||||
|
||||
Standard python ``assert`` statements and other error reporting
|
||||
mechanisms can be used in the ``test`` method. Spack will report
|
||||
such errors as test failures.
|
||||
|
||||
You can implement multiple tests (or test parts) within the ``test``
|
||||
method using the ``run_test`` method. Each invocation is run separately
|
||||
in a manner that allows testing to continue after failures.
|
||||
|
||||
The signature for ``run_test`` is:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def run_test(self, exe, options=[], expected=[], status=0,
|
||||
installed=False, purpose='', skip_missing=False,
|
||||
work_dir=None):
|
||||
|
||||
where each argument has the following meaning:
|
||||
|
||||
* ``exe`` is the executable to run.
|
||||
|
||||
If a name, the ``exe`` is required to be found in one of the paths
|
||||
in the ``PATH`` environment variable **unless** ``skip_missing`` is
|
||||
``True``. Alternatively, a relative (to ``work_dir``) or fully
|
||||
qualified path for the executable can be provided in ``exe``.
|
||||
|
||||
The test will fail if the resulting path is not within the prefix
|
||||
of the package being tested **unless** ``installed`` is ``False``.
|
||||
|
||||
* ``options`` is a list of the command line options.
|
||||
|
||||
Options are a list of strings to be passed to the executable when
|
||||
it runs.
|
||||
|
||||
The default is ``[]``, which means no options are provided to the
|
||||
executable.
|
||||
|
||||
* ``expected`` is an optional list of expected output strings.
|
||||
|
||||
Spack requires every string in ``expected`` to be a regex matching
|
||||
part of the output from the test run (e.g.,
|
||||
``expected=['completed successfully', 'converged in']``). The
|
||||
output can also include expected failure outputs (e.g.,
|
||||
``expected=['failed to converge']``).
|
||||
|
||||
The expected output can be :ref:`read from a file
|
||||
<expected_test_output_from_file>`.
|
||||
|
||||
The default is ``expected=[]``, so Spack will not check the output.
|
||||
|
||||
* ``status`` is the optional expected return code(s).
|
||||
|
||||
A list of return codes corresponding to successful execution can
|
||||
be provided (e.g., ``status=[0,3,7]``). Support for non-zero return
|
||||
codes allows for basic **expected failure** tests as well as different
|
||||
return codes across versions of the software.
|
||||
|
||||
The default is ``status=[0]``, which corresponds to **successful**
|
||||
execution in the sense that the executable does not exit with a
|
||||
failure code or raise an exception.
|
||||
|
||||
* ``installed`` is used to require ``exe`` to be within the package
|
||||
prefix.
|
||||
|
||||
If ``True``, then the path for ``exe`` is required to be within the
|
||||
package prefix; otherwise, the path is not constrained.
|
||||
|
||||
The default is ``False``, so the fully qualified path for ``exe``
|
||||
does **not** need to be within the installation directory.
|
||||
|
||||
* ``purpose`` is an optional heading describing the the test part.
|
||||
|
||||
Output from the test is written to a test log file so this argument
|
||||
serves as a searchable heading in text logs to highlight the start
|
||||
of the test part. Having a description can be helpful when debugging
|
||||
failing tests.
|
||||
|
||||
* ``skip_missing`` is used to determine if the test should be skipped.
|
||||
|
||||
If ``True``, then the test part should be skipped if the executable
|
||||
is missing; otherwise, the executable must exist. This option can
|
||||
be useful when test executables are removed or change as the software
|
||||
evolves in subsequent versions.
|
||||
|
||||
The default is ``False``, which means the test executable must be
|
||||
present for any installable version of the software.
|
||||
|
||||
* ``work_dir`` is the path to the directory from which the executable
|
||||
will run.
|
||||
|
||||
The default of ``None`` corresponds to the current directory (``'.'``).
|
||||
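
Putting several of these arguments together, the sketch below implements
a ``test`` method with multiple test parts; the executable names, options,
status codes, and expected strings are purely illustrative:

.. code-block:: python

   class MyPackage(Package):
       ...

       def test(self):
           # Part 1: an installed binary must report a version string
           self.run_test('mybinary', options=['--version'],
                         expected=[r'mybinary version \d+\.\d+'],
                         installed=True,
                         purpose='test: checking the version')

           # Part 2: an expected failure; a non-zero status is acceptable
           self.run_test('mybinary', options=['--bogus-option'],
                         status=[1, 2],
                         purpose='test: expected failure on a bad option')

           # Part 3: run an optional helper only if it is present
           self.run_test('mybinary-extra', skip_missing=True,
                         purpose='test: optional extra checks')

Each part is run and reported separately, so a failure in one part does
not prevent the remaining parts from running.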

You may need to access files from one or more locations when writing
the tests. This can happen if the software's repository does not
include test source files or includes files but no way to build the
executables using the installed headers and libraries. In these
cases, you may need to reference the files relative to one or more
root directories. The associated package properties are given in
the table below.

.. list-table:: Directory-to-property mapping
   :header-rows: 1

   * - Root Directory
     - Package Property
     - Example(s)
   * - Package Installation Files
     - ``self.prefix``
     - ``self.prefix.include``, ``self.prefix.lib``
   * - Package Dependency's Files
     - ``self.spec['<dependency-package>'].prefix``
     - ``self.spec['trilinos'].prefix.include``
   * - Copied Build-time Files
     - ``self.install_test_root``
     - ``join_path(self.install_test_root, 'examples', 'foo.c')``
   * - Custom Package Files
     - ``self.test_suite.current_test_data_dir``
     - ``join_path(self.test_suite.current_test_data_dir, 'hello.f90')``

""""""""""""""""""""""""""""
Inheriting stand-alone tests
""""""""""""""""""""""""""""

Stand-alone tests defined in parent (e.g., :ref:`build-systems`) and
virtual (e.g., :ref:`virtual-dependencies`) packages are available to
packages that inherit from or provide interfaces for those packages,
respectively. The table below summarizes the tests that will be included
with those provided in the package itself when executing stand-alone tests.

.. list-table:: Inherited/provided stand-alone tests
   :header-rows: 1

   * - Parent/Provider Package
     - Stand-alone Tests
   * - `C
       <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/c>`_
     - Compiles ``hello.c`` and runs it
   * - `Cxx
       <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/cxx>`_
     - Compiles and runs several ``hello`` programs
   * - `Fortran
       <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/fortran>`_
     - Compiles and runs ``hello`` programs (``F`` and ``f90``)
   * - `Mpi
       <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/mpi>`_
     - Compiles and runs ``mpi_hello`` (``c``, ``fortran``)
   * - `PythonPackage <build_systems/pythonpackage>`
     - Imports installed modules

These tests are very generic so it is important that package
developers and maintainers provide additional stand-alone tests
customized to the package.

One example of a package that adds its own stand-alone (or smoke)
tests is the `Openmpi package
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/openmpi/package.py>`_.
The preliminary set of tests for the package performed the
following checks:

- installed binaries with the ``--version`` option return the expected
  version;
- outputs from (selected) installed binaries match expectations;
- ``make all`` succeeds when building examples that were copied from the
  source directory during package installation; and
- outputs from running the copied and built examples match expectations.

Below is an example of running and viewing the stand-alone tests,
where only the outputs for the first of each set are shown:

.. code-block:: console

   $ spack test run --alias openmpi-4.0.5 openmpi@4.0.5
   ==> Spack test openmpi-4.0.5
   ==> Testing package openmpi-4.0.5-eygjgve
   $ spack test results -l openmpi-4.0.5
   ==> Spack test openmpi-4.0.5
   ==> Testing package openmpi-4.0.5-eygjgve
   ==> Results for test suite 'openmpi-4.0.5':
   ==> openmpi-4.0.5-eygjgve PASSED
   ==> Testing package openmpi-4.0.5-eygjgve
   ==> [2021-04-26-17:35:20.259650] test: ensuring version of mpiCC is 8.3.1
   ==> [2021-04-26-17:35:20.260155] '$SPACK_ROOT/opt/spack/linux-rhel7-broadwell/gcc-8.3.1/openmpi-4.0.5-eygjgvek35awfor2qaljltjind2oa67r/bin/mpiCC' '--version'
   g++ (GCC) 8.3.1 20190311 (Red Hat 8.3.1-3)
   Copyright (C) 2018 Free Software Foundation, Inc.
   This is free software; see the source for copying conditions. There is NO
   warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

   PASSED
   ...
   ==> [2021-04-26-17:35:20.493921] test: checking mpirun output
   ==> [2021-04-26-17:35:20.494461] '$SPACK_ROOT/opt/spack/linux-rhel7-broadwell/gcc-8.3.1/openmpi-4.0.5-eygjgvek35awfor2qaljltjind2oa67r/bin/mpirun' '-n' '1' 'ls' '..'
   openmpi-4.0.5-eygjgve repo test_suite.lock
   openmpi-4.0.5-eygjgve-test-out.txt results.txt
   PASSED
   ...
   ==> [2021-04-26-17:35:20.630452] test: ensuring ability to build the examples
   ==> [2021-04-26-17:35:20.630943] '/usr/bin/make' 'all'
   mpicc -g hello_c.c -o hello_c
   mpicc -g ring_c.c -o ring_c
   mpicc -g connectivity_c.c -o connectivity_c
   mpicc -g spc_example.c -o spc_example
   ...
   PASSED
   ==> [2021-04-26-17:35:23.291214] test: checking hello_c example output and status (0)
   ==> [2021-04-26-17:35:23.291841] './hello_c'
   Hello, world, I am 0 of 1, (Open MPI v4.0.5, package: Open MPI dahlgren@quartz2300 Distribution, ident: 4.0.5, repo rev: v4.0.5, Aug 26, 2020, 114)
   PASSED
   ...
   ==> [2021-04-26-17:35:24.603152] test: ensuring copied examples cleaned up
   ==> [2021-04-26-17:35:24.603807] '/usr/bin/make' 'clean'
   rm -f hello_c hello_cxx hello_mpifh hello_usempi hello_usempif08 hello_oshmem hello_oshmemcxx hello_oshmemfh Hello.class ring_c ring_cxx ring_mpifh ring_usempi ring_usempif08 ring_oshmem ring_oshmemfh Ring.class connectivity_c oshmem_shmalloc oshmem_circular_shift oshmem_max_reduction oshmem_strided_puts oshmem_symmetric_data spc_example *~ *.o
   PASSED
   ==> [2021-04-26-17:35:24.643360] test: mpicc: expect command status in [0]
   ==> [2021-04-26-17:35:24.643834] '$SPACK_ROOT/opt/spack/linux-rhel7-broadwell/gcc-8.3.1/openmpi-4.0.5-eygjgvek35awfor2qaljltjind2oa67r/bin/mpicc' '-o' 'mpi_hello_c' '$HOME/.spack/test/hyzq5eqlqfog6fawlzxwg3prqy5vjhms/openmpi-4.0.5-eygjgve/data/mpi/mpi_hello.c'
   PASSED
   ==> [2021-04-26-17:35:24.776765] test: mpirun: expect command status in [0]
   ==> [2021-04-26-17:35:24.777194] '$SPACK_ROOT/opt/spack/linux-rhel7-broadwell/gcc-8.3.1/openmpi-4.0.5-eygjgvek35awfor2qaljltjind2oa67r/bin/mpirun' '-np' '1' 'mpi_hello_c'
   Hello world! From rank 0 of 1
   PASSED
   ...

.. warning::

   The API for adding and running stand-alone tests is not yet considered
   stable and may change drastically in future releases. Packages with
   stand-alone tests will be refactored to match changes to the API.

.. _cmd-spack-test-list:

"""""""""""""""""""
``spack test list``
"""""""""""""""""""

Packages available for install testing can be found using the
``spack test list`` command. The command outputs all installed
packages that have defined ``test`` methods.

Alternatively you can use the ``--all`` option to get a list of
all packages that have defined ``test`` methods even if they are
not installed.

For more information, refer to `spack test list
<https://spack.readthedocs.io/en/latest/command_index.html#spack-test-list>`_.

.. _cmd-spack-test-run:

""""""""""""""""""
``spack test run``
""""""""""""""""""

Install tests can be run for one or more installed packages using
the ``spack test run`` command. A ``test suite`` is created from
the provided specs. If no specs are provided it will test all specs
in the active environment or all specs installed in Spack if no
environment is active.

Test output is written to a text log file by default but ``junit``
and ``cdash`` outputs are available through the ``--log-format``
option.
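
For example, a JUnit report could be requested as sketched below (the
spec name is illustrative):

.. code-block:: console

   $ spack test run --log-format junit mypackage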

For more information, refer to `spack test run
<https://spack.readthedocs.io/en/latest/command_index.html#spack-test-run>`_.

.. _cmd-spack-test-results:

""""""""""""""""""""""
``spack test results``
""""""""""""""""""""""

The ``--failed`` option limits results shown to that of the failed
tests, if any, of matching packages.

For more information, refer to `spack test results
<https://spack.readthedocs.io/en/latest/command_index.html#spack-test-results>`_.

.. _cmd-spack-test-find:

"""""""""""""""""""
``spack test find``
"""""""""""""""""""

The ``spack test find`` command lists the aliases or content hashes
of all test suites whose results are available.

For more information, refer to `spack test find
<https://spack.readthedocs.io/en/latest/command_index.html#spack-test-find>`_.

.. _cmd-spack-test-remove:

"""""""""""""""""""""
``spack test remove``
"""""""""""""""""""""

The ``spack test remove`` command removes test suites to declutter
the test results directory. You are prompted to confirm the removal
of each test suite **unless** you use the ``--yes-to-all`` option.

For more information, refer to `spack test remove
<https://spack.readthedocs.io/en/latest/command_index.html#spack-test-remove>`_.

.. _file-manipulation:

---------------------------
File manipulation functions
---------------------------

ways of setting compilers and options, you may need to edit files or
install some files yourself to get them working with Spack.

You can do this with standard Python code, and Python has rich
libraries with functions for file manipulation and filtering. Spack
also provides a number of convenience functions of its own to make
your life even easier. These functions are described in this section.

All of the functions in this section can be included by simply
running:

@@ -240,6 +240,13 @@ takes a boolean and determines whether the pipeline uses artifacts to store and
|
||||
pass along the buildcaches from one stage to the next (the default if you don't
|
||||
provide this option is ``False``).
|
||||
|
||||
The optional ``broken-specs-url`` key tells Spack to check against a list of
|
||||
specs that are known to be currently broken in ``develop``. If any such specs
|
||||
are found, the ``spack ci generate`` command will fail with an error message
|
||||
informing the user what broken specs were encountered. This allows the pipeline
|
||||
to fail early and avoid wasting compute resources attempting to build packages
|
||||
that will not succeed.
|
||||
|
||||
The optional ``cdash`` section provides information that will be used by the
|
||||
``spack ci generate`` command (invoked by ``spack ci start``) for reporting
|
||||
to CDash. All the jobs generated from this environment will belong to a
|
||||
@@ -554,7 +561,7 @@ provision of a custom ``script`` section. The reason for this is to run
|
||||
|
||||
Now imagine you have long pipelines with many specs to be built, and you
|
||||
are pointing to a spack repository and branch that has a tendency to change
|
||||
frequently, such as the main repo and it's ``develop`` branch. If each child
|
||||
frequently, such as the main repo and its ``develop`` branch. If each child
|
||||
job checks out the ``develop`` branch, that could result in some jobs running
|
||||
with one SHA of spack, while later jobs run with another. To help avoid this
|
||||
issue, the pipeline generation process saves global variables called
|
||||
|
lib/spack/env/cc
@@ -277,10 +277,18 @@ other_args=()
|
||||
isystem_system_includes=()
|
||||
isystem_includes=()
|
||||
|
||||
while [ -n "$1" ]; do
|
||||
while [ $# -ne 0 ]; do
|
||||
|
||||
# an RPATH to be added after the case statement.
|
||||
rp=""
|
||||
|
||||
# Multiple consecutive spaces in the command line can
|
||||
# result in blank arguments
|
||||
if [ -z "$1" ]; then
|
||||
shift
|
||||
continue
|
||||
fi
|
||||
|
||||
case "$1" in
|
||||
-isystem*)
|
||||
arg="${1#-isystem}"
|
||||
@@ -311,6 +319,16 @@ while [ -n "$1" ]; do
|
||||
fi
|
||||
;;
|
||||
-l*)
|
||||
# -loopopt=0 is generated erroneously in autoconf <= 2.69,
|
||||
# and passed by ifx to the linker, which confuses it with a
|
||||
# library. Filter it out.
|
||||
# TODO: generalize filtering of args with an env var, so that
|
||||
# TODO: we do not have to special case this here.
|
||||
if { [ "$mode" = "ccld" ] || [ $mode = "ld" ]; } \
|
||||
&& [ "$1" != "${1#-loopopt}" ]; then
|
||||
shift
|
||||
continue
|
||||
fi
|
||||
arg="${1#-l}"
|
||||
if [ -z "$arg" ]; then shift; arg="$1"; fi
|
||||
other_args+=("-l$arg")
|
||||
|
lib/spack/external/__init__.py
@@ -11,7 +11,7 @@
|
||||
|
||||
* Homepage: https://pypi.python.org/pypi/archspec
|
||||
* Usage: Labeling, comparison and detection of microarchitectures
|
||||
* Version: 0.1.2 (commit 0389e83e87d3dc5043a7ac08172bd970706524d6)
|
||||
* Version: 0.1.2 (commit 130607c373fd88cd3c43da94c0d3afd3a44084b0)
|
||||
|
||||
argparse
|
||||
--------
|
||||
|
lib/spack/external/archspec/cpu/detect.py
@@ -99,17 +99,29 @@ def sysctl_info_dict():
|
||||
def sysctl(*args):
|
||||
return _check_output(["sysctl"] + list(args), env=child_environment).strip()
|
||||
|
||||
flags = (
|
||||
sysctl("-n", "machdep.cpu.features").lower()
|
||||
+ " "
|
||||
+ sysctl("-n", "machdep.cpu.leaf7_features").lower()
|
||||
)
|
||||
info = {
|
||||
"vendor_id": sysctl("-n", "machdep.cpu.vendor"),
|
||||
"flags": flags,
|
||||
"model": sysctl("-n", "machdep.cpu.model"),
|
||||
"model name": sysctl("-n", "machdep.cpu.brand_string"),
|
||||
}
|
||||
if platform.machine() == "x86_64":
|
||||
flags = (
|
||||
sysctl("-n", "machdep.cpu.features").lower()
|
||||
+ " "
|
||||
+ sysctl("-n", "machdep.cpu.leaf7_features").lower()
|
||||
)
|
||||
info = {
|
||||
"vendor_id": sysctl("-n", "machdep.cpu.vendor"),
|
||||
"flags": flags,
|
||||
"model": sysctl("-n", "machdep.cpu.model"),
|
||||
"model name": sysctl("-n", "machdep.cpu.brand_string"),
|
||||
}
|
||||
else:
|
||||
model = (
|
||||
"m1" if "Apple" in sysctl("-n", "machdep.cpu.brand_string") else "unknown"
|
||||
)
|
||||
info = {
|
||||
"vendor_id": "Apple",
|
||||
"flags": [],
|
||||
"model": model,
|
||||
"CPU implementer": "Apple",
|
||||
"model name": sysctl("-n", "machdep.cpu.brand_string"),
|
||||
}
|
||||
return info
|
||||
|
||||
|
||||
@@ -173,6 +185,11 @@ def compatible_microarchitectures(info):
|
||||
info (dict): dictionary containing information on the host cpu
|
||||
"""
|
||||
architecture_family = platform.machine()
|
||||
# On Apple M1 platform.machine() returns "arm64" instead of "aarch64"
|
||||
# so we should normalize the name here
|
||||
if architecture_family == "arm64":
|
||||
architecture_family = "aarch64"
|
||||
|
||||
# If a tester is not registered, be conservative and assume no known
|
||||
# target is compatible with the host
|
||||
tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
|
||||
|
@@ -91,6 +91,166 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"x86_64_v2": {
|
||||
"from": ["x86_64"],
|
||||
"vendor": "generic",
|
||||
"features": [
|
||||
"cx16",
|
||||
"lahf_lm",
|
||||
"mmx",
|
||||
"sse",
|
||||
"sse2",
|
||||
"ssse3",
|
||||
"sse4_1",
|
||||
"sse4_2",
|
||||
"popcnt"
|
||||
],
|
||||
"compilers": {
|
||||
"gcc": [
|
||||
{
|
||||
"versions": "11.1:",
|
||||
"name": "x86-64-v2",
|
||||
"flags": "-march={name} -mtune=generic"
|
||||
},
|
||||
{
|
||||
"versions": "4.6:11.0",
|
||||
"name": "x86-64",
|
||||
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3"
|
||||
}
|
||||
],
|
||||
"clang": [
|
||||
{
|
||||
"versions": "12.0:",
|
||||
"name": "x86-64-v2",
|
||||
"flags": "-march={name} -mtune=generic"
|
||||
},
|
||||
{
|
||||
"versions": "3.9:11.1",
|
||||
"name": "x86-64",
|
||||
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"x86_64_v3": {
|
||||
"from": ["x86_64_v2"],
|
||||
"vendor": "generic",
|
||||
"features": [
|
||||
"cx16",
|
||||
"lahf_lm",
|
||||
"mmx",
|
||||
"sse",
|
||||
"sse2",
|
||||
"ssse3",
|
||||
"sse4_1",
|
||||
"sse4_2",
|
||||
"popcnt",
|
||||
"avx",
|
||||
"avx2",
|
||||
"bmi1",
|
||||
"bmi2",
|
||||
"f16c",
|
||||
"fma",
|
||||
"abm",
|
||||
"movbe",
|
||||
"xsave"
|
||||
],
|
||||
"compilers": {
|
||||
"gcc": [
|
||||
{
|
||||
"versions": "11.1:",
|
||||
"name": "x86-64-v3",
|
||||
"flags": "-march={name} -mtune=generic"
|
||||
},
|
||||
{
|
||||
"versions": "4.8:11.0",
|
||||
"name": "x86-64",
|
||||
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave"
|
||||
}
|
||||
],
|
||||
"clang": [
|
||||
{
|
||||
"versions": "12.0:",
|
||||
"name": "x86-64-v3",
|
||||
"flags": "-march={name} -mtune=generic"
|
||||
},
|
||||
{
|
||||
"versions": "3.9:11.1",
|
||||
"name": "x86-64",
|
||||
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave"
|
||||
}
|
||||
],
|
||||
"apple-clang": [
|
||||
{
|
||||
"versions": "8.0:",
|
||||
"name": "x86-64",
|
||||
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"x86_64_v4": {
|
||||
"from": ["x86_64_v3"],
|
||||
"vendor": "generic",
|
||||
"features": [
|
||||
"cx16",
|
||||
"lahf_lm",
|
||||
"mmx",
|
||||
"sse",
|
||||
"sse2",
|
||||
"ssse3",
|
||||
"sse4_1",
|
||||
"sse4_2",
|
||||
"popcnt",
|
||||
"avx",
|
||||
"avx2",
|
||||
"bmi1",
|
||||
"bmi2",
|
||||
"f16c",
|
||||
"fma",
|
||||
"abm",
|
||||
"movbe",
|
||||
"xsave",
|
||||
"avx512f",
|
||||
"avx512bw",
|
||||
"avx512cd",
|
||||
"avx512dq",
|
||||
"avx512vl"
|
||||
],
|
||||
"compilers": {
|
||||
"gcc": [
|
||||
{
|
||||
"versions": "11.1:",
|
||||
"name": "x86-64-v4",
|
||||
"flags": "-march={name} -mtune=generic"
|
||||
},
|
||||
{
|
||||
"versions": "6.0:11.0",
|
||||
"name": "x86-64",
|
||||
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave -mavx512f -mavx512bw -mavx512cd -mavx512dq -mavx512vl"
|
||||
}
|
||||
],
|
||||
"clang": [
|
||||
{
|
||||
"versions": "12.0:",
|
||||
"name": "x86-64-v4",
|
||||
"flags": "-march={name} -mtune=generic"
|
||||
},
|
||||
{
|
||||
"versions": "3.9:11.1",
|
||||
"name": "x86-64",
|
||||
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave -mavx512f -mavx512bw -mavx512cd -mavx512dq -mavx512vl"
|
||||
}
|
||||
],
|
||||
"apple-clang": [
|
||||
{
|
||||
"versions": "8.0:",
|
||||
"name": "x86-64",
|
||||
"flags": "-march={name} -mtune=generic -mcx16 -msahf -mpopcnt -msse3 -msse4.1 -msse4.2 -mssse3 -mavx -mavx2 -mbmi -mbmi2 -mf16c -mfma -mlzcnt -mmovbe -mxsave -mavx512f -mavx512bw -mavx512cd -mavx512dq -mavx512vl"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"nocona": {
|
||||
"from": ["x86_64"],
|
||||
"vendor": "GenuineIntel",
|
||||
@@ -177,7 +337,7 @@
|
||||
}
|
||||
},
|
||||
"nehalem": {
|
||||
"from": ["core2"],
|
||||
"from": ["core2", "x86_64_v2"],
|
||||
"vendor": "GenuineIntel",
|
||||
"features": [
|
||||
"mmx",
|
||||
@@ -394,7 +554,7 @@
|
||||
}
|
||||
},
|
||||
"haswell": {
|
||||
"from": ["ivybridge"],
|
||||
"from": ["ivybridge", "x86_64_v3"],
|
||||
"vendor": "GenuineIntel",
|
||||
"features": [
|
||||
"mmx",
|
||||
@@ -642,7 +802,7 @@
|
||||
}
|
||||
},
|
||||
"skylake_avx512": {
|
||||
"from": ["skylake"],
|
||||
"from": ["skylake", "x86_64_v4"],
|
||||
"vendor": "GenuineIntel",
|
||||
"features": [
|
||||
"mmx",
|
||||
@@ -986,7 +1146,7 @@
|
||||
}
|
||||
},
|
||||
"bulldozer": {
|
||||
"from": ["x86_64"],
|
||||
"from": ["x86_64_v2"],
|
||||
"vendor": "AuthenticAMD",
|
||||
"features": [
|
||||
"mmx",
|
||||
@@ -1145,7 +1305,7 @@
|
||||
}
|
||||
},
|
||||
"excavator": {
|
||||
"from": ["steamroller"],
|
||||
"from": ["steamroller", "x86_64_v3"],
|
||||
"vendor": "AuthenticAMD",
|
||||
"features": [
|
||||
"mmx",
|
||||
@@ -1204,7 +1364,7 @@
|
||||
}
|
||||
},
|
||||
"zen": {
|
||||
"from": ["x86_64"],
|
||||
"from": ["x86_64_v3"],
|
||||
"vendor": "AuthenticAMD",
|
||||
"features": [
|
||||
"bmi1",
|
||||
@@ -1359,9 +1519,24 @@
|
||||
"popcnt",
|
||||
"clwb",
|
||||
"vaes",
|
||||
"vpclmulqdq"
|
||||
"vpclmulqdq",
|
||||
"pku"
|
||||
],
|
||||
"compilers": {
|
||||
"gcc": [
|
||||
{
|
||||
"versions": "10.3:",
|
||||
"name": "znver3",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"clang": [
|
||||
{
|
||||
"versions": "12.0:",
|
||||
"name": "znver3",
|
||||
"flags": "-march={name} -mtune={name}"
|
||||
}
|
||||
],
|
||||
"aocc": [
|
||||
{
|
||||
"versions": "3.0:",
|
||||
@@ -1544,6 +1719,12 @@
|
||||
"versions": ":",
|
||||
"flags": "-march=armv8-a -mtune=generic"
|
||||
}
|
||||
],
|
||||
"apple-clang": [
|
||||
{
|
||||
"versions": ":",
|
||||
"flags": "-march=armv8-a -mtune=generic"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
@@ -1752,6 +1933,31 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"m1": {
|
||||
"from": ["aarch64"],
|
||||
"vendor": "Apple",
|
||||
"features": [],
|
||||
"compilers": {
|
||||
"gcc": [
|
||||
{
|
||||
"versions": "8.0:",
|
||||
"flags" : "-march=armv8.4-a -mtune=generic"
|
||||
}
|
||||
],
|
||||
"clang" : [
|
||||
{
|
||||
"versions": "9.0:",
|
||||
"flags" : "-march=armv8.4-a"
|
||||
}
|
||||
],
|
||||
"apple-clang": [
|
||||
{
|
||||
"versions": "11.0:",
|
||||
"flags" : "-march=armv8.4-a"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"arm": {
|
||||
"from": [],
|
||||
"vendor": "generic",
|
||||
|
lib/spack/external/py2/typing.py
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
@@ -14,6 +14,11 @@
|
||||
from six import string_types
|
||||
import sys
|
||||
|
||||
if sys.version_info < (3, 0):
|
||||
from itertools import izip_longest # novm
|
||||
zip_longest = izip_longest
|
||||
else:
|
||||
from itertools import zip_longest # novm
|
||||
|
||||
if sys.version_info >= (3, 3):
|
||||
from collections.abc import Hashable, MutableMapping # novm
|
||||
@@ -227,48 +232,222 @@ def list_modules(directory, **kwargs):
|
||||
yield re.sub('.py$', '', name)
|
||||
|
||||
|
||||
def key_ordering(cls):
|
||||
"""Decorates a class with extra methods that implement rich comparison
|
||||
operations and ``__hash__``. The decorator assumes that the class
|
||||
implements a function called ``_cmp_key()``. The rich comparison
|
||||
operations will compare objects using this key, and the ``__hash__``
|
||||
function will return the hash of this key.
|
||||
def decorator_with_or_without_args(decorator):
|
||||
"""Allows a decorator to be used with or without arguments, e.g.::
|
||||
|
||||
If a class already has ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
|
||||
``__gt__``, or ``__ge__`` defined, this decorator will overwrite them.
|
||||
# Calls the decorator function some args
|
||||
@decorator(with, arguments, and=kwargs)
|
||||
|
||||
or::
|
||||
|
||||
# Calls the decorator function with zero arguments
|
||||
@decorator
|
||||
|
||||
Raises:
|
||||
TypeError: If the class does not have a ``_cmp_key`` method
|
||||
"""
|
||||
def setter(name, value):
|
||||
value.__name__ = name
|
||||
setattr(cls, name, value)
|
||||
# See https://stackoverflow.com/questions/653368 for more on this
|
||||
@functools.wraps(decorator)
|
||||
def new_dec(*args, **kwargs):
|
||||
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
|
||||
# actual decorated function
|
||||
return decorator(args[0])
|
||||
else:
|
||||
# decorator arguments
|
||||
return lambda realf: decorator(realf, *args, **kwargs)
|
||||
|
||||
if not has_method(cls, '_cmp_key'):
|
||||
raise TypeError("'%s' doesn't define _cmp_key()." % cls.__name__)
|
||||
return new_dec
|
||||
|
||||
setter('__eq__',
|
||||
lambda s, o:
|
||||
(s is o) or (o is not None and s._cmp_key() == o._cmp_key()))
|
||||
setter('__lt__',
|
||||
lambda s, o: o is not None and s._cmp_key() < o._cmp_key())
|
||||
setter('__le__',
|
||||
lambda s, o: o is not None and s._cmp_key() <= o._cmp_key())
|
||||
|
||||
setter('__ne__',
|
||||
lambda s, o:
|
||||
(s is not o) and (o is None or s._cmp_key() != o._cmp_key()))
|
||||
setter('__gt__',
|
||||
lambda s, o: o is None or s._cmp_key() > o._cmp_key())
|
||||
setter('__ge__',
|
||||
lambda s, o: o is None or s._cmp_key() >= o._cmp_key())
|
||||
#: sentinel for testing that iterators are done in lazy_lexicographic_ordering
|
||||
done = object()
|
||||
|
||||
setter('__hash__', lambda self: hash(self._cmp_key()))
|
||||
|
||||
def tuplify(seq):
|
||||
"""Helper for lazy_lexicographic_ordering()."""
|
||||
return tuple((tuplify(x) if callable(x) else x) for x in seq())
|
||||
|
||||
|
||||
def lazy_eq(lseq, rseq):
|
||||
"""Equality comparison for two lazily generated sequences.
|
||||
|
||||
See ``lazy_lexicographic_ordering``.
|
||||
"""
|
||||
liter = lseq() # call generators
|
||||
riter = rseq()
|
||||
|
||||
# zip_longest is implemented in native code, so use it for speed.
|
||||
# use zip_longest instead of zip because it allows us to tell
|
||||
# which iterator was longer.
|
||||
for left, right in zip_longest(liter, riter, fillvalue=done):
|
||||
if (left is done) or (right is done):
|
||||
return False
|
||||
|
||||
# recursively enumerate any generators, otherwise compare
|
||||
equal = lazy_eq(left, right) if callable(left) else left == right
|
||||
if not equal:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def lazy_lt(lseq, rseq):
|
||||
"""Less-than comparison for two lazily generated sequences.
|
||||
|
||||
See ``lazy_lexicographic_ordering``.
|
||||
"""
|
||||
liter = lseq()
|
||||
riter = rseq()
|
||||
|
||||
for left, right in zip_longest(liter, riter, fillvalue=done):
|
||||
if (left is done) or (right is done):
|
||||
return left is done # left was shorter than right
|
||||
|
||||
sequence = callable(left)
|
||||
equal = lazy_eq(left, right) if sequence else left == right
|
||||
if equal:
|
||||
continue
|
||||
|
||||
if sequence:
|
||||
return lazy_lt(left, right)
|
||||
if left is None:
|
||||
return True
|
||||
if right is None:
|
||||
return False
|
||||
|
||||
return left < right
|
||||
|
||||
return False # if equal, return False
|
||||
|
||||
|
||||
@decorator_with_or_without_args
|
||||
def lazy_lexicographic_ordering(cls, set_hash=True):
|
||||
"""Decorates a class with extra methods that implement rich comparison.
|
||||
|
||||
This is a lazy version of the tuple comparison used frequently to
|
||||
implement comparison in Python. Given some objects with fields, you
|
||||
might use tuple keys to implement comparison, e.g.::
|
||||
|
||||
class Widget:
|
||||
def _cmp_key(self):
|
||||
return (
|
||||
self.a,
|
||||
self.b,
|
||||
(self.c, self.d),
|
||||
self.e
|
||||
)
|
||||
|
||||
def __eq__(self, other):
|
||||
return self._cmp_key() == other._cmp_key()
|
||||
|
||||
def __lt__(self):
|
||||
return self._cmp_key() < other._cmp_key()
|
||||
|
||||
# etc.
|
||||
|
||||
Python would compare ``Widgets`` lexicographically based on their
|
||||
tuples. The issue there for simple comparators is that we have to
|
||||
build the tuples *and* we have to generate all the values in them up
|
||||
front. When implementing comparisons for large data structures, this
|
||||
can be costly.
|
||||
|
||||
Lazy lexicographic comparison maps the tuple comparison shown above
|
||||
to generator functions. Instead of comparing based on pre-constructed
|
||||
tuple keys, users of this decorator can compare using elements from a
|
||||
generator. So, you'd write::
|
||||
|
||||
@lazy_lexicographic_ordering
|
||||
class Widget:
|
||||
def _cmp_iter(self):
|
||||
yield a
|
||||
yield b
|
||||
def cd_fun():
|
||||
yield c
|
||||
yield d
|
||||
yield cd_fun
|
||||
yield e
|
||||
|
||||
# operators are added by decorator
|
||||
|
||||
There are no tuples preconstructed, and the generator does not have
|
||||
to complete. Instead of tuples, we simply make functions that lazily
|
||||
yield what would've been in the tuple. The
|
||||
``@lazy_lexicographic_ordering`` decorator handles the details of
|
||||
implementing comparison operators, and the ``Widget`` implementor
|
||||
only has to worry about writing ``_cmp_iter``, and making sure the
|
||||
elements in it are also comparable.
|
||||
|
||||
Some things to note:
|
||||
|
||||
* If a class already has ``__eq__``, ``__ne__``, ``__lt__``,
|
||||
``__le__``, ``__gt__``, ``__ge__``, or ``__hash__`` defined, this
|
||||
decorator will overwrite them.
|
||||
|
||||
* If ``set_hash`` is ``False``, this will not overwrite
|
||||
``__hash__``.
|
||||
|
||||
* This class uses Python 2 None-comparison semantics. If you yield
|
||||
None and it is compared to a non-None type, None will always be
|
||||
less than the other object.
|
||||
|
||||
Raises:
|
||||
TypeError: If the class does not have a ``_cmp_iter`` method
|
||||
|
||||
"""
|
||||
if not has_method(cls, "_cmp_iter"):
|
||||
raise TypeError("'%s' doesn't define _cmp_iter()." % cls.__name__)
|
||||
|
||||
# comparison operators are implemented in terms of lazy_eq and lazy_lt
|
||||
def eq(self, other):
|
||||
if self is other:
|
||||
return True
|
||||
return (other is not None) and lazy_eq(self._cmp_iter, other._cmp_iter)
|
||||
|
||||
def lt(self, other):
|
||||
if self is other:
|
||||
return False
|
||||
return (other is not None) and lazy_lt(self._cmp_iter, other._cmp_iter)
|
||||
|
||||
def ne(self, other):
|
||||
if self is other:
|
||||
return False
|
||||
return (other is None) or not lazy_eq(self._cmp_iter, other._cmp_iter)
|
||||
|
||||
def gt(self, other):
|
||||
if self is other:
|
||||
return False
|
||||
return (other is None) or lazy_lt(other._cmp_iter, self._cmp_iter)
|
||||
|
||||
def le(self, other):
|
||||
if self is other:
|
||||
return True
|
||||
return (other is not None) and not lazy_lt(other._cmp_iter,
|
||||
self._cmp_iter)
|
||||
|
||||
def ge(self, other):
|
||||
if self is other:
|
||||
return True
|
||||
return (other is None) or not lazy_lt(self._cmp_iter, other._cmp_iter)
|
||||
|
||||
def h(self):
|
||||
return hash(tuplify(self._cmp_iter))
|
||||
|
||||
def add_func_to_class(name, func):
|
||||
"""Add a function to a class with a particular name."""
|
||||
func.__name__ = name
|
||||
setattr(cls, name, func)
|
||||
|
||||
add_func_to_class("__eq__", eq)
|
||||
add_func_to_class("__ne__", ne)
|
||||
add_func_to_class("__lt__", lt)
|
||||
add_func_to_class("__le__", le)
|
||||
add_func_to_class("__gt__", gt)
|
||||
add_func_to_class("__ge__", ge)
|
||||
if set_hash:
|
||||
add_func_to_class("__hash__", h)
|
||||
|
||||
return cls
|
||||
|
||||
|
||||
@key_ordering
|
||||
@lazy_lexicographic_ordering
|
||||
class HashableMap(MutableMapping):
|
||||
"""This is a hashable, comparable dictionary. Hash is performed on
|
||||
a tuple of the values in the dictionary."""
|
||||
@@ -291,8 +470,9 @@ def __len__(self):
|
||||
def __delitem__(self, key):
|
||||
del self.dict[key]
|
||||
|
||||
def _cmp_key(self):
|
||||
return tuple(sorted(self.values()))
|
||||
def _cmp_iter(self):
|
||||
for _, v in sorted(self.items()):
|
||||
yield v
|
||||
|
||||
def copy(self):
|
||||
"""Type-agnostic clone method. Preserves subclass type."""
|
||||
@@ -624,6 +804,9 @@ def __repr__(self):
|
||||
def load_module_from_file(module_name, module_path):
|
||||
"""Loads a python module from the path of the corresponding file.
|
||||
|
||||
If the module is already in ``sys.modules`` it will be returned as
|
||||
is and not reloaded.
|
||||
|
||||
Args:
|
||||
module_name (str): namespace where the python module will be loaded,
|
||||
e.g. ``foo.bar``
|
||||
@@ -636,12 +819,28 @@ def load_module_from_file(module_name, module_path):
|
||||
ImportError: when the module can't be loaded
|
||||
FileNotFoundError: when module_path doesn't exist
|
||||
"""
|
||||
if module_name in sys.modules:
|
||||
return sys.modules[module_name]
|
||||
|
||||
# This recipe is adapted from https://stackoverflow.com/a/67692/771663
|
||||
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
|
||||
import importlib.util
|
||||
spec = importlib.util.spec_from_file_location( # novm
|
||||
module_name, module_path)
|
||||
module = importlib.util.module_from_spec(spec) # novm
|
||||
spec.loader.exec_module(module)
|
||||
# The module object needs to exist in sys.modules before the
|
||||
# loader executes the module code.
|
||||
#
|
||||
# See https://docs.python.org/3/reference/import.html#loading
|
||||
sys.modules[spec.name] = module
|
||||
try:
|
||||
spec.loader.exec_module(module)
|
||||
except BaseException:
|
||||
try:
|
||||
del sys.modules[spec.name]
|
||||
except KeyError:
|
||||
pass
|
||||
raise
|
||||
elif sys.version_info[0] == 3 and sys.version_info[1] < 5:
|
||||
import importlib.machinery
|
||||
loader = importlib.machinery.SourceFileLoader( # novm
|
||||
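Outside of this module, the Python 3.5+ branch above boils down to the following sketch (hedged: ``module_name`` and ``module_path`` are placeholders supplied by the caller):

import importlib.util
import sys


def load_from_path(module_name, module_path):
    """Minimal sketch of the importlib recipe used above (Python >= 3.5)."""
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    # Register before exec_module() so imports inside the module can see it.
    sys.modules[spec.name] = module
    try:
        spec.loader.exec_module(module)
    except BaseException:
        # Roll the registration back if executing the module fails.
        sys.modules.pop(spec.name, None)
        raise
    return module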
|
@@ -264,7 +264,7 @@ def _write_log_debug_data(self):
|
||||
self.old_host = self.host
|
||||
|
||||
self.pid = os.getpid()
|
||||
self.host = socket.getfqdn()
|
||||
self.host = socket.gethostname()
|
||||
|
||||
# write pid, host to disk to sync over FS
|
||||
self._file.seek(0)
|
||||
|
@@ -321,7 +321,10 @@ def __init__(self, file_like):
|
||||
def unwrap(self):
|
||||
if self.open:
|
||||
if self.file_like:
|
||||
self.file = open(self.file_like, 'w')
|
||||
if sys.version_info < (3,):
|
||||
self.file = open(self.file_like, 'w')
|
||||
else:
|
||||
self.file = open(self.file_like, 'w', encoding='utf-8')
|
||||
else:
|
||||
self.file = StringIO()
|
||||
return self.file
|
||||
@@ -722,7 +725,11 @@ def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
|
||||
|
||||
# Use line buffering (3rd param = 1) since Python 3 has a bug
|
||||
# that prevents unbuffered text I/O.
|
||||
in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1)
|
||||
if sys.version_info < (3,):
|
||||
in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1)
|
||||
else:
|
||||
# Python 3.x before 3.7 does not open with UTF-8 encoding by default
|
||||
in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1, encoding='utf-8')
|
||||
|
||||
if stdin_multiprocess_fd:
|
||||
stdin = os.fdopen(stdin_multiprocess_fd.fd)
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
lib/spack/spack/analyzers/__init__.py (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""This package contains code for creating analyzers to extract Application
|
||||
Binary Interface (ABI) information, along with simple analyses that just load
|
||||
existing metadata.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import spack.util.classes
|
||||
import spack.paths
|
||||
|
||||
import llnl.util.tty as tty
|
||||
|
||||
|
||||
mod_path = spack.paths.analyzers_path
|
||||
analyzers = spack.util.classes.list_classes("spack.analyzers", mod_path)
|
||||
|
||||
# The base analyzer does not have a name, and cannot do dict comprehension
|
||||
analyzer_types = {}
|
||||
for a in analyzers:
|
||||
if not hasattr(a, "name"):
|
||||
continue
|
||||
analyzer_types[a.name] = a
|
||||
|
||||
|
||||
def list_all():
|
||||
"""A helper function to list all analyzers and their descriptions
|
||||
"""
|
||||
for name, analyzer in analyzer_types.items():
|
||||
print("%-25s: %-35s" % (name, analyzer.description))
|
||||
|
||||
|
||||
def get_analyzer(name):
|
||||
"""Courtesy function to retrieve an analyzer, and exit on error if it
|
||||
does not exist.
|
||||
"""
|
||||
if name in analyzer_types:
|
||||
return analyzer_types[name]
|
||||
tty.die("Analyzer %s does not exist" % name)
|
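A hedged usage sketch for the registry built above; ``install_files`` is just one of the analyzer names defined later in this change, and the module path is assumed to be ``spack.analyzers``:

import spack.analyzers

# Look a class up by name, or fall back to printing what is available.
cls = spack.analyzers.analyzer_types.get("install_files")
if cls is None:
    spack.analyzers.list_all()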
lib/spack/spack/analyzers/analyzer_base.py (new file, 115 lines)
@@ -0,0 +1,115 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""An analyzer base provides basic functions to run the analysis, save results,
|
||||
and (optionally) interact with a Spack Monitor
|
||||
"""
|
||||
|
||||
import spack.monitor
|
||||
import spack.hooks
|
||||
import llnl.util.tty as tty
|
||||
import spack.util.path
|
||||
import spack.config
|
||||
|
||||
import os
|
||||
|
||||
|
||||
def get_analyzer_dir(spec, analyzer_dir=None):
|
||||
"""
|
||||
Given a spec, return the directory to save analyzer results.
|
||||
|
||||
We create the directory if it does not exist. We also check that the
|
||||
spec has an associated package. An analyzer cannot be run if the spec isn't
|
||||
associated with a package. If the user provides a custom analyzer_dir,
|
||||
we use it instead of the config value or the default at ~/.spack/analyzers.
|
||||
"""
|
||||
# An analyzer cannot be run if the spec isn't associated with a package
|
||||
if not hasattr(spec, "package") or not spec.package:
|
||||
tty.die("A spec can only be analyzed with an associated package.")
|
||||
|
||||
# The top level directory is in the user home, or a custom location
|
||||
if not analyzer_dir:
|
||||
analyzer_dir = spack.util.path.canonicalize_path(
|
||||
spack.config.get('config:analyzers_dir', '~/.spack/analyzers'))
|
||||
|
||||
# We follow the same convention as the spec install (this could be better)
|
||||
package_prefix = os.sep.join(spec.package.prefix.split('/')[-3:])
|
||||
meta_dir = os.path.join(analyzer_dir, package_prefix)
|
||||
return meta_dir
|
||||
|
||||
|
||||
class AnalyzerBase(object):
|
||||
|
||||
def __init__(self, spec, dirname=None):
|
||||
"""
|
||||
Verify that the analyzer has correct metadata.
|
||||
|
||||
An Analyzer is intended to run on one spec install, so the spec
|
||||
with its associated package is required on init. The child analyzer
|
||||
class should define an init function that super's the init here, and
|
||||
also check that the analyzer has all dependencies that it
|
||||
needs. If an analyzer subclass does not have dependencies, it does not
|
||||
need to define an init. An Analyzer should not be allowed to proceed
|
||||
if one or more dependencies are missing. The dirname, if defined,
|
||||
is an optional directory name to save to (instead of the default meta
|
||||
spack directory).
|
||||
"""
|
||||
self.spec = spec
|
||||
self.dirname = dirname
|
||||
self.meta_dir = os.path.dirname(spec.package.install_log_path)
|
||||
|
||||
for required in ["name", "outfile", "description"]:
|
||||
if not hasattr(self, required):
|
||||
tty.die("Please add a %s attribute on the analyzer." % required)
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
Given a spec with an installed package, run the analyzer on it.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def output_dir(self):
|
||||
"""
|
||||
The full path to the output directory.
|
||||
|
||||
This includes the nested analyzer directory structure. This function
|
||||
does not create anything.
|
||||
"""
|
||||
if not hasattr(self, "_output_dir"):
|
||||
output_dir = get_analyzer_dir(self.spec, self.dirname)
|
||||
self._output_dir = os.path.join(output_dir, self.name)
|
||||
|
||||
return self._output_dir
|
||||
|
||||
def save_result(self, result, overwrite=False):
|
||||
"""
|
||||
Save a result to the associated spack monitor, if defined.
|
||||
|
||||
This function is on the level of the analyzer because it might be
|
||||
the case that the result is large (appropriate for a single request)
|
||||
or that the data is organized differently (e.g., more than one
|
||||
request per result). If an analyzer subclass needs to overwrite
|
||||
this function with a custom save, that is appropriate to do (see abi).
|
||||
"""
|
||||
# We maintain the structure in json with the analyzer as key so
|
||||
# that in the future, we could upload to a monitor server
|
||||
if result[self.name]:
|
||||
|
||||
outfile = os.path.join(self.output_dir, self.outfile)
|
||||
|
||||
# Only try to create the results directory if we have a result
|
||||
if not os.path.exists(self._output_dir):
|
||||
os.makedirs(self._output_dir)
|
||||
|
||||
# Don't overwrite an existing result if overwrite is False
|
||||
if os.path.exists(outfile) and not overwrite:
|
||||
tty.info("%s exists and overwrite is False, skipping." % outfile)
|
||||
else:
|
||||
tty.info("Writing result to %s" % outfile)
|
||||
spack.monitor.write_json(result[self.name], outfile)
|
||||
|
||||
# This hook runs after a save result
|
||||
spack.hooks.on_analyzer_save(self.spec.package, result)
|
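To make the contract concrete, a minimal hypothetical subclass might look like the sketch below: the base class requires ``name``, ``outfile`` and ``description`` attributes, and ``run()`` must return a dict keyed by the analyzer name (this example is not one of the analyzers added in this change):

import os

from .analyzer_base import AnalyzerBase


class BuildLogPresence(AnalyzerBase):
    """Hypothetical analyzer that only records whether a build log exists."""

    name = "build_log_presence"
    outfile = "spack-analyzer-build-log-presence.json"
    description = "presence of the package build log"

    def run(self):
        # self.meta_dir is the .spack metadata directory of the install;
        # the log file name here is illustrative only.
        log = os.path.join(self.meta_dir, "spack-build-out.txt")
        return {self.name: {"log": log, "exists": os.path.exists(log)}}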
lib/spack/spack/analyzers/config_args.py (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""A configargs analyzer is a class of analyzer that typically just uploads
|
||||
already existing metadata about config args from a package spec install
|
||||
directory."""
|
||||
|
||||
|
||||
import spack.monitor
|
||||
from .analyzer_base import AnalyzerBase
|
||||
|
||||
import os
|
||||
|
||||
|
||||
class ConfigArgs(AnalyzerBase):
|
||||
|
||||
name = "config_args"
|
||||
outfile = "spack-analyzer-config-args.json"
|
||||
description = "config args loaded from spack-configure-args.txt"
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
Load the configure-args.txt and save in json.
|
||||
|
||||
The run function will find the spack-config-args.txt file in the
|
||||
package install directory, and read it into a json structure that has
|
||||
the name of the analyzer as the key.
|
||||
"""
|
||||
config_file = os.path.join(self.meta_dir, "spack-configure-args.txt")
|
||||
return {self.name: spack.monitor.read_file(config_file)}
|
lib/spack/spack/analyzers/environment_variables.py (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""An environment analyzer will read and parse the environment variables
|
||||
file in the installed package directory, generating a json file that has
|
||||
an index of key, value pairs for environment variables."""
|
||||
|
||||
|
||||
from .analyzer_base import AnalyzerBase
|
||||
from spack.util.environment import EnvironmentModifications
|
||||
|
||||
|
||||
import os
|
||||
|
||||
|
||||
class EnvironmentVariables(AnalyzerBase):
|
||||
|
||||
name = "environment_variables"
|
||||
outfile = "spack-analyzer-environment-variables.json"
|
||||
description = "environment variables parsed from spack-build-env.txt"
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
Load, parse, and save spack-build-env.txt to analyzers.
|
||||
|
||||
Read in the spack-build-env.txt file from the package install
|
||||
directory and parse the environment variables into key value pairs.
|
||||
The result should have the key for the analyzer, the name.
|
||||
"""
|
||||
env_file = os.path.join(self.meta_dir, "spack-build-env.txt")
|
||||
return {self.name: self._read_environment_file(env_file)}
|
||||
|
||||
def _read_environment_file(self, filename):
|
||||
"""
|
||||
Read and parse the environment file.
|
||||
|
||||
Given an environment file, we want to read it, split by semicolons
|
||||
and new lines, and then parse down to the subset of SPACK_* variables.
|
||||
We assume that all spack prefix variables are not secrets, and unlike
|
||||
the install_manifest.json, we don't (at least to start) parse the values
|
||||
to remove path prefixes specific to user systems.
|
||||
"""
|
||||
if not os.path.exists(filename):
|
||||
return
|
||||
|
||||
mods = EnvironmentModifications.from_sourcing_file(filename)
|
||||
env = {}
|
||||
mods.apply_modifications(env)
|
||||
return env
|
lib/spack/spack/analyzers/install_files.py (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""The install files json file (install_manifest.json) already exists in
|
||||
the package install folder, so this analyzer simply moves it to the user
|
||||
analyzer folder for further processing."""
|
||||
|
||||
|
||||
import spack.monitor
|
||||
from .analyzer_base import AnalyzerBase
|
||||
|
||||
import os
|
||||
|
||||
|
||||
class InstallFiles(AnalyzerBase):
|
||||
|
||||
name = "install_files"
|
||||
outfile = "spack-analyzer-install-files.json"
|
||||
description = "install file listing read from install_manifest.json"
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
Load in the install_manifest.json and save to analyzers.
|
||||
|
||||
We write it out to the analyzers folder, with key as the analyzer name.
|
||||
"""
|
||||
manifest_file = os.path.join(self.meta_dir, "install_manifest.json")
|
||||
return {self.name: spack.monitor.read_json(manifest_file)}
|
lib/spack/spack/analyzers/libabigail.py (new file, 116 lines)
@@ -0,0 +1,116 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
|
||||
import spack
|
||||
import spack.error
|
||||
import spack.bootstrap
|
||||
import spack.hooks
|
||||
import spack.monitor
|
||||
import spack.binary_distribution
|
||||
import spack.package
|
||||
import spack.repo
|
||||
|
||||
import llnl.util.tty as tty
|
||||
|
||||
from .analyzer_base import AnalyzerBase
|
||||
|
||||
import os
|
||||
|
||||
|
||||
class Libabigail(AnalyzerBase):
|
||||
|
||||
name = "libabigail"
|
||||
outfile = "spack-analyzer-libabigail.json"
|
||||
description = "Application Binary Interface (ABI) features for objects"
|
||||
|
||||
def __init__(self, spec, dirname=None):
|
||||
"""
|
||||
The init for an analyzer ensures we have all needed dependencies.
|
||||
|
||||
For the libabigail analyzer, this means Libabigail.
|
||||
Since the output for libabigail is one file per object, we communicate
|
||||
with the monitor multiple times.
|
||||
"""
|
||||
super(Libabigail, self).__init__(spec, dirname)
|
||||
|
||||
# This doesn't seem to work to import on the module level
|
||||
tty.debug("Preparing to use Libabigail, will install if missing.")
|
||||
|
||||
with spack.bootstrap.ensure_bootstrap_configuration():
|
||||
|
||||
# libabigail won't install lib/bin/share without docs
|
||||
spec = spack.spec.Spec("libabigail+docs")
|
||||
spec.concretize()
|
||||
|
||||
self.abidw = spack.bootstrap.get_executable(
|
||||
"abidw", spec=spec, install=True)
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
Run libabigail, and save results to filename.
|
||||
|
||||
This run function differs in that we write as we generate and then
|
||||
return a dict with the analyzer name as the key, and the value of a
|
||||
dict of results, where the key is the object name, and the value is
|
||||
the output file written to.
|
||||
"""
|
||||
manifest = spack.binary_distribution.get_buildfile_manifest(self.spec)
|
||||
|
||||
# This result will store a path to each file
|
||||
result = {}
|
||||
|
||||
# Generate an output file for each binary or object
|
||||
for obj in manifest.get("binary_to_relocate_fullpath", []):
|
||||
|
||||
# We want to preserve the path in the install directory in case
|
||||
# a library has an equivalently named lib or executable, for example
|
||||
outdir = os.path.dirname(obj.replace(self.spec.package.prefix,
|
||||
'').strip(os.path.sep))
|
||||
outfile = "spack-analyzer-libabigail-%s.xml" % os.path.basename(obj)
|
||||
outfile = os.path.join(self.output_dir, outdir, outfile)
|
||||
outdir = os.path.dirname(outfile)
|
||||
|
||||
# Create the output directory
|
||||
if not os.path.exists(outdir):
|
||||
os.makedirs(outdir)
|
||||
|
||||
# Sometimes libabigail segfaults and dumps
|
||||
try:
|
||||
self.abidw(obj, "--out-file", outfile)
|
||||
result[obj] = outfile
|
||||
tty.info("Writing result to %s" % outfile)
|
||||
except spack.error.SpackError:
|
||||
tty.warn("Issue running abidw for %s" % obj)
|
||||
|
||||
return {self.name: result}
|
||||
|
||||
def save_result(self, result, overwrite=False):
|
||||
"""
|
||||
Read saved ABI results and upload to monitor server.
|
||||
|
||||
ABI results are saved to individual files, so each one needs to be
|
||||
read and uploaded. Result here should be the lookup generated in run(),
|
||||
the key is the analyzer name, and each value is the result file.
|
||||
We currently upload the entire xml as text because libabigail can't
|
||||
easily read gzipped xml, but this will be updated when it can.
|
||||
"""
|
||||
if not spack.monitor.cli:
|
||||
return
|
||||
|
||||
name = self.spec.package.name
|
||||
|
||||
for obj, filename in result.get(self.name, {}).items():
|
||||
|
||||
# Don't include the prefix
|
||||
rel_path = obj.replace(self.spec.prefix + os.path.sep, "")
|
||||
|
||||
# We've already saved the results to file during run
|
||||
content = spack.monitor.read_file(filename)
|
||||
|
||||
# A result needs an analyzer, value or binary_value, and name
|
||||
data = {"value": content, "install_file": rel_path, "name": "abidw-xml"}
|
||||
tty.info("Sending result for %s %s to monitor." % (name, rel_path))
|
||||
spack.hooks.on_analyzer_save(self.spec.package, {"libabigail": [data]})
|
@@ -58,14 +58,13 @@
|
||||
"""
|
||||
import contextlib
|
||||
import functools
|
||||
import inspect
|
||||
import warnings
|
||||
|
||||
import archspec.cpu
|
||||
import six
|
||||
|
||||
import llnl.util.tty as tty
|
||||
from llnl.util.lang import memoized, list_modules, key_ordering
|
||||
import llnl.util.lang as lang
|
||||
|
||||
import spack.compiler
|
||||
import spack.compilers
|
||||
@@ -74,7 +73,7 @@
|
||||
import spack.error as serr
|
||||
import spack.util.executable
|
||||
import spack.version
|
||||
from spack.util.naming import mod_to_class
|
||||
import spack.util.classes
|
||||
from spack.util.spack_yaml import syaml_dict
|
||||
|
||||
|
||||
@@ -232,7 +231,7 @@ def optimization_flags(self, compiler):
|
||||
)
|
||||
|
||||
|
||||
@key_ordering
|
||||
@lang.lazy_lexicographic_ordering
|
||||
class Platform(object):
|
||||
""" Abstract class that each type of Platform will subclass.
|
||||
Will return an instance of it once it is returned.
|
||||
@@ -329,23 +328,27 @@ def __repr__(self):
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
def _cmp_key(self):
|
||||
t_keys = ''.join(str(t._cmp_key()) for t in
|
||||
sorted(self.targets.values()))
|
||||
o_keys = ''.join(str(o._cmp_key()) for o in
|
||||
sorted(self.operating_sys.values()))
|
||||
return (self.name,
|
||||
self.default,
|
||||
self.front_end,
|
||||
self.back_end,
|
||||
self.default_os,
|
||||
self.front_os,
|
||||
self.back_os,
|
||||
t_keys,
|
||||
o_keys)
|
||||
def _cmp_iter(self):
|
||||
yield self.name
|
||||
yield self.default
|
||||
yield self.front_end
|
||||
yield self.back_end
|
||||
yield self.default_os
|
||||
yield self.front_os
|
||||
yield self.back_os
|
||||
|
||||
def targets():
|
||||
for t in sorted(self.targets.values()):
|
||||
yield t._cmp_iter
|
||||
yield targets
|
||||
|
||||
def oses():
|
||||
for o in sorted(self.operating_sys.values()):
|
||||
yield o._cmp_iter
|
||||
yield oses
|
||||
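The ``targets``/``oses`` closures above are the idiom for nested collections: yielding a callable defers walking the collection until the comparison actually reaches that position. A hedged sketch of the same pattern on a made-up class:

from llnl.util.lang import lazy_lexicographic_ordering  # assumed location


@lazy_lexicographic_ordering
class Catalog(object):
    """Hypothetical container compared by name first, then by its items."""

    def __init__(self, name, items):
        self.name = name
        self.items = items

    def _cmp_iter(self):
        yield self.name

        def items():
            # Only iterated when the names compare equal.
            for item in sorted(self.items):
                yield item
        yield items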
|
||||
|
||||
@key_ordering
|
||||
@lang.lazy_lexicographic_ordering
|
||||
class OperatingSystem(object):
|
||||
""" Operating System will be like a class similar to platform extended
|
||||
by subclasses for the specifics. Operating System will contain the
|
||||
@@ -363,8 +366,9 @@ def __str__(self):
|
||||
def __repr__(self):
|
||||
return self.__str__()
|
||||
|
||||
def _cmp_key(self):
|
||||
return (self.name, self.version)
|
||||
def _cmp_iter(self):
|
||||
yield self.name
|
||||
yield self.version
|
||||
|
||||
def to_dict(self):
|
||||
return syaml_dict([
|
||||
@@ -373,7 +377,7 @@ def to_dict(self):
|
||||
])
|
||||
|
||||
|
||||
@key_ordering
|
||||
@lang.lazy_lexicographic_ordering
|
||||
class Arch(object):
|
||||
"""Architecture is now a class to help with setting attributes.
|
||||
|
||||
@@ -423,20 +427,21 @@ def __nonzero__(self):
|
||||
self.target is not None)
|
||||
__bool__ = __nonzero__
|
||||
|
||||
def _cmp_key(self):
|
||||
def _cmp_iter(self):
|
||||
if isinstance(self.platform, Platform):
|
||||
platform = self.platform.name
|
||||
yield self.platform.name
|
||||
else:
|
||||
platform = self.platform
|
||||
yield self.platform
|
||||
|
||||
if isinstance(self.os, OperatingSystem):
|
||||
os = self.os.name
|
||||
yield self.os.name
|
||||
else:
|
||||
os = self.os
|
||||
yield self.os
|
||||
|
||||
if isinstance(self.target, Target):
|
||||
target = self.target.microarchitecture
|
||||
yield self.target.microarchitecture
|
||||
else:
|
||||
target = self.target
|
||||
return (platform, os, target)
|
||||
yield self.target
|
||||
|
||||
def to_dict(self):
|
||||
str_or_none = lambda v: str(v) if v else None
|
||||
@@ -458,7 +463,7 @@ def from_dict(d):
|
||||
return arch_for_spec(spec)
|
||||
|
||||
|
||||
@memoized
|
||||
@lang.memoized
|
||||
def get_platform(platform_name):
|
||||
"""Returns a platform object that corresponds to the given name."""
|
||||
platform_list = all_platforms()
|
||||
@@ -494,28 +499,13 @@ def arch_for_spec(arch_spec):
|
||||
return Arch(arch_plat, arch_spec.os, arch_spec.target)
|
||||
|
||||
|
||||
@memoized
|
||||
@lang.memoized
|
||||
def _all_platforms():
|
||||
classes = []
|
||||
mod_path = spack.paths.platform_path
|
||||
parent_module = "spack.platforms"
|
||||
|
||||
for name in list_modules(mod_path):
|
||||
mod_name = '%s.%s' % (parent_module, name)
|
||||
class_name = mod_to_class(name)
|
||||
mod = __import__(mod_name, fromlist=[class_name])
|
||||
if not hasattr(mod, class_name):
|
||||
tty.die('No class %s defined in %s' % (class_name, mod_name))
|
||||
cls = getattr(mod, class_name)
|
||||
if not inspect.isclass(cls):
|
||||
tty.die('%s.%s is not a class' % (mod_name, class_name))
|
||||
|
||||
classes.append(cls)
|
||||
|
||||
return classes
|
||||
return spack.util.classes.list_classes("spack.platforms", mod_path)
|
||||
|
||||
|
||||
@memoized
|
||||
@lang.memoized
|
||||
def _platform():
|
||||
"""Detects the platform for this machine.
|
||||
|
||||
@@ -546,7 +536,7 @@ def _platform():
|
||||
all_platforms = _all_platforms
|
||||
|
||||
|
||||
@memoized
|
||||
@lang.memoized
|
||||
def default_arch():
|
||||
"""Default ``Arch`` object for this machine.
|
||||
|
||||
@@ -570,7 +560,7 @@ def sys_type():
|
||||
return str(default_arch())
|
||||
|
||||
|
||||
@memoized
|
||||
@lang.memoized
|
||||
def compatible_sys_types():
|
||||
"""Returns a list of all the systypes compatible with the current host."""
|
||||
compatible_archs = []
|
||||
|
@@ -37,13 +37,11 @@
|
||||
import spack.mirror
|
||||
import spack.util.url as url_util
|
||||
import spack.util.web as web_util
|
||||
from spack.caches import misc_cache_location
|
||||
from spack.spec import Spec
|
||||
from spack.stage import Stage
|
||||
|
||||
|
||||
#: default root, relative to the Spack install path
|
||||
default_binary_index_root = os.path.join(spack.paths.opt_path, 'spack')
|
||||
|
||||
_build_cache_relative_path = 'build_cache'
|
||||
_build_cache_keys_relative_path = '_pgp'
|
||||
|
||||
@@ -67,9 +65,8 @@ class BinaryCacheIndex(object):
|
||||
mean we should have paid the price to update the cache earlier?
|
||||
"""
|
||||
|
||||
def __init__(self, cache_root=None):
|
||||
self._cache_root = cache_root or default_binary_index_root
|
||||
self._index_cache_root = os.path.join(self._cache_root, 'indices')
|
||||
def __init__(self, cache_root):
|
||||
self._index_cache_root = cache_root
|
||||
|
||||
# the key associated with the serialized _local_index_cache
|
||||
self._index_contents_key = 'contents.json'
|
||||
@@ -440,13 +437,15 @@ def _fetch_and_cache_index(self, mirror_url, expect_hash=None):
|
||||
return True
|
||||
|
||||
|
||||
def binary_index_location():
|
||||
"""Set up a BinaryCacheIndex for remote buildcache dbs in the user's homedir."""
|
||||
cache_root = os.path.join(misc_cache_location(), 'indices')
|
||||
return spack.util.path.canonicalize_path(cache_root)
|
||||
|
||||
|
||||
def _binary_index():
|
||||
"""Get the singleton store instance."""
|
||||
cache_root = spack.config.get(
|
||||
'config:binary_index_root', default_binary_index_root)
|
||||
cache_root = spack.util.path.canonicalize_path(cache_root)
|
||||
|
||||
return BinaryCacheIndex(cache_root)
|
||||
return BinaryCacheIndex(binary_index_location())
|
||||
|
||||
|
||||
#: Singleton binary_index instance
|
||||
@@ -551,40 +550,38 @@ def read_buildinfo_file(prefix):
|
||||
return buildinfo
|
||||
|
||||
|
||||
def write_buildinfo_file(spec, workdir, rel=False):
|
||||
def get_buildfile_manifest(spec):
|
||||
"""
|
||||
Create a cache file containing information
|
||||
required for the relocation
|
||||
Return a data structure with information about a build, including
|
||||
text_to_relocate, binary_to_relocate, binary_to_relocate_fullpath
|
||||
link_to_relocate, and other, which means it doesn't fit any of the previous
|
||||
checks (and should not be relocated). We blacklist docs (man) and
|
||||
metadata (.spack). This can be used to find a particular kind of file
|
||||
in spack, or to generate the build metadata.
|
||||
"""
|
||||
prefix = spec.prefix
|
||||
text_to_relocate = []
|
||||
binary_to_relocate = []
|
||||
link_to_relocate = []
|
||||
data = {"text_to_relocate": [], "binary_to_relocate": [],
|
||||
"link_to_relocate": [], "other": [],
|
||||
"binary_to_relocate_fullpath": []}
|
||||
|
||||
blacklist = (".spack", "man")
|
||||
prefix_to_hash = dict()
|
||||
prefix_to_hash[str(spec.package.prefix)] = spec.dag_hash()
|
||||
deps = spack.build_environment.get_rpath_deps(spec.package)
|
||||
for d in deps:
|
||||
prefix_to_hash[str(d.prefix)] = d.dag_hash()
|
||||
|
||||
# Do this during tarball creation to save time when the tarball is unpacked.
|
||||
# Used by make_package_relative to determine binaries to change.
|
||||
for root, dirs, files in os.walk(prefix, topdown=True):
|
||||
for root, dirs, files in os.walk(spec.prefix, topdown=True):
|
||||
dirs[:] = [d for d in dirs if d not in blacklist]
|
||||
for filename in files:
|
||||
path_name = os.path.join(root, filename)
|
||||
m_type, m_subtype = relocate.mime_type(path_name)
|
||||
rel_path_name = os.path.relpath(path_name, spec.prefix)
|
||||
added = False
|
||||
|
||||
if os.path.islink(path_name):
|
||||
link = os.readlink(path_name)
|
||||
if os.path.isabs(link):
|
||||
# Relocate absolute links into the spack tree
|
||||
if link.startswith(spack.store.layout.root):
|
||||
rel_path_name = os.path.relpath(path_name, prefix)
|
||||
link_to_relocate.append(rel_path_name)
|
||||
else:
|
||||
msg = 'Absolute link %s to %s ' % (path_name, link)
|
||||
msg += 'outside of prefix %s ' % prefix
|
||||
msg += 'should not be relocated.'
|
||||
tty.warn(msg)
|
||||
data['link_to_relocate'].append(rel_path_name)
|
||||
added = True
|
||||
|
||||
if relocate.needs_binary_relocation(m_type, m_subtype):
|
||||
if ((m_subtype in ('x-executable', 'x-sharedlib')
|
||||
@@ -592,11 +589,31 @@ def write_buildinfo_file(spec, workdir, rel=False):
|
||||
(m_subtype in ('x-mach-binary')
|
||||
and sys.platform == 'darwin') or
|
||||
(not filename.endswith('.o'))):
|
||||
rel_path_name = os.path.relpath(path_name, prefix)
|
||||
binary_to_relocate.append(rel_path_name)
|
||||
data['binary_to_relocate'].append(rel_path_name)
|
||||
data['binary_to_relocate_fullpath'].append(path_name)
|
||||
added = True
|
||||
|
||||
if relocate.needs_text_relocation(m_type, m_subtype):
|
||||
rel_path_name = os.path.relpath(path_name, prefix)
|
||||
text_to_relocate.append(rel_path_name)
|
||||
data['text_to_relocate'].append(rel_path_name)
|
||||
added = True
|
||||
|
||||
if not added:
|
||||
data['other'].append(path_name)
|
||||
return data
|
||||
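For orientation, the manifest is a plain dict of relative paths, plus full paths for binaries and for files that matched no category; the entries below are hypothetical:

# Hypothetical shape of get_buildfile_manifest(spec) for a package "foo".
manifest = {
    "text_to_relocate": ["bin/foo-config"],
    "binary_to_relocate": ["bin/foo", "lib/libfoo.so"],
    "binary_to_relocate_fullpath": ["/path/to/prefix/bin/foo",
                                    "/path/to/prefix/lib/libfoo.so"],
    "link_to_relocate": ["lib/libfoo.so.1"],
    "other": ["/path/to/prefix/share/doc/README"],
}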
|
||||
|
||||
def write_buildinfo_file(spec, workdir, rel=False):
|
||||
"""
|
||||
Create a cache file containing information
|
||||
required for the relocation
|
||||
"""
|
||||
manifest = get_buildfile_manifest(spec)
|
||||
|
||||
prefix_to_hash = dict()
|
||||
prefix_to_hash[str(spec.package.prefix)] = spec.dag_hash()
|
||||
deps = spack.build_environment.get_rpath_deps(spec.package)
|
||||
for d in deps:
|
||||
prefix_to_hash[str(d.prefix)] = d.dag_hash()
|
||||
|
||||
# Create buildinfo data and write it to disk
|
||||
import spack.hooks.sbang as sbang
|
||||
@@ -606,10 +623,10 @@ def write_buildinfo_file(spec, workdir, rel=False):
|
||||
buildinfo['buildpath'] = spack.store.layout.root
|
||||
buildinfo['spackprefix'] = spack.paths.prefix
|
||||
buildinfo['relative_prefix'] = os.path.relpath(
|
||||
prefix, spack.store.layout.root)
|
||||
buildinfo['relocate_textfiles'] = text_to_relocate
|
||||
buildinfo['relocate_binaries'] = binary_to_relocate
|
||||
buildinfo['relocate_links'] = link_to_relocate
|
||||
spec.prefix, spack.store.layout.root)
|
||||
buildinfo['relocate_textfiles'] = manifest['text_to_relocate']
|
||||
buildinfo['relocate_binaries'] = manifest['binary_to_relocate']
|
||||
buildinfo['relocate_links'] = manifest['link_to_relocate']
|
||||
buildinfo['prefix_to_hash'] = prefix_to_hash
|
||||
filename = buildinfo_file_name(workdir)
|
||||
with open(filename, 'w') as outfile:
|
||||
@@ -1161,7 +1178,7 @@ def is_backup_file(file):
|
||||
text_names.append(text_name)
|
||||
|
||||
# If we are not installing back to the same install tree do the relocation
|
||||
if old_layout_root != new_layout_root:
|
||||
if old_prefix != new_prefix:
|
||||
files_to_relocate = [os.path.join(workdir, filename)
|
||||
for filename in buildinfo.get('relocate_binaries')
|
||||
]
|
||||
|
@@ -1,10 +1,17 @@
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
import contextlib
|
||||
import os
|
||||
import sys
|
||||
try:
|
||||
import sysconfig # novm
|
||||
except ImportError:
|
||||
# Not supported on Python 2.6
|
||||
pass
|
||||
|
||||
import archspec.cpu
|
||||
|
||||
import llnl.util.filesystem as fs
|
||||
import llnl.util.tty as tty
|
||||
@@ -20,17 +27,33 @@
|
||||
from spack.util.environment import EnvironmentModifications
|
||||
|
||||
|
||||
def spec_for_current_python():
|
||||
"""For bootstrapping purposes we are just interested in the Python
|
||||
minor version (all patches are ABI compatible with the same minor)
|
||||
and on whether ucs4 support has been enabled for Python 2.7
|
||||
|
||||
See:
|
||||
https://www.python.org/dev/peps/pep-0513/
|
||||
https://stackoverflow.com/a/35801395/771663
|
||||
"""
|
||||
version_str = '.'.join(str(x) for x in sys.version_info[:2])
|
||||
variant_str = ''
|
||||
if sys.version_info[0] == 2 and sys.version_info[1] == 7:
|
||||
unicode_size = sysconfig.get_config_var('Py_UNICODE_SIZE')
|
||||
variant_str = '+ucs4' if unicode_size == 4 else '~ucs4'
|
||||
|
||||
spec_fmt = 'python@{0} {1}'
|
||||
return spec_fmt.format(version_str, variant_str)
|
||||
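Hedged examples of what this helper returns, depending on the interpreter Spack is running under:

# Under CPython 3.8 the result is 'python@3.8 ' (the trailing space comes
# from the empty variant string); under a UCS-4 build of CPython 2.7 it is
# 'python@2.7 +ucs4'.
print(spec_for_current_python())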
|
||||
|
||||
@contextlib.contextmanager
|
||||
def spack_python_interpreter():
|
||||
"""Override the current configuration to set the interpreter under
|
||||
which Spack is currently running as the only Python external spec
|
||||
available.
|
||||
"""
|
||||
python_cls = type(spack.spec.Spec('python').package)
|
||||
python_prefix = os.path.dirname(os.path.dirname(sys.executable))
|
||||
externals = python_cls.determine_spec_details(
|
||||
python_prefix, [os.path.basename(sys.executable)])
|
||||
external_python = externals[0]
|
||||
external_python = spec_for_current_python()
|
||||
|
||||
entry = {
|
||||
'buildable': False,
|
||||
@@ -60,9 +83,10 @@ def make_module_available(module, spec=None, install=False):
|
||||
# We can constrain by a shortened version in place of a version range
|
||||
# because this spec is only used for querying or as a placeholder to be
|
||||
# replaced by an external that already has a concrete version. This syntax
|
||||
# is not suffucient when concretizing without an external, as it will
|
||||
# is not sufficient when concretizing without an external, as it will
|
||||
# concretize to python@X.Y instead of python@X.Y.Z
|
||||
spec.constrain('^python@%d.%d' % sys.version_info[:2])
|
||||
python_requirement = '^' + spec_for_current_python()
|
||||
spec.constrain(python_requirement)
|
||||
installed_specs = spack.store.db.query(spec, installed=True)
|
||||
|
||||
for ispec in installed_specs:
|
||||
@@ -172,7 +196,10 @@ def _raise_error(executable, exe_spec):
|
||||
|
||||
|
||||
def _bootstrap_config_scopes():
|
||||
config_scopes = []
|
||||
tty.debug('[BOOTSTRAP CONFIG SCOPE] name=_builtin')
|
||||
config_scopes = [
|
||||
spack.config.InternalConfigScope('_builtin', spack.config.config_defaults)
|
||||
]
|
||||
for name, path in spack.config.configuration_paths:
|
||||
platform = spack.architecture.platform().name
|
||||
platform_scope = spack.config.ConfigScope(
|
||||
@@ -180,7 +207,7 @@ def _bootstrap_config_scopes():
|
||||
)
|
||||
generic_scope = spack.config.ConfigScope(name, path)
|
||||
config_scopes.extend([generic_scope, platform_scope])
|
||||
msg = '[BOOSTRAP CONFIG SCOPE] name={0}, path={1}'
|
||||
msg = '[BOOTSTRAP CONFIG SCOPE] name={0}, path={1}'
|
||||
tty.debug(msg.format(generic_scope.name, generic_scope.path))
|
||||
tty.debug(msg.format(platform_scope.name, platform_scope.path))
|
||||
return config_scopes
|
||||
@@ -189,11 +216,35 @@ def _bootstrap_config_scopes():
|
||||
@contextlib.contextmanager
|
||||
def ensure_bootstrap_configuration():
|
||||
with spack.architecture.use_platform(spack.architecture.real_platform()):
|
||||
# Default configuration scopes excluding command line and builtin
|
||||
# but accounting for platform specific scopes
|
||||
config_scopes = _bootstrap_config_scopes()
|
||||
with spack.config.use_configuration(*config_scopes):
|
||||
with spack.repo.use_repositories(spack.paths.packages_path):
|
||||
with spack.store.use_store(spack.paths.user_bootstrap_store):
|
||||
with spack.repo.use_repositories(spack.paths.packages_path):
|
||||
with spack.store.use_store(spack.paths.user_bootstrap_store):
|
||||
# Default configuration scopes excluding command line
|
||||
# and builtin but accounting for platform specific scopes
|
||||
config_scopes = _bootstrap_config_scopes()
|
||||
with spack.config.use_configuration(*config_scopes):
|
||||
with spack_python_interpreter():
|
||||
yield
|
||||
|
||||
|
||||
def clingo_root_spec():
|
||||
# Construct the root spec that will be used to bootstrap clingo
|
||||
spec_str = 'clingo-bootstrap@spack+python'
|
||||
|
||||
# Add a proper compiler hint to the root spec. We use GCC for
|
||||
# everything but MacOS.
|
||||
if str(spack.architecture.platform()) == 'darwin':
|
||||
spec_str += ' %apple-clang'
|
||||
else:
|
||||
spec_str += ' %gcc'
|
||||
|
||||
# Add hint to use frontend operating system on Cray
|
||||
if str(spack.architecture.platform()) == 'cray':
|
||||
spec_str += ' os=fe'
|
||||
|
||||
# Add the generic target
|
||||
generic_target = archspec.cpu.host().family
|
||||
spec_str += ' target={0}'.format(str(generic_target))
|
||||
|
||||
tty.debug('[BOOTSTRAP ROOT SPEC] clingo: {0}'.format(spec_str))
|
||||
|
||||
return spack.spec.Spec(spec_str)
|
||||
|
@@ -70,8 +70,7 @@
|
||||
from spack.util.executable import Executable
|
||||
from spack.util.module_cmd import load_module, path_from_modules, module
|
||||
from spack.util.log_parse import parse_log_events, make_log_context
|
||||
|
||||
|
||||
from spack.util.cpus import cpus_available
|
||||
#
|
||||
# This can be set by the user to globally disable parallel builds.
|
||||
#
|
||||
@@ -307,6 +306,19 @@ def set_compiler_environment_variables(pkg, env):
|
||||
return env
|
||||
|
||||
|
||||
def _place_externals_last(spec_container):
|
||||
"""
|
||||
For a (possibly unordered) container of specs, return an ordered list
|
||||
where all external specs are at the end of the list. External packages
|
||||
may be installed in merged prefixes with other packages, and so
|
||||
they should be deprioritized for any search order (i.e. in PATH, or
|
||||
for a set of -L entries in a compiler invocation).
|
||||
"""
|
||||
first = list(x for x in spec_container if not x.external)
|
||||
second = list(x for x in spec_container if x.external)
|
||||
return first + second
|
||||
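A small self-contained sketch of the reordering (``FakeSpec`` is a stand-in; only the ``external`` attribute matters here):

import collections

FakeSpec = collections.namedtuple('FakeSpec', ['name', 'external'])

specs = [FakeSpec('openssl', True), FakeSpec('zlib', False),
         FakeSpec('cmake', True), FakeSpec('hdf5', False)]

# Spack-built specs keep their relative order and come first.
ordered = _place_externals_last(specs)
assert [s.name for s in ordered] == ['zlib', 'hdf5', 'openssl', 'cmake']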
|
||||
|
||||
def set_build_environment_variables(pkg, env, dirty):
|
||||
"""Ensure a clean install environment when we build packages.
|
||||
|
||||
@@ -324,6 +336,29 @@ def set_build_environment_variables(pkg, env, dirty):
|
||||
link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
|
||||
build_link_deps = build_deps | link_deps
|
||||
rpath_deps = get_rpath_deps(pkg)
|
||||
# This includes all build dependencies and any other dependencies that
|
||||
# should be added to PATH (e.g. supporting executables run by build
|
||||
# dependencies)
|
||||
build_and_supporting_deps = set()
|
||||
for build_dep in build_deps:
|
||||
build_and_supporting_deps.update(build_dep.traverse(deptype='run'))
|
||||
|
||||
# Establish an arbitrary but fixed ordering of specs so that resulting
|
||||
# environment variable values are stable
|
||||
def _order(specs):
|
||||
return sorted(specs, key=lambda x: x.name)
|
||||
|
||||
# External packages may be installed in a prefix which contains many other
|
||||
# package installs. To avoid having those installations override
|
||||
# Spack-installed packages, they are placed at the end of search paths.
|
||||
# System prefixes are removed entirely later on since they are already
|
||||
# searched.
|
||||
build_deps = _place_externals_last(_order(build_deps))
|
||||
link_deps = _place_externals_last(_order(link_deps))
|
||||
build_link_deps = _place_externals_last(_order(build_link_deps))
|
||||
rpath_deps = _place_externals_last(_order(rpath_deps))
|
||||
build_and_supporting_deps = _place_externals_last(
|
||||
_order(build_and_supporting_deps))
|
||||
|
||||
link_dirs = []
|
||||
include_dirs = []
|
||||
@@ -370,21 +405,10 @@ def set_build_environment_variables(pkg, env, dirty):
|
||||
env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
|
||||
env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))
|
||||
|
||||
build_prefixes = [dep.prefix for dep in build_deps]
|
||||
build_link_prefixes = [dep.prefix for dep in build_link_deps]
|
||||
|
||||
# add run-time dependencies of direct build-time dependencies:
|
||||
for build_dep in build_deps:
|
||||
for run_dep in build_dep.traverse(deptype='run'):
|
||||
build_prefixes.append(run_dep.prefix)
|
||||
|
||||
# Filter out system paths: ['/', '/usr', '/usr/local']
|
||||
# These paths can be introduced into the build when an external package
|
||||
# is added as a dependency. The problem with these paths is that they often
|
||||
# contain hundreds of other packages installed in the same directory.
|
||||
# If these paths come first, they can overshadow Spack installations.
|
||||
build_prefixes = filter_system_paths(build_prefixes)
|
||||
build_link_prefixes = filter_system_paths(build_link_prefixes)
|
||||
build_and_supporting_prefixes = filter_system_paths(
|
||||
x.prefix for x in build_and_supporting_deps)
|
||||
build_link_prefixes = filter_system_paths(
|
||||
x.prefix for x in build_link_deps)
|
||||
|
||||
# Add dependencies to CMAKE_PREFIX_PATH
|
||||
env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)
|
||||
@@ -399,7 +423,10 @@ def set_build_environment_variables(pkg, env, dirty):
|
||||
env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
|
||||
|
||||
# Add bin directories from dependencies to the PATH for the build.
|
||||
for prefix in build_prefixes:
|
||||
# These directories are added to the beginning of the search path, and in
|
||||
# the order given by 'build_and_supporting_prefixes' (the iteration order
|
||||
# is reversed because each entry is prepended)
|
||||
for prefix in reversed(build_and_supporting_prefixes):
|
||||
for dirname in ['bin', 'bin64']:
|
||||
bin_dir = os.path.join(prefix, dirname)
|
||||
if os.path.isdir(bin_dir):
|
||||
@@ -443,7 +470,7 @@ def set_build_environment_variables(pkg, env, dirty):
|
||||
env.set(SPACK_CCACHE_BINARY, ccache)
|
||||
|
||||
# Add any pkgconfig directories to PKG_CONFIG_PATH
|
||||
for prefix in build_link_prefixes:
|
||||
for prefix in reversed(build_link_prefixes):
|
||||
for directory in ('lib', 'lib64', 'share'):
|
||||
pcdir = os.path.join(prefix, directory, 'pkgconfig')
|
||||
if os.path.isdir(pcdir):
|
||||
@@ -452,6 +479,38 @@ def set_build_environment_variables(pkg, env, dirty):
|
||||
return env
|
||||
|
||||
|
||||
def determine_number_of_jobs(
|
||||
parallel=False, command_line=None, config_default=None, max_cpus=None):
|
||||
"""
|
||||
Packages that require sequential builds need 1 job. Otherwise we use the
|
||||
number of jobs set on the command line. If not set, then we use the config
|
||||
defaults (which is usually set through the builtin config scope), but we
|
||||
cap to the number of CPUs available to avoid oversubscription.
|
||||
|
||||
Parameters:
|
||||
parallel (bool): true when package supports parallel builds
|
||||
command_line (int/None): command line override
|
||||
config_default (int/None): config default number of jobs
|
||||
max_cpus (int/None): maximum number of CPUs available. When None, this
|
||||
value is automatically determined.
|
||||
"""
|
||||
if not parallel:
|
||||
return 1
|
||||
|
||||
if command_line is None and 'command_line' in spack.config.scopes():
|
||||
command_line = spack.config.get('config:build_jobs', scope='command_line')
|
||||
|
||||
if command_line is not None:
|
||||
return command_line
|
||||
|
||||
max_cpus = max_cpus or cpus_available()
|
||||
|
||||
# in some rare cases _builtin config may not be set, so default to max 16
|
||||
config_default = config_default or spack.config.get('config:build_jobs', 16)
|
||||
|
||||
return min(max_cpus, config_default)
|
||||
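Hedged examples of the precedence implemented above (assuming no ``command_line`` config scope is active for the last case):

# Sequential packages always build with a single job.
assert determine_number_of_jobs(parallel=False) == 1

# An explicit command-line value wins outright.
assert determine_number_of_jobs(parallel=True, command_line=64) == 64

# Otherwise the config default is capped by the CPUs actually available.
assert determine_number_of_jobs(
    parallel=True, config_default=16, max_cpus=8) == 8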
|
||||
|
||||
def _set_variables_for_single_module(pkg, module):
|
||||
"""Helper function to set module variables for single module."""
|
||||
# Put a marker on this module so that it won't execute the body of this
|
||||
@@ -460,8 +519,7 @@ def _set_variables_for_single_module(pkg, module):
|
||||
if getattr(module, marker, False):
|
||||
return
|
||||
|
||||
jobs = spack.config.get('config:build_jobs', 16) if pkg.parallel else 1
|
||||
jobs = min(jobs, multiprocessing.cpu_count())
|
||||
jobs = determine_number_of_jobs(parallel=pkg.parallel)
|
||||
|
||||
m = module
|
||||
m.make_jobs = jobs
|
||||
|
@@ -6,6 +6,7 @@
|
||||
import itertools
|
||||
import os
|
||||
import os.path
|
||||
import stat
|
||||
from subprocess import PIPE
|
||||
from subprocess import check_call
|
||||
from typing import List # novm
|
||||
@@ -174,7 +175,10 @@ def runs_ok(script_abs_path):
|
||||
# Copy the good files over the bad ones
|
||||
for abs_path in to_be_patched:
|
||||
name = os.path.basename(abs_path)
|
||||
mode = os.stat(abs_path).st_mode
|
||||
os.chmod(abs_path, stat.S_IWUSR)
|
||||
fs.copy(substitutes[name], abs_path)
|
||||
os.chmod(abs_path, mode)
|
||||
|
||||
@run_before('configure')
|
||||
def _set_autotools_environment_variables(self):
|
||||
|
lib/spack/spack/build_systems/cached_cmake.py (new file, 249 lines)
@@ -0,0 +1,249 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
import os
|
||||
|
||||
from llnl.util.filesystem import install, mkdirp
|
||||
import llnl.util.tty as tty
|
||||
|
||||
from spack.build_systems.cmake import CMakePackage
|
||||
from spack.package import run_after
|
||||
|
||||
|
||||
def cmake_cache_path(name, value, comment=""):
|
||||
"""Generate a string for a cmake cache variable"""
|
||||
return 'set({0} "{1}" CACHE PATH "{2}")\n'.format(name, value, comment)
|
||||
|
||||
|
||||
def cmake_cache_string(name, value, comment=""):
|
||||
"""Generate a string for a cmake cache variable"""
|
||||
return 'set({0} "{1}" CACHE STRING "{2}")\n'.format(name, value, comment)
|
||||
|
||||
|
||||
def cmake_cache_option(name, boolean_value, comment=""):
|
||||
"""Generate a string for a cmake configuration option"""
|
||||
|
||||
value = "ON" if boolean_value else "OFF"
|
||||
return 'set({0} {1} CACHE BOOL "{2}")\n'.format(name, value, comment)
|
||||
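The three helpers above just emit CMake initial-cache ``set()`` lines; for example (the argument values are illustrative):

assert cmake_cache_path("MPI_C_COMPILER", "/usr/bin/mpicc") == \
    'set(MPI_C_COMPILER "/usr/bin/mpicc" CACHE PATH "")\n'
assert cmake_cache_option("ENABLE_TESTS", False, "unit tests") == \
    'set(ENABLE_TESTS OFF CACHE BOOL "unit tests")\n'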
|
||||
|
||||
class CachedCMakePackage(CMakePackage):
|
||||
"""Specialized class for packages built using CMake initial cache.
|
||||
|
||||
This feature of CMake allows packages to increase reproducibility,
|
||||
especially between Spack- and manual builds. It also allows packages to
|
||||
sidestep certain parsing bugs in extremely long ``cmake`` commands, and to
|
||||
avoid system limits on the length of the command line."""
|
||||
|
||||
phases = ['initconfig', 'cmake', 'build', 'install']
|
||||
|
||||
@property
|
||||
def cache_name(self):
|
||||
return "{0}-{1}-{2}@{3}.cmake".format(
|
||||
self.name,
|
||||
self.spec.architecture,
|
||||
self.spec.compiler.name,
|
||||
self.spec.compiler.version,
|
||||
)
|
||||
|
||||
@property
|
||||
def cache_path(self):
|
||||
return os.path.join(self.stage.source_path, self.cache_name)
|
||||
|
||||
def flag_handler(self, name, flags):
|
||||
if name in ('cflags', 'cxxflags', 'cppflags', 'fflags'):
|
||||
return (None, None, None) # handled in the cmake cache
|
||||
return (flags, None, None)
|
||||
|
||||
def initconfig_compiler_entries(self):
|
||||
# This will tell cmake to use the Spack compiler wrappers when run
|
||||
# through Spack, but use the underlying compiler when run outside of
|
||||
# Spack
|
||||
spec = self.spec
|
||||
|
||||
# Fortran compiler is optional
|
||||
if "FC" in os.environ:
|
||||
spack_fc_entry = cmake_cache_path(
|
||||
"CMAKE_Fortran_COMPILER", os.environ['FC'])
|
||||
system_fc_entry = cmake_cache_path(
|
||||
"CMAKE_Fortran_COMPILER", self.compiler.fc)
|
||||
else:
|
||||
spack_fc_entry = "# No Fortran compiler defined in spec"
|
||||
system_fc_entry = "# No Fortran compiler defined in spec"
|
||||
|
||||
entries = [
|
||||
"#------------------{0}".format("-" * 60),
|
||||
"# Compilers",
|
||||
"#------------------{0}".format("-" * 60),
|
||||
"# Compiler Spec: {0}".format(spec.compiler),
|
||||
"#------------------{0}".format("-" * 60),
|
||||
'if(DEFINED ENV{SPACK_CC})\n',
|
||||
' ' + cmake_cache_path(
|
||||
"CMAKE_C_COMPILER", os.environ['CC']),
|
||||
' ' + cmake_cache_path(
|
||||
"CMAKE_CXX_COMPILER", os.environ['CXX']),
|
||||
' ' + spack_fc_entry,
|
||||
'else()\n',
|
||||
' ' + cmake_cache_path(
|
||||
"CMAKE_C_COMPILER", self.compiler.cc),
|
||||
' ' + cmake_cache_path(
|
||||
"CMAKE_CXX_COMPILER", self.compiler.cxx),
|
||||
' ' + system_fc_entry,
|
||||
'endif()\n'
|
||||
]
|
||||
|
||||
# use global spack compiler flags
|
||||
cppflags = ' '.join(spec.compiler_flags['cppflags'])
|
||||
if cppflags:
|
||||
# avoid always ending up with ' ' with no flags defined
|
||||
cppflags += ' '
|
||||
cflags = cppflags + ' '.join(spec.compiler_flags['cflags'])
|
||||
if cflags:
|
||||
entries.append(cmake_cache_string("CMAKE_C_FLAGS", cflags))
|
||||
cxxflags = cppflags + ' '.join(spec.compiler_flags['cxxflags'])
|
||||
if cxxflags:
|
||||
entries.append(cmake_cache_string("CMAKE_CXX_FLAGS", cxxflags))
|
||||
fflags = ' '.join(spec.compiler_flags['fflags'])
|
||||
if fflags:
|
||||
entries.append(cmake_cache_string("CMAKE_Fortran_FLAGS", fflags))
|
||||
|
||||
# Override XL compiler family
|
||||
familymsg = ("Override to proper compiler family for XL")
|
||||
if "xlf" in (self.compiler.fc or ''): # noqa: F821
|
||||
entries.append(cmake_cache_string(
|
||||
"CMAKE_Fortran_COMPILER_ID", "XL",
|
||||
familymsg))
|
||||
if "xlc" in self.compiler.cc: # noqa: F821
|
||||
entries.append(cmake_cache_string(
|
||||
"CMAKE_C_COMPILER_ID", "XL",
|
||||
familymsg))
|
||||
if "xlC" in self.compiler.cxx: # noqa: F821
|
||||
entries.append(cmake_cache_string(
|
||||
"CMAKE_CXX_COMPILER_ID", "XL",
|
||||
familymsg))
|
||||
|
||||
return entries
|
||||
|
||||
def initconfig_mpi_entries(self):
|
||||
spec = self.spec
|
||||
|
||||
if not spec.satisfies('^mpi'):
|
||||
return []
|
||||
|
||||
entries = [
|
||||
"#------------------{0}".format("-" * 60),
|
||||
"# MPI",
|
||||
"#------------------{0}\n".format("-" * 60),
|
||||
]
|
||||
|
||||
entries.append(cmake_cache_path("MPI_C_COMPILER",
|
||||
spec['mpi'].mpicc))
|
||||
entries.append(cmake_cache_path("MPI_CXX_COMPILER",
|
||||
spec['mpi'].mpicxx))
|
||||
entries.append(cmake_cache_path("MPI_Fortran_COMPILER",
|
||||
spec['mpi'].mpifc))
|
||||
|
||||
# Check for slurm
|
||||
using_slurm = False
|
||||
slurm_checks = ['+slurm',
|
||||
'schedulers=slurm',
|
||||
'process_managers=slurm']
|
||||
if any(spec['mpi'].satisfies(variant) for variant in slurm_checks):
|
||||
using_slurm = True
|
||||
|
||||
# Determine MPIEXEC
|
||||
if using_slurm:
|
||||
if spec['mpi'].external:
|
||||
# Heuristic until we have dependents on externals
|
||||
mpiexec = '/usr/bin/srun'
|
||||
else:
|
||||
mpiexec = os.path.join(spec['slurm'].prefix.bin, 'srun')
|
||||
else:
|
||||
mpiexec = os.path.join(spec['mpi'].prefix.bin, 'mpirun')
|
||||
if not os.path.exists(mpiexec):
|
||||
mpiexec = os.path.join(spec['mpi'].prefix.bin, 'mpiexec')
|
||||
|
||||
if not os.path.exists(mpiexec):
|
||||
msg = "Unable to determine MPIEXEC, %s tests may fail" % self.name
|
||||
entries.append("# {0}\n".format(msg))
|
||||
tty.warn(msg)
|
||||
else:
|
||||
# starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
|
||||
# vs the older versions which expect MPIEXEC
|
||||
if self.spec["cmake"].satisfies('@3.10:'):
|
||||
entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE",
|
||||
mpiexec))
|
||||
else:
|
||||
entries.append(cmake_cache_path("MPIEXEC", mpiexec))
|
||||
|
||||
# Determine MPIEXEC_NUMPROC_FLAG
|
||||
if using_slurm:
|
||||
entries.append(cmake_cache_string("MPIEXEC_NUMPROC_FLAG", "-n"))
|
||||
else:
|
||||
entries.append(cmake_cache_string("MPIEXEC_NUMPROC_FLAG", "-np"))
|
||||
|
||||
return entries
|
||||
|
||||
def initconfig_hardware_entries(self):
|
||||
spec = self.spec
|
||||
|
||||
entries = [
|
||||
"#------------------{0}".format("-" * 60),
|
||||
"# Hardware",
|
||||
"#------------------{0}\n".format("-" * 60),
|
||||
]
|
||||
|
||||
if spec.satisfies('^cuda'):
|
||||
entries.append("#------------------{0}".format("-" * 30))
|
||||
entries.append("# Cuda")
|
||||
entries.append("#------------------{0}\n".format("-" * 30))
|
||||
|
||||
cudatoolkitdir = spec['cuda'].prefix
|
||||
entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR",
|
||||
cudatoolkitdir))
|
||||
cudacompiler = "${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc"
|
||||
entries.append(cmake_cache_path("CMAKE_CUDA_COMPILER",
|
||||
cudacompiler))
|
||||
|
||||
if spec.satisfies('^mpi'):
|
||||
entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER",
|
||||
"${MPI_CXX_COMPILER}"))
|
||||
else:
|
||||
entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER",
|
||||
"${CMAKE_CXX_COMPILER}"))
|
||||
|
||||
return entries
|
||||
|
||||
def std_initconfig_entries(self):
|
||||
return [
|
||||
"#------------------{0}".format("-" * 60),
|
||||
"# !!!! This is a generated file, edit at own risk !!!!",
|
||||
"#------------------{0}".format("-" * 60),
|
||||
"# CMake executable path: {0}".format(
|
||||
self.spec['cmake'].command.path),
|
||||
"#------------------{0}\n".format("-" * 60),
|
||||
]
|
||||
|
||||
def initconfig(self, spec, prefix):
|
||||
cache_entries = (self.std_initconfig_entries() +
|
||||
self.initconfig_compiler_entries() +
|
||||
self.initconfig_mpi_entries() +
|
||||
self.initconfig_hardware_entries() +
|
||||
self.initconfig_package_entries())
|
||||
|
||||
with open(self.cache_name, 'w') as f:
|
||||
for entry in cache_entries:
|
||||
f.write('%s\n' % entry)
|
||||
f.write('\n')
|
||||
|
||||
@property
|
||||
def std_cmake_args(self):
|
||||
args = super(CachedCMakePackage, self).std_cmake_args
|
||||
args.extend(['-C', self.cache_path])
|
||||
return args
|
||||
|
||||
@run_after('install')
|
||||
def install_cmake_cache(self):
|
||||
mkdirp(self.spec.prefix.share.cmake)
|
||||
install(self.cache_path, self.spec.prefix.share.cmake)
|
@@ -151,7 +151,7 @@ def license_files(self):
|
||||
'+advisor': 'advisor',
|
||||
'+inspector': 'inspector',
|
||||
'+itac': 'itac',
|
||||
'+vtune': 'vtune_amplifier',
|
||||
'+vtune': 'vtune_profiler',
|
||||
}.items():
|
||||
if variant in self.spec:
|
||||
dirs.append(self.normalize_path(
|
||||
@@ -202,7 +202,8 @@ def pset_components(self):
|
||||
'+itac': ' intel-itac intel-ta intel-tc'
|
||||
' intel-trace-analyzer intel-trace-collector',
|
||||
# Trace Analyzer and Collector
|
||||
'+vtune': ' intel-vtune-amplifier', # VTune
|
||||
'+vtune': ' intel-vtune'
|
||||
# VTune, ..-profiler since 2020, ..-amplifier before
|
||||
}.items():
|
||||
if variant in self.spec:
|
||||
c += components_to_add
|
||||
@@ -535,8 +536,9 @@ def normalize_path(self, component_path, component_suite_dir=None,
|
||||
[None, '2016:', 'compilers_and_libraries'],
|
||||
['advisor', ':2016', 'advisor_xe'],
|
||||
['inspector', ':2016', 'inspector_xe'],
|
||||
['vtune_amplifier', ':2017', 'vtune_amplifier_xe'],
|
||||
['vtune_profiler', ':2017', 'vtune_amplifier_xe'],
|
||||
['vtune', ':2017', 'vtune_amplifier_xe'], # alt.
|
||||
['vtune_profiler', ':2019', 'vtune_amplifier'],
|
||||
['itac', ':', 'itac', [os.sep + standalone_glob]],
|
||||
]:
|
||||
if cs == rename_rule[0] and v.satisfies(ver(rename_rule[1])):
|
||||
|
@@ -52,9 +52,13 @@ class MesonPackage(PackageBase):
|
||||
|
||||
build_time_test_callbacks = ['check']
|
||||
|
||||
variant('buildtype', default='release',
|
||||
variant('buildtype', default='debugoptimized',
|
||||
description='Meson build type',
|
||||
values=('plain', 'debug', 'debugoptimized', 'release', 'minsize'))
|
||||
variant('default_library', default='shared',
|
||||
description=' Default library type',
|
||||
values=('shared', 'static', 'both'))
|
||||
variant('strip', default=False, description='Strip targets on install')
|
||||
|
||||
depends_on('meson', type='build')
|
||||
depends_on('ninja', type='build')
|
||||
@@ -96,6 +100,13 @@ def _std_args(pkg):
|
||||
except KeyError:
|
||||
build_type = 'release'
|
||||
|
||||
strip = 'true' if '+strip' in pkg.spec else 'false'
|
||||
|
||||
try:
|
||||
default_library = pkg.spec.variants['default_library'].value
|
||||
except KeyError:
|
||||
default_library = 'shared'
|
||||
|
||||
args = [
|
||||
'--prefix={0}'.format(pkg.prefix),
|
||||
# If we do not specify libdir explicitly, Meson chooses something
|
||||
@@ -103,8 +114,9 @@ def _std_args(pkg):
|
||||
# find libraries and pkg-config files.
|
||||
# See https://github.com/mesonbuild/meson/issues/2197
|
||||
'--libdir={0}'.format(pkg.prefix.lib),
|
||||
'--buildtype={0}'.format(build_type),
|
||||
'--strip',
|
||||
'-Dbuildtype={0}'.format(build_type),
|
||||
'-Dstrip={0}'.format(strip),
|
||||
'-Ddefault_library={0}'.format(default_library)
|
||||
]
|
||||
|
||||
return args
|
||||
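With the default variant values above, the generated argument list is roughly the following (the install prefix shown is hypothetical):

args = [
    '--prefix=/path/to/prefix',
    '--libdir=/path/to/prefix/lib',
    '-Dbuildtype=debugoptimized',
    '-Dstrip=false',
    '-Ddefault_library=shared',
]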
@@ -131,6 +143,7 @@ def meson_args(self):
|
||||
* ``--libdir``
|
||||
* ``--buildtype``
|
||||
* ``--strip``
|
||||
* ``--default_library``
|
||||
|
||||
which will be set automatically.
|
||||
|
||||
|
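For context on the -D-style options the hunk above moves to, here is a hedged sketch of how the standard arguments can be derived from a package's variants; `pkg` is assumed to carry a Spack-style spec, and the fallbacks mirror the diff's try/except pattern:

def example_meson_args(pkg):
    # Fall back to the variant defaults when a variant is not set on the spec.
    try:
        build_type = pkg.spec.variants['buildtype'].value
    except KeyError:
        build_type = 'release'
    strip = 'true' if '+strip' in pkg.spec else 'false'
    try:
        default_library = pkg.spec.variants['default_library'].value
    except KeyError:
        default_library = 'shared'
    return [
        '-Dbuildtype={0}'.format(build_type),
        '-Dstrip={0}'.format(strip),
        '-Ddefault_library={0}'.format(default_library),
    ]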
@@ -7,14 +7,16 @@
|
||||
|
||||
"""
|
||||
|
||||
import getpass
|
||||
import shutil
|
||||
from sys import platform
|
||||
from os.path import basename, dirname, isdir, join
|
||||
from os.path import basename, dirname, isdir
|
||||
|
||||
from spack.package import Package
|
||||
from spack.util.environment import EnvironmentModifications
|
||||
from spack.util.executable import Executable
|
||||
|
||||
from llnl.util.filesystem import find_headers, find_libraries
|
||||
from llnl.util.filesystem import find_headers, find_libraries, join_path
|
||||
|
||||
|
||||
class IntelOneApiPackage(Package):
|
||||
@@ -33,6 +35,11 @@ def component_dir(self):
|
||||
"""Subdirectory for this component in the install prefix."""
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def component_path(self):
|
||||
"""Path to component <prefix>/<component>/<version>."""
|
||||
return join_path(self.prefix, self.component_dir, str(self.spec.version))
|
||||
|
||||
def install(self, spec, prefix, installer_path=None):
|
||||
"""Shared install method for all oneapi packages."""
|
||||
|
||||
@@ -42,6 +49,22 @@ def install(self, spec, prefix, installer_path=None):
|
||||
installer_path = basename(self.url_for_version(spec.version))
|
||||
|
||||
if platform == 'linux':
|
||||
# Intel installer assumes and enforces that all components
|
||||
# are installed into a single prefix. Spack wants to
|
||||
# install each component in a separate prefix. The
|
||||
# installer mechanism is implemented by saving install
|
||||
# information in a directory called installercache for
|
||||
# future runs. The location of the installercache depends
|
||||
# on the userid. For root it is always in /var/intel. For
|
||||
# non-root it is in $HOME/intel.
|
||||
#
|
||||
# The method for preventing this install from interfering
|
||||
# with other install depends on the userid. For root, we
|
||||
# delete the installercache before and after install. For
|
||||
# non root we redefine the HOME environment variable.
|
||||
if getpass.getuser() == 'root':
|
||||
shutil.rmtree('/var/intel/installercache', ignore_errors=True)
|
||||
|
||||
bash = Executable('bash')
|
||||
|
||||
# Installer writes files in ~/intel set HOME so it goes to prefix
|
||||
@@ -52,22 +75,24 @@ def install(self, spec, prefix, installer_path=None):
|
||||
'--eula', 'accept',
|
||||
'--install-dir', prefix)
|
||||
|
||||
if getpass.getuser() == 'root':
|
||||
shutil.rmtree('/var/intel/installercache', ignore_errors=True)
|
||||
|
||||
# Some installers have a bug and do not return an error code when failing
|
||||
if not isdir(join(prefix, self.component_dir)):
|
||||
if not isdir(join_path(prefix, self.component_dir)):
|
||||
raise RuntimeError('install failed')
|
||||
|
||||
def setup_run_environment(self, env):
|
||||
|
||||
"""Adds environment variables to the generated module file.
|
||||
|
||||
These environment variables come from running:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ source {prefix}/setvars.sh --force
|
||||
$ source {prefix}/{component}/{version}/env/vars.sh
|
||||
"""
|
||||
env.extend(EnvironmentModifications.from_sourcing_file(
|
||||
join(self.prefix, self.component_dir, 'latest/env/vars.sh')))
|
||||
join_path(self.component_path, 'env', 'vars.sh')))
|
||||
|
||||
|
||||
class IntelOneApiLibraryPackage(IntelOneApiPackage):
|
||||
@@ -75,12 +100,11 @@ class IntelOneApiLibraryPackage(IntelOneApiPackage):
|
||||
|
||||
@property
|
||||
def headers(self):
|
||||
include_path = '%s/%s/latest/include' % (
|
||||
self.prefix, self.component_dir)
|
||||
include_path = join_path(self.component_path, 'include')
|
||||
return find_headers('*', include_path, recursive=True)
|
||||
|
||||
@property
|
||||
def libs(self):
|
||||
lib_path = '%s/%s/latest/lib/intel64' % (self.prefix, self.component_dir)
|
||||
lib_path = join_path(self.component_path, 'lib', 'intel64')
|
||||
lib_path = lib_path if isdir(lib_path) else dirname(lib_path)
|
||||
return find_libraries('*', root=lib_path, shared=True, recursive=True)
|
||||
|
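The refactor above centralizes path construction in component_path. A small sketch of the per-version layout it encodes (paths and component names are examples, not real install locations):

import os

def example_component_path(prefix, component_dir, version):
    # <prefix>/<component>/<version>, e.g. .../intel-oneapi-mkl-2021.1.1/mkl/2021.1.1
    return os.path.join(prefix, component_dir, str(version))

base = example_component_path('/opt/spack/oneapi-mkl', 'mkl', '2021.1.1')
include_path = os.path.join(base, 'include')      # headers searched here
lib_path = os.path.join(base, 'lib', 'intel64')   # libraries searched here
vars_sh = os.path.join(base, 'env', 'vars.sh')    # sourced to build the module file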
@@ -17,7 +17,7 @@
|
||||
import spack.util.path
|
||||
|
||||
|
||||
def _misc_cache():
|
||||
def misc_cache_location():
|
||||
"""The ``misc_cache`` is Spack's cache for small data.
|
||||
|
||||
Currently the ``misc_cache`` stores indexes for virtual dependency
|
||||
@@ -27,7 +27,11 @@ def _misc_cache():
|
||||
if not path:
|
||||
path = os.path.join(spack.paths.user_config_path, 'cache')
|
||||
path = spack.util.path.canonicalize_path(path)
|
||||
return path
|
||||
|
||||
|
||||
def _misc_cache():
|
||||
path = misc_cache_location()
|
||||
return spack.util.file_cache.FileCache(path)
|
||||
|
||||
|
||||
@@ -35,7 +39,7 @@ def _misc_cache():
|
||||
misc_cache = llnl.util.lang.Singleton(_misc_cache)
|
||||
|
||||
|
||||
def _fetch_cache():
|
||||
def fetch_cache_location():
|
||||
"""Filesystem cache of downloaded archives.
|
||||
|
||||
This prevents Spack from repeatedly fetching the same files when
|
||||
@@ -45,7 +49,11 @@ def _fetch_cache():
|
||||
if not path:
|
||||
path = os.path.join(spack.paths.var_path, "cache")
|
||||
path = spack.util.path.canonicalize_path(path)
|
||||
return path
|
||||
|
||||
|
||||
def _fetch_cache():
|
||||
path = fetch_cache_location()
|
||||
return spack.fetch_strategy.FsCache(path)
|
||||
|
||||
|
||||
|
@@ -575,6 +575,13 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file, prune_dag=False,
|
||||
ci_mirrors = yaml_root['mirrors']
|
||||
mirror_urls = [url for url in ci_mirrors.values()]
|
||||
|
||||
# Check for a list of "known broken" specs that we should not bother
|
||||
# trying to build.
|
||||
broken_specs_url = ''
|
||||
known_broken_specs_encountered = []
|
||||
if 'broken-specs-url' in gitlab_ci:
|
||||
broken_specs_url = gitlab_ci['broken-specs-url']
|
||||
|
||||
enable_artifacts_buildcache = False
|
||||
if 'enable-artifacts-buildcache' in gitlab_ci:
|
||||
enable_artifacts_buildcache = gitlab_ci['enable-artifacts-buildcache']
|
||||
@@ -665,6 +672,14 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file, prune_dag=False,
|
||||
pkg_name = pkg_name_from_spec_label(spec_label)
|
||||
release_spec = root_spec[pkg_name]
|
||||
|
||||
# Check if this spec is in our list of known failures.
|
||||
if broken_specs_url:
|
||||
full_hash = release_spec.full_hash()
|
||||
broken_spec_path = url_util.join(broken_specs_url, full_hash)
|
||||
if web_util.url_exists(broken_spec_path):
|
||||
known_broken_specs_encountered.append('{0} ({1})'.format(
|
||||
release_spec, full_hash))
|
||||
|
||||
runner_attribs = find_matching_config(
|
||||
release_spec, gitlab_ci)
|
||||
|
||||
@@ -1029,6 +1044,14 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file, prune_dag=False,
|
||||
|
||||
sorted_output = {'no-specs-to-rebuild': noop_job}
|
||||
|
||||
if known_broken_specs_encountered:
|
||||
error_msg = (
|
||||
'Pipeline generation failed due to the presence of the '
|
||||
'following specs that are known to be broken in develop:\n')
|
||||
for broken_spec in known_broken_specs_encountered:
|
||||
error_msg += '* {0}\n'.format(broken_spec)
|
||||
tty.die(error_msg)
|
||||
|
||||
with open(output_file, 'w') as outf:
|
||||
outf.write(syaml.dump_config(sorted_output, default_flow_style=True))
|
||||
|
||||
|
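Pulled out of the hunk above for readability, the new pruning logic is essentially: look up each release spec's full hash under broken-specs-url, collect any hits, and fail pipeline generation if there were any. A hedged condensation (the helper name is invented; url_util, web_util and tty are the same helpers the surrounding file already imports):

def example_check_broken(release_specs, broken_specs_url):
    hits = []
    for release_spec in release_specs:
        full_hash = release_spec.full_hash()
        # A spec is "known broken" if an object named after its full hash exists.
        if web_util.url_exists(url_util.join(broken_specs_url, full_hash)):
            hits.append('{0} ({1})'.format(release_spec, full_hash))
    if hits:
        tty.die('Pipeline generation failed; known-broken specs:\n' +
                '\n'.join('* {0}'.format(s) for s in hits))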
118  lib/spack/spack/cmd/analyze.py  Normal file
@@ -0,0 +1,118 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import sys
|
||||
|
||||
import llnl.util.tty as tty
|
||||
|
||||
import spack.analyzers
|
||||
import spack.build_environment
|
||||
import spack.cmd
|
||||
import spack.cmd.common.arguments as arguments
|
||||
import spack.environment as ev
|
||||
import spack.fetch_strategy
|
||||
import spack.monitor
|
||||
import spack.paths
|
||||
import spack.report
|
||||
|
||||
|
||||
description = "run analyzers on installed packages"
|
||||
section = "analysis"
|
||||
level = "long"
|
||||
|
||||
|
||||
def setup_parser(subparser):
|
||||
sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='analyze_command')
|
||||
|
||||
sp.add_parser('list-analyzers',
|
||||
description="list available analyzers",
|
||||
help="show list of analyzers that are available to run.")
|
||||
|
||||
# This adds the monitor group to the subparser
|
||||
spack.monitor.get_monitor_group(subparser)
|
||||
|
||||
# Run Parser
|
||||
run_parser = sp.add_parser('run', description="run an analyzer",
|
||||
help="provide the name of the analyzer to run.")
|
||||
|
||||
run_parser.add_argument(
|
||||
'--overwrite', action='store_true',
|
||||
help="re-analyze even if the output file already exists.")
|
||||
run_parser.add_argument(
|
||||
'-p', '--path', default=None,
|
||||
dest='path',
|
||||
help="write output to a different directory than ~/.spack/analyzers")
|
||||
run_parser.add_argument(
|
||||
'-a', '--analyzers', default=None,
|
||||
dest="analyzers", action="append",
|
||||
help="add an analyzer (defaults to all available)")
|
||||
arguments.add_common_arguments(run_parser, ['spec'])
|
||||
|
||||
|
||||
def analyze_spec(spec, analyzers=None, outdir=None, monitor=None, overwrite=False):
|
||||
"""
|
||||
Do an analysis for a spec, optionally adding monitoring.
|
||||
|
||||
We also allow the user to specify a custom output directory.
|
||||
analyze_spec(spec, args.analyzers, args.outdir, monitor)
|
||||
|
||||
Args:
|
||||
spec (Spec): spec object of installed package
|
||||
analyzers (list): list of analyzer (keys) to run
|
||||
monitor (monitor.SpackMonitorClient): a monitor client
|
||||
overwrite (bool): overwrite result if already exists
|
||||
"""
|
||||
analyzers = analyzers or list(spack.analyzers.analyzer_types.keys())
|
||||
|
||||
# Load the build environment from the spec install directory, and send
|
||||
# the spec to the monitor if it's not known
|
||||
if monitor:
|
||||
monitor.load_build_environment(spec)
|
||||
monitor.new_configuration([spec])
|
||||
|
||||
for name in analyzers:
|
||||
|
||||
# Instantiate the analyzer with the spec and outdir
|
||||
analyzer = spack.analyzers.get_analyzer(name)(spec, outdir)
|
||||
|
||||
# Run the analyzer to get a json result - results are returned as
|
||||
# a dictionary with a key corresponding to the analyzer type, so
|
||||
# we can just update the data
|
||||
result = analyzer.run()
|
||||
|
||||
# Send the result. We do them separately because:
|
||||
# 1. each analyzer might have differently organized output
|
||||
# 2. the size of a result can be large
|
||||
analyzer.save_result(result, overwrite)
|
||||
|
||||
|
||||
def analyze(parser, args, **kwargs):
|
||||
|
||||
# If the user wants to list analyzers, do so and exit
|
||||
if args.analyze_command == "list-analyzers":
|
||||
spack.analyzers.list_all()
|
||||
sys.exit(0)
|
||||
|
||||
# handle active environment, if any
|
||||
env = ev.get_env(args, 'analyze')
|
||||
|
||||
# Get and disambiguate the spec (we should only have one)
|
||||
specs = spack.cmd.parse_specs(args.spec)
|
||||
if not specs:
|
||||
tty.die("You must provide one or more specs to analyze.")
|
||||
spec = spack.cmd.disambiguate_spec(specs[0], env)
|
||||
|
||||
# The user wants to monitor builds using github.com/spack/spack-monitor
|
||||
# It is instantiated once here, and then available at spack.monitor.cli
|
||||
monitor = None
|
||||
if args.use_monitor:
|
||||
monitor = spack.monitor.get_client(
|
||||
host=args.monitor_host,
|
||||
prefix=args.monitor_prefix,
|
||||
disable_auth=args.monitor_disable_auth,
|
||||
)
|
||||
|
||||
# Run the analysis
|
||||
analyze_spec(spec, args.analyzers, args.path, monitor, args.overwrite)
|
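A hedged sketch of driving the same flow programmatically rather than through the CLI; the spec and analyzer name below are assumptions used only for illustration, and analyze_spec is the function defined in this new file:

import spack.cmd

# Roughly what `spack analyze run -a <name> <spec>` does under the hood.
specs = spack.cmd.parse_specs('hdf5')                    # example spec
spec = spack.cmd.disambiguate_spec(specs[0], env=None)   # must already be installed
analyze_spec(spec,
             analyzers=['install_files'],                # assumed analyzer name
             outdir=None,                                # default ~/.spack/analyzers
             monitor=None,
             overwrite=True)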
@@ -22,7 +22,7 @@ def shell_init_instructions(cmd, equivalent):
|
||||
shell_specific = "{sh_arg}" in equivalent
|
||||
|
||||
msg = [
|
||||
"`%s` requires spack's shell support." % cmd,
|
||||
"`%s` requires Spack's shell support." % cmd,
|
||||
"",
|
||||
"To set up shell support, run the command below for your shell.",
|
||||
"",
|
||||
@@ -49,5 +49,12 @@ def shell_init_instructions(cmd, equivalent):
|
||||
else:
|
||||
msg += [" " + equivalent]
|
||||
|
||||
msg += [
|
||||
"",
|
||||
"If you have already set up Spack's shell support but still receive",
|
||||
"this message, please make sure to call Spack via the `spack` command",
|
||||
"without any path components (such as `bin/spack`).",
|
||||
]
|
||||
|
||||
msg += ['']
|
||||
tty.error(*msg)
|
||||
|
@@ -5,7 +5,6 @@
|
||||
|
||||
|
||||
import argparse
|
||||
import multiprocessing
|
||||
|
||||
import spack.cmd
|
||||
import spack.config
|
||||
@@ -102,22 +101,10 @@ def __call__(self, parser, namespace, jobs, option_string):
|
||||
'[expected a positive integer, got "{1}"]'
|
||||
raise ValueError(msg.format(option_string, jobs))
|
||||
|
||||
jobs = min(jobs, multiprocessing.cpu_count())
|
||||
spack.config.set('config:build_jobs', jobs, scope='command_line')
|
||||
|
||||
setattr(namespace, 'jobs', jobs)
|
||||
|
||||
@property
|
||||
def default(self):
|
||||
# This default is coded as a property so that look-up
|
||||
# of this value is done only on demand
|
||||
return min(spack.config.get('config:build_jobs', 16),
|
||||
multiprocessing.cpu_count())
|
||||
|
||||
@default.setter
|
||||
def default(self, value):
|
||||
pass
|
||||
|
||||
|
||||
class DeptypeAction(argparse.Action):
|
||||
"""Creates a tuple of valid dependency types from a deptype argument."""
|
||||
@@ -267,6 +254,7 @@ def install_status():
|
||||
'-I', '--install-status', action='store_true', default=False,
|
||||
help='show install status of packages. packages can be: '
|
||||
'installed [+], missing and needed by an installed package [-], '
|
||||
'installed in an upstream instance [^], '
|
||||
'or not installed (no annotation)')
|
||||
|
||||
|
||||
|
@@ -53,6 +53,9 @@ def emulate_env_utility(cmd_name, context, args):
|
||||
spec = args.spec[0]
|
||||
cmd = args.spec[1:]
|
||||
|
||||
if not spec:
|
||||
tty.die("spack %s requires a spec." % cmd_name)
|
||||
|
||||
specs = spack.cmd.parse_specs(spec, concretize=False)
|
||||
if len(specs) > 1:
|
||||
tty.die("spack %s only takes one spec." % cmd_name)
|
||||
|
@@ -338,7 +338,7 @@ def __init__(self, name, url, *args, **kwargs):
        r_name = parse_name(url)

        cran = re.search(
-           r'(?:r-project)[^/]+/src' + '/([^/]+)' * 2,
+           r'(?:r-project|rstudio)[^/]+/src' + '/([^/]+)' * 2,
            url
        )
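A quick, illustrative check that the widened pattern matches both CRAN mirror layouts (the URLs are made up):

import re

pattern = r'(?:r-project|rstudio)[^/]+/src' + '/([^/]+)' * 2
for url in ('https://cloud.r-project.org/src/contrib/ggplot2_3.3.3.tar.gz',
            'https://cran.rstudio.com/src/contrib/ggplot2_3.3.3.tar.gz'):
    m = re.search(pattern, url)
    print(m.groups())   # ('contrib', 'ggplot2_3.3.3.tar.gz') for both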
@@ -17,6 +17,7 @@
|
||||
import spack.cmd.common.arguments as arguments
|
||||
import spack.environment as ev
|
||||
import spack.fetch_strategy
|
||||
import spack.monitor
|
||||
import spack.paths
|
||||
import spack.report
|
||||
from spack.error import SpackError
|
||||
@@ -106,6 +107,8 @@ def setup_parser(subparser):
|
||||
'--cache-only', action='store_true', dest='cache_only', default=False,
|
||||
help="only install package from binary mirrors")
|
||||
|
||||
monitor_group = spack.monitor.get_monitor_group(subparser) # noqa
|
||||
|
||||
subparser.add_argument(
|
||||
'--include-build-deps', action='store_true', dest='include_build_deps',
|
||||
default=False, help="""include build deps when installing from cache,
|
||||
@@ -136,6 +139,10 @@ def setup_parser(subparser):
|
||||
subparser.add_argument(
|
||||
'--only-concrete', action='store_true', default=False,
|
||||
help='(with environment) only install already concretized specs')
|
||||
subparser.add_argument(
|
||||
'--no-add', action='store_true', default=False,
|
||||
help="""(with environment) only install specs provided as argument
|
||||
if they are already in the concretized environment""")
|
||||
subparser.add_argument(
|
||||
'-f', '--file', action='append', default=[],
|
||||
dest='specfiles', metavar='SPEC_YAML_FILE',
|
||||
@@ -183,7 +190,7 @@ def default_log_file(spec):
|
||||
"""
|
||||
fmt = 'test-{x.name}-{x.version}-{hash}.xml'
|
||||
basename = fmt.format(x=spec, hash=spec.dag_hash())
|
||||
dirname = fs.os.path.join(spack.paths.var_path, 'junit-report')
|
||||
dirname = fs.os.path.join(spack.paths.reports_path, 'junit')
|
||||
fs.mkdirp(dirname)
|
||||
return fs.os.path.join(dirname, basename)
|
||||
|
||||
@@ -202,11 +209,66 @@ def install_specs(cli_args, kwargs, specs):
|
||||
|
||||
try:
|
||||
if env:
|
||||
specs_to_install = []
|
||||
specs_to_add = []
|
||||
for abstract, concrete in specs:
|
||||
with env.write_transaction():
|
||||
concrete = env.concretize_and_add(abstract, concrete)
|
||||
env.write(regenerate_views=False)
|
||||
env.install_all(cli_args, **kwargs)
|
||||
# This won't find specs added to the env since last
|
||||
# concretize, therefore should we consider enforcing
|
||||
# concretization of the env before allowing to install
|
||||
# specs?
|
||||
m_spec = env.matching_spec(abstract)
|
||||
|
||||
# If there is any ambiguity in the above call to matching_spec
|
||||
# (i.e. if more than one spec in the environment matches), then
|
||||
# SpackEnvironmentError is raised, with a message listing the
|
||||
# matches. Getting to this point means there were either
|
||||
# no matches or exactly one match.
|
||||
|
||||
if not m_spec:
|
||||
tty.debug('{0} matched nothing in the env'.format(
|
||||
abstract.name))
|
||||
# no matches in the env
|
||||
if cli_args.no_add:
|
||||
msg = ('You asked to install {0} without adding it ' +
|
||||
'(--no-add), but no such spec exists in ' +
|
||||
'environment').format(abstract.name)
|
||||
tty.die(msg)
|
||||
else:
|
||||
tty.debug('adding {0} as a root'.format(abstract.name))
|
||||
specs_to_add.append((abstract, concrete))
|
||||
|
||||
continue
|
||||
|
||||
tty.debug('exactly one match for {0} in env -> {1}'.format(
|
||||
m_spec.name, m_spec.dag_hash()))
|
||||
|
||||
if m_spec in env.roots() or cli_args.no_add:
|
||||
# either the single match is a root spec (and --no-add is
|
||||
# the default for roots) or --no-add was stated explicitly
|
||||
tty.debug('just install {0}'.format(m_spec.name))
|
||||
specs_to_install.append(m_spec)
|
||||
else:
|
||||
# the single match is not a root (i.e. it's a dependency),
|
||||
# and --no-add was not specified, so we'll add it as a
|
||||
# root before installing
|
||||
tty.debug('add {0} then install it'.format(m_spec.name))
|
||||
specs_to_add.append((abstract, concrete))
|
||||
|
||||
if specs_to_add:
|
||||
tty.debug('Adding the following specs as roots:')
|
||||
for abstract, concrete in specs_to_add:
|
||||
tty.debug(' {0}'.format(abstract.name))
|
||||
with env.write_transaction():
|
||||
specs_to_install.append(
|
||||
env.concretize_and_add(abstract, concrete))
|
||||
env.write(regenerate_views=False)
|
||||
|
||||
# Install the validated list of cli specs
|
||||
if specs_to_install:
|
||||
tty.debug('Installing the following cli specs:')
|
||||
for s in specs_to_install:
|
||||
tty.debug(' {0}'.format(s.name))
|
||||
env.install_specs(specs_to_install, args=cli_args, **kwargs)
|
||||
else:
|
||||
installs = [(concrete.package, kwargs) for _, concrete in specs]
|
||||
builder = PackageInstaller(installs)
|
||||
@@ -224,6 +286,7 @@ def install_specs(cli_args, kwargs, specs):
|
||||
|
||||
|
||||
def install(parser, args, **kwargs):
|
||||
|
||||
if args.help_cdash:
|
||||
parser = argparse.ArgumentParser(
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
@@ -236,6 +299,15 @@ def install(parser, args, **kwargs):
|
||||
parser.print_help()
|
||||
return
|
||||
|
||||
# The user wants to monitor builds using github.com/spack/spack-monitor
|
||||
if args.use_monitor:
|
||||
monitor = spack.monitor.get_client(
|
||||
host=args.monitor_host,
|
||||
prefix=args.monitor_prefix,
|
||||
disable_auth=args.monitor_disable_auth,
|
||||
tags=args.monitor_tags,
|
||||
)
|
||||
|
||||
reporter = spack.report.collect_info(
|
||||
spack.package.PackageInstaller, '_install_task', args.log_format, args)
|
||||
if args.log_file:
|
||||
@@ -378,4 +450,17 @@ def get_tests(specs):
|
||||
# overwrite all concrete explicit specs from this build
|
||||
kwargs['overwrite'] = [spec.dag_hash() for spec in specs]
|
||||
|
||||
# Update install_args with the monitor args, needed for build task
|
||||
kwargs.update({
|
||||
"monitor_disable_auth": args.monitor_disable_auth,
|
||||
"monitor_keep_going": args.monitor_keep_going,
|
||||
"monitor_host": args.monitor_host,
|
||||
"use_monitor": args.use_monitor,
|
||||
"monitor_prefix": args.monitor_prefix,
|
||||
})
|
||||
|
||||
# If we are using the monitor, we send configs. and create build
|
||||
# The full_hash is the main package id, the build_hash for others
|
||||
if args.use_monitor and specs:
|
||||
monitor.new_configuration(specs)
|
||||
install_specs(args, kwargs, zip(abstract_specs, specs))
|
||||
|
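The environment branch above grew several cases; the decision it implements can be condensed as follows. This is a sketch of the control flow, not the actual helper (the function name and return strings are invented):

def example_classify(m_spec, no_add, roots):
    """What `spack install` should do with a CLI spec inside an environment."""
    if m_spec is None:
        # No match in the environment at all.
        if no_add:
            raise SystemExit('spec not in environment and --no-add was given')
        return 'concretize, add as a root, then install'
    if m_spec in roots or no_add:
        # Single match that is already a root, or the user forbade adding.
        return 'install the existing match as-is'
    # Single match that is only a dependency: promote it before installing.
    return 'add the dependency as a root, then install'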
@@ -48,11 +48,15 @@ def setup_parser(subparser):
|
||||
'-S', '--stages', action='store_true',
|
||||
help="top level stage directory")
|
||||
directories.add_argument(
|
||||
'-b', '--build-dir', action='store_true',
|
||||
help="checked out or expanded source directory for a spec "
|
||||
'--source-dir', action='store_true',
|
||||
help="source directory for a spec "
|
||||
"(requires it to be staged first)")
|
||||
directories.add_argument(
|
||||
'-e', '--env', action='store',
|
||||
'-b', '--build-dir', action='store_true',
|
||||
help="build directory for a spec "
|
||||
"(requires it to be staged first)")
|
||||
directories.add_argument(
|
||||
'-e', '--env', action='store', dest='location_env',
|
||||
help="location of an environment managed by spack")
|
||||
|
||||
arguments.add_common_arguments(subparser, ['spec'])
|
||||
@@ -61,64 +65,77 @@ def setup_parser(subparser):
|
||||
def location(parser, args):
|
||||
if args.module_dir:
|
||||
print(spack.paths.module_path)
|
||||
return
|
||||
|
||||
elif args.spack_root:
|
||||
if args.spack_root:
|
||||
print(spack.paths.prefix)
|
||||
return
|
||||
|
||||
elif args.env:
|
||||
path = spack.environment.root(args.env)
|
||||
if args.location_env:
|
||||
path = spack.environment.root(args.location_env)
|
||||
if not os.path.isdir(path):
|
||||
tty.die("no such environment: '%s'" % args.env)
|
||||
tty.die("no such environment: '%s'" % args.location_env)
|
||||
print(path)
|
||||
return
|
||||
|
||||
elif args.packages:
|
||||
if args.packages:
|
||||
print(spack.repo.path.first_repo().root)
|
||||
return
|
||||
|
||||
elif args.stages:
|
||||
if args.stages:
|
||||
print(spack.stage.get_stage_root())
|
||||
return
|
||||
|
||||
else:
|
||||
specs = spack.cmd.parse_specs(args.spec)
|
||||
if not specs:
|
||||
tty.die("You must supply a spec.")
|
||||
if len(specs) != 1:
|
||||
tty.die("Too many specs. Supply only one.")
|
||||
specs = spack.cmd.parse_specs(args.spec)
|
||||
|
||||
if args.install_dir:
|
||||
# install_dir command matches against installed specs.
|
||||
env = ev.get_env(args, 'location')
|
||||
spec = spack.cmd.disambiguate_spec(specs[0], env)
|
||||
print(spec.prefix)
|
||||
if not specs:
|
||||
tty.die("You must supply a spec.")
|
||||
|
||||
else:
|
||||
spec = specs[0]
|
||||
if len(specs) != 1:
|
||||
tty.die("Too many specs. Supply only one.")
|
||||
|
||||
if args.package_dir:
|
||||
# This one just needs the spec name.
|
||||
print(spack.repo.path.dirname_for_package_name(spec.name))
|
||||
# install_dir command matches against installed specs.
|
||||
if args.install_dir:
|
||||
env = ev.get_env(args, 'location')
|
||||
spec = spack.cmd.disambiguate_spec(specs[0], env)
|
||||
print(spec.prefix)
|
||||
return
|
||||
|
||||
else:
|
||||
spec = spack.cmd.matching_spec_from_env(spec)
|
||||
pkg = spec.package
|
||||
spec = specs[0]
|
||||
|
||||
if args.stage_dir:
|
||||
print(pkg.stage.path)
|
||||
# Package dir just needs the spec name
|
||||
if args.package_dir:
|
||||
print(spack.repo.path.dirname_for_package_name(spec.name))
|
||||
return
|
||||
|
||||
else: # args.build_dir is the default.
|
||||
if not pkg.stage.expanded:
|
||||
tty.die("Build directory does not exist yet. "
|
||||
"Run this to create it:",
|
||||
"spack stage " + " ".join(args.spec))
|
||||
# Either concretize or filter from already concretized environment
|
||||
spec = spack.cmd.matching_spec_from_env(spec)
|
||||
pkg = spec.package
|
||||
|
||||
# Out of source builds have build_directory defined
|
||||
if hasattr(pkg, 'build_directory'):
|
||||
# build_directory can be either absolute or relative
|
||||
# to the stage path in either case os.path.join makes it
|
||||
# absolute
|
||||
print(os.path.normpath(os.path.join(
|
||||
pkg.stage.path,
|
||||
pkg.build_directory
|
||||
)))
|
||||
else:
|
||||
# Otherwise assume in-source builds
|
||||
return print(pkg.stage.source_path)
|
||||
if args.stage_dir:
|
||||
print(pkg.stage.path)
|
||||
return
|
||||
|
||||
if args.build_dir:
|
||||
# Out of source builds have build_directory defined
|
||||
if hasattr(pkg, 'build_directory'):
|
||||
# build_directory can be either absolute or relative to the stage path
|
||||
# in either case os.path.join makes it absolute
|
||||
print(os.path.normpath(os.path.join(
|
||||
pkg.stage.path,
|
||||
pkg.build_directory
|
||||
)))
|
||||
return
|
||||
|
||||
# Otherwise assume in-source builds
|
||||
print(pkg.stage.source_path)
|
||||
return
|
||||
|
||||
# source dir remains, which requires the spec to be staged
|
||||
if not pkg.stage.expanded:
|
||||
tty.die("Source directory does not exist yet. "
|
||||
"Run this to create it:",
|
||||
"spack stage " + " ".join(args.spec))
|
||||
|
||||
# Default to source dir.
|
||||
print(pkg.stage.source_path)
|
||||
|
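One detail worth noting in the build_directory handling above: os.path.join discards earlier components when a later one is absolute, so a single expression covers both relative and absolute values. Illustrative paths:

import os

stage_path = '/tmp/spack-stage/zlib-1.2.11'
print(os.path.normpath(os.path.join(stage_path, 'spack-build')))
# -> /tmp/spack-stage/zlib-1.2.11/spack-build   (relative build_directory)
print(os.path.normpath(os.path.join(stage_path, '/scratch/zlib-build')))
# -> /scratch/zlib-build                         (absolute build_directory)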
@@ -11,7 +11,7 @@
|
||||
import spack.cmd.modules.lmod
|
||||
import spack.cmd.modules.tcl
|
||||
|
||||
description = "manipulate module files"
|
||||
description = "generate/manage module files"
|
||||
section = "user environment"
|
||||
level = "short"
|
||||
|
||||
|
@@ -10,6 +10,7 @@
|
||||
import sys
|
||||
|
||||
import llnl.util.tty as tty
|
||||
import llnl.util.tty.color as color
|
||||
|
||||
import spack
|
||||
import spack.cmd
|
||||
@@ -23,15 +24,20 @@
|
||||
level = 'long'
|
||||
|
||||
#: output options
|
||||
show_options = ('asp', 'output', 'solutions')
|
||||
show_options = ('asp', 'opt', 'output', 'solutions')
|
||||
|
||||
|
||||
def setup_parser(subparser):
|
||||
# Solver arguments
|
||||
subparser.add_argument(
|
||||
'--show', action='store', default=('solutions'),
|
||||
help="outputs: a list with any of: "
|
||||
"%s (default), all" % ', '.join(show_options))
|
||||
'--show', action='store', default='opt,solutions',
|
||||
help="select outputs: comma-separated list of: \n"
|
||||
" asp asp program text\n"
|
||||
" opt optimization criteria for best model\n"
|
||||
" output raw clingo output\n"
|
||||
" solutions models found by asp program\n"
|
||||
" all all of the above"
|
||||
)
|
||||
subparser.add_argument(
|
||||
'--models', action='store', type=int, default=0,
|
||||
help="number of solutions to search (default 0 for all)")
|
||||
@@ -41,10 +47,10 @@ def setup_parser(subparser):
|
||||
subparser, ['long', 'very_long', 'install_status'])
|
||||
subparser.add_argument(
|
||||
'-y', '--yaml', action='store_const', dest='format', default=None,
|
||||
const='yaml', help='print concrete spec as YAML')
|
||||
const='yaml', help='print concrete spec as yaml')
|
||||
subparser.add_argument(
|
||||
'-j', '--json', action='store_const', dest='format', default=None,
|
||||
const='json', help='print concrete spec as YAML')
|
||||
const='json', help='print concrete spec as json')
|
||||
subparser.add_argument(
|
||||
'-c', '--cover', action='store',
|
||||
default='nodes', choices=['nodes', 'edges', 'paths'],
|
||||
@@ -113,9 +119,18 @@ def solve(parser, args):
|
||||
best = min(result.answers)
|
||||
|
||||
opt, _, answer = best
|
||||
if not args.format:
|
||||
tty.msg("Best of %d answers." % result.nmodels)
|
||||
tty.msg("Optimization: %s" % opt)
|
||||
if ("opt" in dump) and (not args.format):
|
||||
tty.msg("Best of %d considered solutions." % result.nmodels)
|
||||
tty.msg("Optimization Criteria:")
|
||||
|
||||
maxlen = max(len(s) for s in result.criteria)
|
||||
color.cprint(
|
||||
"@*{ Priority Criterion %sValue}" % ((maxlen - 10) * " ")
|
||||
)
|
||||
for i, (name, val) in enumerate(zip(result.criteria, opt)):
|
||||
fmt = " @K{%%-8d} %%-%ds%%5d" % maxlen
|
||||
color.cprint(fmt % (i + 1, name, val))
|
||||
print()
|
||||
|
||||
# iterate over roots from command line
|
||||
for input_spec in specs:
|
||||
|
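A toy rendering of the new optimization-criteria table, showing how the width of the widest criterion name drives the format string (criteria names and values are invented; color markup omitted):

criteria = ['version badness', 'non-default variant values']
opt = [0, 2]
maxlen = max(len(s) for s in criteria)
print("  Priority  Criterion %sValue" % ((maxlen - 10) * " "))
for i, (name, val) in enumerate(zip(criteria, opt)):
    print("  %-8d  %-*s%5d" % (i + 1, maxlen, name, val))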
@@ -42,6 +42,10 @@ def setup_parser(subparser):
|
||||
subparser.add_argument(
|
||||
'-N', '--namespaces', action='store_true', default=False,
|
||||
help='show fully qualified package names')
|
||||
subparser.add_argument(
|
||||
'--hash-type', default="build_hash",
|
||||
choices=['build_hash', 'full_hash', 'dag_hash'],
|
||||
help='generate spec with a particular hash type.')
|
||||
subparser.add_argument(
|
||||
'-t', '--types', action='store_true', default=False,
|
||||
help='show dependency types')
|
||||
@@ -83,11 +87,14 @@ def spec(parser, args):
|
||||
if spec.name in spack.repo.path or spec.virtual:
|
||||
spec.concretize()
|
||||
|
||||
# The user can specify the hash type to use
|
||||
hash_type = getattr(ht, args.hash_type)
|
||||
|
||||
if args.format == 'yaml':
|
||||
# use write because to_yaml already has a newline.
|
||||
sys.stdout.write(spec.to_yaml(hash=ht.build_hash))
|
||||
sys.stdout.write(spec.to_yaml(hash=hash_type))
|
||||
else:
|
||||
print(spec.to_json(hash=ht.build_hash))
|
||||
print(spec.to_json(hash=hash_type))
|
||||
continue
|
||||
|
||||
with tree_context():
|
||||
|
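The --hash-type plumbing relies on the argparse choices matching attribute names on spack.hash_types, so a plain getattr resolves the selection. Minimal illustration:

import spack.hash_types as ht

for name in ('build_hash', 'full_hash', 'dag_hash'):
    descriptor = getattr(ht, name)
    print(name, '->', descriptor)
    # spec.to_yaml(hash=descriptor) / spec.to_json(hash=descriptor) would use it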
@@ -41,8 +41,14 @@ def stage(parser, args):
|
||||
if args.deprecated:
|
||||
spack.config.set('config:deprecated', True, scope='command_line')
|
||||
|
||||
specs = spack.cmd.parse_specs(args.specs, concretize=True)
|
||||
specs = spack.cmd.parse_specs(args.specs, concretize=False)
|
||||
|
||||
# prevent multiple specs from extracting in the same folder
|
||||
if len(specs) > 1 and args.path:
|
||||
tty.die("`--path` requires a single spec, but multiple were provided")
|
||||
|
||||
for spec in specs:
|
||||
spec = spack.cmd.matching_spec_from_env(spec)
|
||||
package = spack.repo.get(spec)
|
||||
if args.path:
|
||||
package.path = args.path
|
||||
|
@@ -24,10 +24,7 @@
|
||||
from itertools import zip_longest # novm
|
||||
|
||||
|
||||
description = (
|
||||
"runs source code style checks on Spack. Requires flake8, mypy, black for "
|
||||
+ "their respective checks"
|
||||
)
|
||||
description = "runs source code style checks on spack"
|
||||
section = "developer"
|
||||
level = "long"
|
||||
|
||||
|
@@ -25,8 +25,8 @@
|
||||
|
||||
|
||||
# tutorial configuration parameters
|
||||
tutorial_branch = "releases/v0.15"
|
||||
tutorial_mirror = "s3://spack-tutorial-container/mirror/"
|
||||
tutorial_branch = "releases/v0.16"
|
||||
tutorial_mirror = "s3://spack-binaries-prs/tutorial/ecp21/mirror"
|
||||
tutorial_key = os.path.join(spack.paths.share_path, "keys", "tutorial.pub")
|
||||
|
||||
# configs to remove
|
||||
|
@@ -12,7 +12,7 @@

import spack.cmd.common.arguments as arguments
import spack.repo
-from spack.version import VersionList, ver
+from spack.version import ver, infinity_versions

description = "list available versions of a package"
section = "packaging"
@@ -66,7 +66,10 @@ def versions(parser, args):
    if args.new:
        if sys.stdout.isatty():
            tty.msg('New remote versions (not yet checksummed):')
-       highest_safe_version = VersionList(safe_versions).highest_numeric()
+       numeric_safe_versions = list(filter(
+           lambda v: str(v) not in infinity_versions,
+           safe_versions))
+       highest_safe_version = max(numeric_safe_versions)
        remote_versions = set([ver(v) for v in set(fetched_versions)
                               if v > highest_safe_version])
    else:
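The motivation for the filter above: symbolic versions such as develop compare greater than any numeric release, so leaving them in would hide every genuinely new remote version. A hedged sketch with example data:

from spack.version import ver, infinity_versions

safe_versions = [ver('develop'), ver('1.2.1'), ver('1.2.0')]   # example data
numeric = [v for v in safe_versions if str(v) not in infinity_versions]
print(max(safe_versions))   # develop  (wrong baseline for finding "new" versions)
print(max(numeric))         # 1.2.1    (what the command now compares against)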
@@ -24,11 +24,10 @@
|
||||
import spack.config
|
||||
import spack.compiler
|
||||
import spack.architecture
|
||||
import spack.util.imp as simp
|
||||
|
||||
from spack.util.environment import get_path
|
||||
from spack.util.naming import mod_to_class
|
||||
|
||||
_imported_compilers_module = 'spack.compilers'
|
||||
_path_instance_vars = ['cc', 'cxx', 'f77', 'fc']
|
||||
_flags_instance_vars = ['cflags', 'cppflags', 'cxxflags', 'fflags']
|
||||
_other_instance_vars = ['modules', 'operating_system', 'environment',
|
||||
@@ -472,17 +471,17 @@ def get_compiler_duplicates(compiler_spec, arch_spec):
|
||||
@llnl.util.lang.memoized
|
||||
def class_for_compiler_name(compiler_name):
|
||||
"""Given a compiler module name, get the corresponding Compiler class."""
|
||||
assert(supported(compiler_name))
|
||||
assert supported(compiler_name)
|
||||
|
||||
# Hack to be able to call the compiler `apple-clang` while still
|
||||
# using a valid python name for the module
|
||||
module_name = compiler_name
|
||||
submodule_name = compiler_name
|
||||
if compiler_name == 'apple-clang':
|
||||
module_name = compiler_name.replace('-', '_')
|
||||
submodule_name = compiler_name.replace('-', '_')
|
||||
|
||||
file_path = os.path.join(spack.paths.compilers_path, module_name + ".py")
|
||||
compiler_mod = simp.load_source(_imported_compilers_module, file_path)
|
||||
cls = getattr(compiler_mod, mod_to_class(compiler_name))
|
||||
module_name = '.'.join(['spack', 'compilers', submodule_name])
|
||||
module_obj = __import__(module_name, fromlist=[None])
|
||||
cls = getattr(module_obj, mod_to_class(compiler_name))
|
||||
|
||||
# make a note of the name in the module so we can get to it easily.
|
||||
cls.name = compiler_name
|
||||
|
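For reference on the import idiom adopted above (the same one used later in the hooks rewrite): __import__('A.B') returns package A, while passing a non-empty fromlist returns submodule B itself. Module names below are just examples:

pkg = __import__('spack.compilers.gcc')                     # -> the 'spack' package
mod = __import__('spack.compilers.gcc', fromlist=[None])    # -> the gcc submodule
print(pkg.__name__)   # spack
print(mod.__name__)   # spack.compilers.gcc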
@@ -25,6 +25,9 @@ class Aocc(Compiler):
|
||||
# Subclasses use possible names of Fortran 90 compiler
|
||||
fc_names = ['flang']
|
||||
|
||||
PrgEnv = 'PrgEnv-aocc'
|
||||
PrgEnv_compiler = 'aocc'
|
||||
|
||||
version_argument = '--version'
|
||||
|
||||
@property
|
||||
|
@@ -724,6 +724,24 @@ def concretize_specs_together(*abstract_specs, **kwargs):
|
||||
Returns:
|
||||
List of concretized specs
|
||||
"""
|
||||
if spack.config.get('config:concretizer') == 'original':
|
||||
return _concretize_specs_together_original(*abstract_specs, **kwargs)
|
||||
return _concretize_specs_together_new(*abstract_specs, **kwargs)
|
||||
|
||||
|
||||
def _concretize_specs_together_new(*abstract_specs, **kwargs):
|
||||
import spack.solver.asp
|
||||
result = spack.solver.asp.solve(abstract_specs)
|
||||
|
||||
if not result.satisfiable:
|
||||
result.print_cores()
|
||||
tty.die("Unsatisfiable spec.")
|
||||
|
||||
opt, i, answer = min(result.answers)
|
||||
return [answer[s.name].copy() for s in abstract_specs]
|
||||
|
||||
|
||||
def _concretize_specs_together_original(*abstract_specs, **kwargs):
|
||||
def make_concretization_repository(abstract_specs):
|
||||
"""Returns the path to a temporary repository created to contain
|
||||
a fake package that depends on all of the abstract specs.
|
||||
|
@@ -35,7 +35,6 @@
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import multiprocessing
|
||||
from contextlib import contextmanager
|
||||
from six import iteritems
|
||||
from ordereddict_backport import OrderedDict
|
||||
@@ -61,6 +60,7 @@
|
||||
import spack.schema.upstreams
|
||||
import spack.schema.env
|
||||
from spack.error import SpackError
|
||||
from spack.util.cpus import cpus_available
|
||||
|
||||
# Hacked yaml for configuration files preserves line numbers.
|
||||
import spack.util.spack_yaml as syaml
|
||||
@@ -110,7 +110,7 @@
|
||||
'verify_ssl': True,
|
||||
'checksum': True,
|
||||
'dirty': False,
|
||||
'build_jobs': min(16, multiprocessing.cpu_count()),
|
||||
'build_jobs': min(16, cpus_available()),
|
||||
'build_stage': '$tempdir/spack-stage',
|
||||
'concretizer': 'original',
|
||||
}
|
||||
@@ -129,7 +129,7 @@ def first_existing(dictionary, keys):
|
||||
try:
|
||||
return next(k for k in keys if k in dictionary)
|
||||
except StopIteration:
|
||||
raise KeyError("None of %s is in dict!" % keys)
|
||||
raise KeyError("None of %s is in dict!" % str(keys))
|
||||
|
||||
|
||||
class ConfigScope(object):
|
||||
@@ -241,11 +241,18 @@ def get_section(self, section):
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
|
||||
# This bit ensures we have read the file and have
|
||||
# the raw data in memory
|
||||
if self._raw_data is None:
|
||||
self._raw_data = read_config_file(self.path, self.schema)
|
||||
if self._raw_data is None:
|
||||
return None
|
||||
|
||||
# Here we know we have the raw data and ensure we
|
||||
# populate the sections dictionary, which may be
|
||||
# cleared by the clear() method
|
||||
if not self.sections:
|
||||
section_data = self._raw_data
|
||||
for key in self.yaml_path:
|
||||
if section_data is None:
|
||||
@@ -254,6 +261,7 @@ def get_section(self, section):
|
||||
|
||||
for section_key, data in section_data.items():
|
||||
self.sections[section_key] = {section_key: data}
|
||||
|
||||
return self.sections.get(section, None)
|
||||
|
||||
def _write_section(self, section):
|
||||
@@ -354,6 +362,10 @@ def _write_section(self, section):
|
||||
def __repr__(self):
|
||||
return '<InternalConfigScope: %s>' % self.name
|
||||
|
||||
def clear(self):
|
||||
# no cache to clear here.
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def _process_dict_keyname_overrides(data):
|
||||
"""Turn a trailing `:' in a key name into an override attribute."""
|
||||
|
@@ -17,6 +17,7 @@
|
||||
import spack.config
|
||||
import spack.hash_types as ht
|
||||
import spack.spec
|
||||
import spack.util.spack_json as sjson
|
||||
from spack.error import SpackError
|
||||
|
||||
|
||||
@@ -247,6 +248,17 @@ def write_spec(self, spec, path):
|
||||
# full provenance by full hash so it's available if we want it later
|
||||
spec.to_yaml(f, hash=ht.full_hash)
|
||||
|
||||
def write_host_environment(self, spec):
|
||||
"""The host environment is a json file with os, kernel, and spack
|
||||
versioning. We use it in the case that an analysis later needs to
|
||||
easily access this information.
|
||||
"""
|
||||
from spack.util.environment import get_host_environment_metadata
|
||||
env_file = self.env_metadata_path(spec)
|
||||
environ = get_host_environment_metadata()
|
||||
with open(env_file, 'w') as fd:
|
||||
sjson.dump(environ, fd)
|
||||
|
||||
def read_spec(self, path):
|
||||
"""Read the contents of a file and parse them as a spec"""
|
||||
try:
|
||||
@@ -300,6 +312,9 @@ def disable_upstream_check(self):
|
||||
def metadata_path(self, spec):
|
||||
return os.path.join(spec.prefix, self.metadata_dir)
|
||||
|
||||
def env_metadata_path(self, spec):
|
||||
return os.path.join(self.metadata_path(spec), "install_environment.json")
|
||||
|
||||
def build_packages_path(self, spec):
|
||||
return os.path.join(self.metadata_path(spec), self.packages_dir)
|
||||
|
||||
|
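The keys actually written come from get_host_environment_metadata(); the ones below are illustrative guesses at the kind of information recorded, not a definitive schema:

import json

environ = {                      # example values only
    'platform': 'linux',
    'host_os': 'ubuntu20.04',
    'host_target': 'zen2',
    'hostname': 'node042',
    'kernel_version': '5.4.0-70-generic',
    'spack_version': '0.16.1',
}
with open('install_environment.json', 'w') as fd:
    json.dump(environ, fd)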
@@ -2,16 +2,14 @@
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import collections
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import shutil
|
||||
import copy
|
||||
import socket
|
||||
|
||||
import six
|
||||
import ruamel.yaml as yaml
|
||||
|
||||
from ordereddict_backport import OrderedDict
|
||||
|
||||
@@ -33,13 +31,12 @@
|
||||
import spack.user_environment as uenv
|
||||
from spack.filesystem_view import YamlFilesystemView
|
||||
import spack.util.environment
|
||||
import spack.architecture as architecture
|
||||
from spack.spec import Spec
|
||||
from spack.spec_list import SpecList, InvalidSpecConstraintError
|
||||
from spack.variant import UnknownVariantError
|
||||
import spack.util.hash
|
||||
import spack.util.lock as lk
|
||||
from spack.util.path import substitute_path_variables
|
||||
from spack.installer import PackageInstaller
|
||||
import spack.util.path
|
||||
|
||||
#: environment variable used to indicate the active environment
|
||||
@@ -447,21 +444,11 @@ def _write_yaml(data, str_or_file):
|
||||
|
||||
def _eval_conditional(string):
|
||||
"""Evaluate conditional definitions using restricted variable scope."""
|
||||
arch = architecture.Arch(
|
||||
architecture.platform(), 'default_os', 'default_target')
|
||||
arch_spec = spack.spec.Spec('arch=%s' % arch)
|
||||
valid_variables = {
|
||||
'target': str(arch.target),
|
||||
'os': str(arch.os),
|
||||
'platform': str(arch.platform),
|
||||
'arch': arch_spec,
|
||||
'architecture': arch_spec,
|
||||
'arch_str': str(arch),
|
||||
valid_variables = spack.util.environment.get_host_environment()
|
||||
valid_variables.update({
|
||||
're': re,
|
||||
'env': os.environ,
|
||||
'hostname': socket.gethostname()
|
||||
}
|
||||
|
||||
})
|
||||
return eval(string, valid_variables)
|
||||
|
||||
|
||||
@@ -486,8 +473,13 @@ def __eq__(self, other):
|
||||
self.link == other.link])
|
||||
|
||||
def to_dict(self):
|
||||
ret = {'root': self.root}
|
||||
ret = syaml.syaml_dict([('root', self.root)])
|
||||
if self.projections:
|
||||
# projections guaranteed to be ordered dict if true-ish
|
||||
# for python2.6, may be syaml or ruamel.yaml implementation
|
||||
# so we have to check for both
|
||||
types = (OrderedDict, syaml.syaml_dict, yaml.comments.CommentedMap)
|
||||
assert isinstance(self.projections, types)
|
||||
ret['projections'] = self.projections
|
||||
if self.select:
|
||||
ret['select'] = self.select
|
||||
@@ -506,10 +498,66 @@ def from_dict(base_path, d):
|
||||
d.get('exclude', []),
|
||||
d.get('link', default_view_link))
|
||||
|
||||
def view(self):
|
||||
root = self.root
|
||||
if not os.path.isabs(root):
|
||||
root = os.path.normpath(os.path.join(self.base, self.root))
|
||||
@property
|
||||
def _current_root(self):
|
||||
if not os.path.exists(self.root):
|
||||
return None
|
||||
|
||||
root = os.readlink(self.root)
|
||||
if os.path.isabs(root):
|
||||
return root
|
||||
|
||||
root_dir = os.path.dirname(self.root)
|
||||
return os.path.join(root_dir, root)
|
||||
|
||||
def _next_root(self, specs):
|
||||
content_hash = self.content_hash(specs)
|
||||
root_dir = os.path.dirname(self.root)
|
||||
root_name = os.path.basename(self.root)
|
||||
return os.path.join(root_dir, '._%s' % root_name, content_hash)
|
||||
|
||||
def content_hash(self, specs):
|
||||
d = syaml.syaml_dict([
|
||||
('descriptor', self.to_dict()),
|
||||
('specs', [(spec.full_hash(), spec.prefix) for spec in sorted(specs)])
|
||||
])
|
||||
contents = sjson.dump(d)
|
||||
return spack.util.hash.b32_hash(contents)
|
||||
|
||||
def get_projection_for_spec(self, spec):
|
||||
"""Get projection for spec relative to view root
|
||||
|
||||
Getting the projection from the underlying root will get the temporary
|
||||
projection. This gives the permanent projection relative to the root
|
||||
symlink.
|
||||
"""
|
||||
view = self.view()
|
||||
view_path = view.get_projection_for_spec(spec)
|
||||
rel_path = os.path.relpath(view_path, self._current_root)
|
||||
return os.path.join(self.root, rel_path)
|
||||
|
||||
def view(self, new=None):
|
||||
"""
|
||||
Generate the FilesystemView object for this ViewDescriptor
|
||||
|
||||
By default, this method returns a FilesystemView object rooted at the
|
||||
current underlying root of this ViewDescriptor (self._current_root)
|
||||
|
||||
Raise if new is None and there is no current view
|
||||
|
||||
Arguments:
|
||||
new (string or None): If a string, create a FilesystemView
|
||||
rooted at that path. Default None. This should only be used to
|
||||
regenerate the view, and cannot be used to access specs.
|
||||
"""
|
||||
root = self._current_root
|
||||
if new:
|
||||
root = new
|
||||
if not root:
|
||||
# This can only be hit if we write a future bug
|
||||
msg = ("Attempting to get nonexistent view from environment. "
|
||||
"View root is at %s" % self.root)
|
||||
raise SpackEnvironmentViewError(msg)
|
||||
return YamlFilesystemView(root, spack.store.layout,
|
||||
ignore_conflicts=True,
|
||||
projections=self.projections)
|
||||
@@ -531,9 +579,10 @@ def __contains__(self, spec):
|
||||
|
||||
return True
|
||||
|
||||
def regenerate(self, all_specs, roots):
|
||||
def specs_for_view(self, all_specs, roots):
|
||||
specs_for_view = []
|
||||
specs = all_specs if self.link == 'all' else roots
|
||||
|
||||
for spec in specs:
|
||||
# The view does not store build deps, so if we want it to
|
||||
# recognize environment specs (which do store build deps),
|
||||
@@ -545,6 +594,10 @@ def regenerate(self, all_specs, roots):
|
||||
spec_copy._hash = spec._hash
|
||||
spec_copy._normal = spec._normal
|
||||
specs_for_view.append(spec_copy)
|
||||
return specs_for_view
|
||||
|
||||
def regenerate(self, all_specs, roots):
|
||||
specs_for_view = self.specs_for_view(all_specs, roots)
|
||||
|
||||
# regeneration queries the database quite a bit; this read
|
||||
# transaction ensures that we don't repeatedly lock/unlock.
|
||||
@@ -554,36 +607,52 @@ def regenerate(self, all_specs, roots):
|
||||
|
||||
# To ensure there are no conflicts with packages being installed
|
||||
# that cannot be resolved or have repos that have been removed
|
||||
# we always regenerate the view from scratch. We must first make
|
||||
# sure the root directory exists for the very first time though.
|
||||
root = os.path.normpath(
|
||||
self.root if os.path.isabs(self.root) else os.path.join(
|
||||
self.base, self.root)
|
||||
)
|
||||
fs.mkdirp(root)
|
||||
# we always regenerate the view from scratch.
|
||||
# We will do this by hashing the view contents and putting the view
|
||||
# in a directory by hash, and then having a symlink to the real
|
||||
# view in the root. The real root for a view at /dirname/basename
|
||||
# will be /dirname/._basename_<hash>.
|
||||
# This allows for atomic swaps when we update the view
|
||||
|
||||
# The tempdir for the directory transaction must be in the same
|
||||
# filesystem mount as the view for symlinks to work. Provide
|
||||
# dirname(root) as the tempdir for the
|
||||
# replace_directory_transaction because it must be on the same
|
||||
# filesystem mount as the view itself. Otherwise it may be
|
||||
# impossible to construct the view in the tempdir even when it can
|
||||
# be constructed in-place.
|
||||
with fs.replace_directory_transaction(root, os.path.dirname(root)):
|
||||
view = self.view()
|
||||
# cache the roots because the way we determine which is which does
|
||||
# not work while we are updating
|
||||
new_root = self._next_root(installed_specs_for_view)
|
||||
old_root = self._current_root
|
||||
|
||||
view.clean()
|
||||
specs_in_view = set(view.get_all_specs())
|
||||
tty.msg("Updating view at {0}".format(self.root))
|
||||
if new_root == old_root:
|
||||
tty.debug("View at %s does not need regeneration." % self.root)
|
||||
return
|
||||
|
||||
rm_specs = specs_in_view - installed_specs_for_view
|
||||
add_specs = installed_specs_for_view - specs_in_view
|
||||
# construct view at new_root
|
||||
tty.msg("Updating view at {0}".format(self.root))
|
||||
|
||||
# pass all_specs in, as it's expensive to read all the
|
||||
# spec.yaml files twice.
|
||||
view.remove_specs(*rm_specs, with_dependents=False,
|
||||
all_specs=specs_in_view)
|
||||
view.add_specs(*add_specs, with_dependencies=False)
|
||||
view = self.view(new=new_root)
|
||||
fs.mkdirp(new_root)
|
||||
view.add_specs(*installed_specs_for_view,
|
||||
with_dependencies=False)
|
||||
|
||||
# create symlink from tmpname to new_root
|
||||
root_dirname = os.path.dirname(self.root)
|
||||
tmp_symlink_name = os.path.join(root_dirname, '._view_link')
|
||||
if os.path.exists(tmp_symlink_name):
|
||||
os.unlink(tmp_symlink_name)
|
||||
os.symlink(new_root, tmp_symlink_name)
|
||||
|
||||
# mv symlink atomically over root symlink to old_root
|
||||
if os.path.exists(self.root) and not os.path.islink(self.root):
|
||||
msg = "Cannot create view: "
|
||||
msg += "file already exists and is not a link: %s" % self.root
|
||||
raise SpackEnvironmentViewError(msg)
|
||||
os.rename(tmp_symlink_name, self.root)
|
||||
|
||||
# remove old_root
|
||||
if old_root and os.path.exists(old_root):
|
||||
try:
|
||||
shutil.rmtree(old_root)
|
||||
except (IOError, OSError) as e:
|
||||
msg = "Failed to remove old view at %s\n" % old_root
|
||||
msg += str(e)
|
||||
tty.warn(msg)
|
||||
|
||||
|
||||
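The core of the new view-regeneration scheme, isolated for clarity: build the new view under a content-hash directory, point a temporary symlink at it, then rename() the symlink over the old one so readers never observe a half-built view. The paths below are examples and <content-hash> is a placeholder:

import os
import shutil

root = '/env/.spack-env/view'                        # the stable symlink users see
new_root = '/env/.spack-env/._view/<content-hash>'   # per-hash directory (placeholder)
old_root = os.readlink(root) if os.path.islink(root) else None

tmp_link = os.path.join(os.path.dirname(root), '._view_link')
if os.path.lexists(tmp_link):
    os.unlink(tmp_link)
os.symlink(new_root, tmp_link)
os.rename(tmp_link, root)        # atomic on POSIX: replaces the old symlink in one step

if old_root and os.path.exists(old_root):
    shutil.rmtree(old_root)      # the previous view directory can now be dropped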
class Environment(object):
|
||||
@@ -1290,19 +1359,36 @@ def check_views(self):
|
||||
def _env_modifications_for_default_view(self, reverse=False):
|
||||
all_mods = spack.util.environment.EnvironmentModifications()
|
||||
|
||||
errors = []
|
||||
for _, spec in self.concretized_specs():
|
||||
if spec in self.default_view and spec.package.installed:
|
||||
try:
|
||||
mods = uenv.environment_modifications_for_spec(
|
||||
spec, self.default_view)
|
||||
except Exception as e:
|
||||
msg = ("couldn't get environment settings for %s"
|
||||
% spec.format("{name}@{version} /{hash:7}"))
|
||||
errors.append((msg, str(e)))
|
||||
continue
|
||||
visited = set()
|
||||
|
||||
all_mods.extend(mods.reversed() if reverse else mods)
|
||||
errors = []
|
||||
for _, root_spec in self.concretized_specs():
|
||||
if root_spec in self.default_view and root_spec.package.installed:
|
||||
for spec in root_spec.traverse(deptype='run', root=True):
|
||||
if spec.name in visited:
|
||||
# It is expected that only one instance of the package
|
||||
# can be added to the environment - do not attempt to
|
||||
# add multiple.
|
||||
tty.debug(
|
||||
"Not adding {0} to shell modifications: "
|
||||
"this package has already been added".format(
|
||||
spec.format("{name}/{hash:7}")
|
||||
)
|
||||
)
|
||||
continue
|
||||
else:
|
||||
visited.add(spec.name)
|
||||
|
||||
try:
|
||||
mods = uenv.environment_modifications_for_spec(
|
||||
spec, self.default_view)
|
||||
except Exception as e:
|
||||
msg = ("couldn't get environment settings for %s"
|
||||
% spec.format("{name}@{version} /{hash:7}"))
|
||||
errors.append((msg, str(e)))
|
||||
continue
|
||||
|
||||
all_mods.extend(mods.reversed() if reverse else mods)
|
||||
|
||||
return all_mods, errors
|
||||
|
||||
@@ -1454,20 +1540,31 @@ def install_all(self, args=None, **install_args):
|
||||
args (Namespace): argparse namespace with command arguments
|
||||
install_args (dict): keyword install arguments
|
||||
"""
|
||||
self.install_specs(None, args=args, **install_args)
|
||||
|
||||
def install_specs(self, specs=None, args=None, **install_args):
|
||||
from spack.installer import PackageInstaller
|
||||
|
||||
tty.debug('Assessing installation status of environment packages')
|
||||
# If "spack install" is invoked repeatedly for a large environment
|
||||
# where all specs are already installed, the operation can take
|
||||
# a large amount of time due to repeatedly acquiring and releasing
|
||||
# locks; this does an initial check across all specs within a single
|
||||
# DB read transaction to reduce time spent in this case.
|
||||
specs_to_install = self.uninstalled_specs()
|
||||
# DB read transaction to reduce time spent in this case. In the next
|
||||
# three lines we remove any already-installed root specs from the list
|
||||
# to install. However, uninstalled_specs() only considers root specs,
|
||||
# so this will allow dep specs to be unnecessarily re-installed.
|
||||
uninstalled_roots = self.uninstalled_specs()
|
||||
specs_to_install = specs or uninstalled_roots
|
||||
specs_to_install = [s for s in specs_to_install
|
||||
if s not in self.roots() or s in uninstalled_roots]
|
||||
|
||||
if not specs_to_install:
|
||||
tty.msg('All of the packages are already installed')
|
||||
return
|
||||
|
||||
tty.debug('Processing {0} uninstalled specs'
|
||||
.format(len(specs_to_install)))
|
||||
tty.debug('Processing {0} uninstalled specs'.format(
|
||||
len(specs_to_install)))
|
||||
|
||||
install_args['overwrite'] = install_args.get(
|
||||
'overwrite', []) + self._get_overwrite_specs()
|
||||
@@ -1587,7 +1684,7 @@ def matching_spec(self, spec):
|
||||
if abstract)
|
||||
|
||||
if len(root_matches) == 1:
|
||||
return root_matches[0][1]
|
||||
return list(root_matches.items())[0][0]
|
||||
|
||||
# More than one spec matched, and either multiple roots matched or
|
||||
# none of the matches were roots
|
||||
@@ -2092,3 +2189,7 @@ def is_latest_format(manifest):
|
||||
|
||||
class SpackEnvironmentError(spack.error.SpackError):
|
||||
"""Superclass for all errors to do with Spack environments."""
|
||||
|
||||
|
||||
class SpackEnvironmentViewError(SpackEnvironmentError):
|
||||
"""Class for errors regarding view generation."""
|
||||
|
@@ -292,7 +292,15 @@ def mirror_id(self):
|
||||
|
||||
@property
|
||||
def candidate_urls(self):
|
||||
return [self.url] + (self.mirrors or [])
|
||||
urls = []
|
||||
|
||||
for url in [self.url] + (self.mirrors or []):
|
||||
if url.startswith('file://'):
|
||||
path = urllib_parse.quote(url[len('file://'):])
|
||||
url = 'file://' + path
|
||||
urls.append(url)
|
||||
|
||||
return urls
|
||||
|
||||
@_needs_stage
|
||||
def fetch(self):
|
||||
@@ -467,6 +475,8 @@ def expand(self):
|
||||
tarball_container = os.path.join(self.stage.path,
|
||||
"spack-expanded-archive")
|
||||
|
||||
# Below we assume that the command to decompress expands the
|
||||
# archive in the current working directory
|
||||
mkdirp(tarball_container)
|
||||
with working_dir(tarball_container):
|
||||
decompress(self.archive_file)
|
||||
|
@@ -2,65 +2,92 @@
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""This package contains modules with hooks for various stages in the
|
||||
Spack install process. You can add modules here and they'll be
|
||||
executed by package at various times during the package lifecycle.
|
||||
Spack install process. You can add modules here and they'll be
|
||||
executed by package at various times during the package lifecycle.
|
||||
|
||||
Each hook is just a function that takes a package as a parameter.
|
||||
Hooks are not executed in any particular order.
|
||||
Each hook is just a function that takes a package as a parameter.
|
||||
Hooks are not executed in any particular order.
|
||||
|
||||
Currently the following hooks are supported:
|
||||
Currently the following hooks are supported:
|
||||
|
||||
* pre_install(spec)
|
||||
* post_install(spec)
|
||||
* pre_uninstall(spec)
|
||||
* post_uninstall(spec)
|
||||
* pre_install(spec)
|
||||
* post_install(spec)
|
||||
* pre_uninstall(spec)
|
||||
* post_uninstall(spec)
|
||||
* on_install_start(spec)
|
||||
* on_install_success(spec)
|
||||
* on_install_failure(spec)
|
||||
* on_phase_success(pkg, phase_name, log_file)
|
||||
* on_phase_error(pkg, phase_name, log_file)
|
||||
* on_phase_error(pkg, phase_name, log_file)
|
||||
* on_analyzer_save(pkg, result)
|
||||
|
||||
This can be used to implement support for things like module
|
||||
systems (e.g. modules, lmod, etc.) or to add other custom
|
||||
features.
|
||||
This can be used to implement support for things like module
|
||||
systems (e.g. modules, lmod, etc.) or to add other custom
|
||||
features.
|
||||
"""
|
||||
import os.path
|
||||
|
||||
import llnl.util.lang
|
||||
import spack.paths
|
||||
import spack.util.imp as simp
|
||||
from llnl.util.lang import memoized, list_modules
|
||||
|
||||
|
||||
@memoized
|
||||
def all_hook_modules():
|
||||
modules = []
|
||||
for name in list_modules(spack.paths.hooks_path):
|
||||
mod_name = __name__ + '.' + name
|
||||
path = os.path.join(spack.paths.hooks_path, name) + ".py"
|
||||
mod = simp.load_source(mod_name, path)
|
||||
|
||||
if name == 'write_install_manifest':
|
||||
last_mod = mod
|
||||
else:
|
||||
modules.append(mod)
|
||||
|
||||
# put `write_install_manifest` as the last hook to run
|
||||
modules.append(last_mod)
|
||||
return modules
|
||||
|
||||
|
||||
class HookRunner(object):
|
||||
class _HookRunner(object):
|
||||
#: Stores all hooks on first call, shared among
|
||||
#: all HookRunner objects
|
||||
_hooks = None
|
||||
|
||||
def __init__(self, hook_name):
|
||||
self.hook_name = hook_name
|
||||
|
||||
@classmethod
|
||||
def _populate_hooks(cls):
|
||||
# Lazily populate the list of hooks
|
||||
cls._hooks = []
|
||||
relative_names = list(llnl.util.lang.list_modules(
|
||||
spack.paths.hooks_path
|
||||
))
|
||||
|
||||
# We want this hook to be the last registered
|
||||
relative_names.sort(key=lambda x: x == 'write_install_manifest')
|
||||
assert relative_names[-1] == 'write_install_manifest'
|
||||
|
||||
for name in relative_names:
|
||||
module_name = __name__ + '.' + name
|
||||
# When importing a module from a package, __import__('A.B', ...)
|
||||
# returns package A when 'fromlist' is empty. If fromlist is not
|
||||
# empty it returns the submodule B instead
|
||||
# See: https://stackoverflow.com/a/2725668/771663
|
||||
module_obj = __import__(module_name, fromlist=[None])
|
||||
cls._hooks.append((module_name, module_obj))
|
||||
|
||||
@property
|
||||
def hooks(self):
|
||||
if not self._hooks:
|
||||
self._populate_hooks()
|
||||
return self._hooks
|
||||
|
||||
def __call__(self, *args, **kwargs):
|
||||
for module in all_hook_modules():
|
||||
for _, module in self.hooks:
|
||||
if hasattr(module, self.hook_name):
|
||||
hook = getattr(module, self.hook_name)
|
||||
if hasattr(hook, '__call__'):
|
||||
hook(*args, **kwargs)
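A hook module, as the docstring above describes, only needs to define functions named after the hooks it cares about; _HookRunner discovers them by name in spack.paths.hooks_path. A minimal sketch of a hypothetical extra hook module (the file name and message are purely illustrative, not part of this change):

# Hypothetical file lib/spack/spack/hooks/announce.py -- illustration only.
import llnl.util.tty as tty


def post_install(spec):
    # Invoked by _HookRunner('post_install') for every module in
    # spack.paths.hooks_path that defines a post_install function.
    tty.msg("finished installing {0}".format(spec.name))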
|
||||
|
||||
|
||||
pre_install = HookRunner('pre_install')
|
||||
post_install = HookRunner('post_install')
|
||||
# pre/post install hooks are run by the install subprocess
|
||||
pre_install = _HookRunner('pre_install')
|
||||
post_install = _HookRunner('post_install')
|
||||
|
||||
pre_uninstall = HookRunner('pre_uninstall')
|
||||
post_uninstall = HookRunner('post_uninstall')
|
||||
# These hooks are run within an install subprocess
|
||||
pre_uninstall = _HookRunner('pre_uninstall')
|
||||
post_uninstall = _HookRunner('post_uninstall')
|
||||
on_phase_success = _HookRunner('on_phase_success')
|
||||
on_phase_error = _HookRunner('on_phase_error')
|
||||
|
||||
# These are hooks in installer.py, before starting install subprocess
|
||||
on_install_start = _HookRunner('on_install_start')
|
||||
on_install_success = _HookRunner('on_install_success')
|
||||
on_install_failure = _HookRunner('on_install_failure')
|
||||
|
||||
# Analyzer hooks
|
||||
on_analyzer_save = _HookRunner('on_analyzer_save')
|
||||
|
73
lib/spack/spack/hooks/monitor.py
Normal file
@@ -0,0 +1,73 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import llnl.util.tty as tty
|
||||
import spack.monitor
|
||||
|
||||
|
||||
def on_install_start(spec):
|
||||
"""On start of an install, we want to ping the server if it exists
|
||||
"""
|
||||
if not spack.monitor.cli:
|
||||
return
|
||||
|
||||
tty.debug("Running on_install_start for %s" % spec)
|
||||
build_id = spack.monitor.cli.new_build(spec)
|
||||
tty.verbose("Build created with id %s" % build_id)
|
||||
|
||||
|
||||
def on_install_success(spec):
|
||||
"""On the success of an install (after everything is complete)
|
||||
"""
|
||||
if not spack.monitor.cli:
|
||||
return
|
||||
|
||||
tty.debug("Running on_install_success for %s" % spec)
|
||||
result = spack.monitor.cli.update_build(spec, status="SUCCESS")
|
||||
tty.verbose(result.get('message'))
|
||||
|
||||
|
||||
def on_install_failure(spec):
|
||||
"""Triggered on failure of an install
|
||||
"""
|
||||
if not spack.monitor.cli:
|
||||
return
|
||||
|
||||
tty.debug("Running on_install_failure for %s" % spec)
|
||||
result = spack.monitor.cli.fail_task(spec)
|
||||
tty.verbose(result.get('message'))
|
||||
|
||||
|
||||
def on_phase_success(pkg, phase_name, log_file):
|
||||
"""Triggered on a phase success
|
||||
"""
|
||||
if not spack.monitor.cli:
|
||||
return
|
||||
|
||||
tty.debug("Running on_phase_success %s, phase %s" % (pkg.name, phase_name))
|
||||
result = spack.monitor.cli.send_phase(pkg, phase_name, log_file, "SUCCESS")
|
||||
tty.verbose(result.get('message'))
|
||||
|
||||
|
||||
def on_phase_error(pkg, phase_name, log_file):
|
||||
"""Triggered on a phase error
|
||||
"""
|
||||
if not spack.monitor.cli:
|
||||
return
|
||||
|
||||
tty.debug("Running on_phase_error %s, phase %s" % (pkg.name, phase_name))
|
||||
result = spack.monitor.cli.send_phase(pkg, phase_name, log_file, "ERROR")
|
||||
tty.verbose(result.get('message'))
|
||||
|
||||
|
||||
def on_analyzer_save(pkg, result):
|
||||
"""given a package and a result, if we have a spack monitor, upload
|
||||
the result to it.
|
||||
"""
|
||||
if not spack.monitor.cli:
|
||||
return
|
||||
|
||||
# This hook runs after a save result
|
||||
spack.monitor.cli.send_analyze_metadata(pkg, result)
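All of the hooks in this new file share the same guard: they return immediately unless a monitor client has been created. A rough sketch of that flow from calling code, under the assumption that the server is reachable (host, spec and auth handling below are illustrative; see get_client in lib/spack/spack/monitor.py further down):

# Sketch under stated assumptions -- not part of this changeset.
import spack.hooks
import spack.monitor
import spack.spec

spec = spack.spec.Spec("zlib").concretized()

spack.hooks.on_install_start(spec)       # no-op: spack.monitor.cli is None

# get_client() sets the global spack.monitor.cli; with disable_auth=False it
# expects SPACKMON_USER and SPACKMON_TOKEN in the environment.
spack.monitor.get_client(host="http://127.0.0.1", allow_fail=True)

spack.hooks.on_install_start(spec)       # now pings the monitor server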
|
@@ -46,6 +46,7 @@
|
||||
import spack.compilers
|
||||
import spack.error
|
||||
import spack.hooks
|
||||
import spack.monitor
|
||||
import spack.package
|
||||
import spack.package_prefs as prefs
|
||||
import spack.repo
|
||||
@@ -412,6 +413,25 @@ def clear_failures():
|
||||
spack.store.db.clear_all_failures()
|
||||
|
||||
|
||||
def combine_phase_logs(phase_log_files, log_path):
|
||||
"""
|
||||
Read a set or list of logs and combine them into one file.
|
||||
|
||||
Each phase will produce its own log, so this function aims to cat all the
|
||||
separate phase log output files into the pkg.log_path. It is written
|
||||
generally to accept some list of files, and a log path to combine them to.
|
||||
|
||||
Args:
|
||||
phase_log_files (list): a list or iterator of logs to combine
|
||||
log_path (path): the path to combine them to
|
||||
"""
|
||||
|
||||
with open(log_path, 'w') as log_file:
|
||||
for phase_log_file in phase_log_files:
|
||||
with open(phase_log_file, 'r') as phase_log:
|
||||
log_file.write(phase_log.read())
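A hedged usage sketch of combine_phase_logs; the stage path and file names below are invented, but follow the per-phase naming introduced in build_process() further down (in practice the caller passes pkg.phase_log_files and pkg.log_path):

# Illustration only.
stage = "/tmp/spack-stage-example"
phase_logs = [
    os.path.join(stage, "spack-build-01-configure-out.txt"),
    os.path.join(stage, "spack-build-02-build-out.txt"),
    os.path.join(stage, "spack-build-03-install-out.txt"),
]
combine_phase_logs(phase_logs, os.path.join(stage, "spack-build-out.txt"))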
|
||||
|
||||
|
||||
def dump_packages(spec, path):
|
||||
"""
|
||||
Dump all package information for a spec and its dependencies.
|
||||
@@ -521,6 +541,12 @@ def log(pkg):
|
||||
# Archive the whole stdout + stderr for the package
|
||||
fs.install(pkg.log_path, pkg.install_log_path)
|
||||
|
||||
# Archive all phase log paths
|
||||
for phase_log in pkg.phase_log_files:
|
||||
log_file = os.path.basename(phase_log)
|
||||
log_file = os.path.join(os.path.dirname(packages_dir), log_file)
|
||||
fs.install(phase_log, log_file)
|
||||
|
||||
# Archive the environment used for the build
|
||||
fs.install(pkg.env_path, pkg.install_env_path)
|
||||
|
||||
@@ -1250,8 +1276,8 @@ def _requeue_task(self, task):
|
||||
task (BuildTask): the installation build task for a package
|
||||
"""
|
||||
if task.status not in [STATUS_INSTALLED, STATUS_INSTALLING]:
|
||||
tty.msg('{0} {1}'.format(install_msg(task.pkg_id, self.pid),
|
||||
'in progress by another process'))
|
||||
tty.debug('{0} {1}'.format(install_msg(task.pkg_id, self.pid),
|
||||
'in progress by another process'))
|
||||
|
||||
new_task = task.next_attempt(self.installed)
|
||||
new_task.status = STATUS_INSTALLING
|
||||
@@ -1260,6 +1286,7 @@ def _requeue_task(self, task):
|
||||
def _setup_install_dir(self, pkg):
|
||||
"""
|
||||
Create and ensure proper access controls for the install directory.
|
||||
Write a small metadata file with the current spack environment.
|
||||
|
||||
Args:
|
||||
pkg (Package): the package to be built and installed
|
||||
@@ -1285,6 +1312,9 @@ def _setup_install_dir(self, pkg):
|
||||
# Ensure the metadata path exists as well
|
||||
fs.mkdirp(spack.store.layout.metadata_path(pkg.spec), mode=perms)
|
||||
|
||||
# Always write host environment - we assume this can change
|
||||
spack.store.layout.write_host_environment(pkg.spec)
|
||||
|
||||
def _update_failed(self, task, mark=False, exc=None):
|
||||
"""
|
||||
Update the task and transitive dependents as failed; optionally mark
|
||||
@@ -1388,8 +1418,8 @@ def install(self):
|
||||
|
||||
Args:
|
||||
pkg (Package): the package to be built and installed"""
|
||||
self._init_queue()
|
||||
|
||||
self._init_queue()
|
||||
fail_fast_err = 'Terminating after first install failure'
|
||||
single_explicit_spec = len(self.build_requests) == 1
|
||||
failed_explicits = []
|
||||
@@ -1400,6 +1430,7 @@ def install(self):
|
||||
if task is None:
|
||||
continue
|
||||
|
||||
spack.hooks.on_install_start(task.request.pkg.spec)
|
||||
install_args = task.request.install_args
|
||||
keep_prefix = install_args.get('keep_prefix')
|
||||
|
||||
@@ -1422,6 +1453,10 @@ def install(self):
|
||||
tty.warn('{0} does NOT actually have any uninstalled deps'
|
||||
' left'.format(pkg_id))
|
||||
dep_str = 'dependencies' if task.priority > 1 else 'dependency'
|
||||
|
||||
# Hook to indicate task failure, but without an exception
|
||||
spack.hooks.on_install_failure(task.request.pkg.spec)
|
||||
|
||||
raise InstallError(
|
||||
'Cannot proceed with {0}: {1} uninstalled {2}: {3}'
|
||||
.format(pkg_id, task.priority, dep_str,
|
||||
@@ -1441,6 +1476,11 @@ def install(self):
|
||||
tty.warn('{0} failed to install'.format(pkg_id))
|
||||
self._update_failed(task)
|
||||
|
||||
# Mark that the package failed
|
||||
# TODO: this should also be for the task.pkg, but we don't
|
||||
# model transitive yet.
|
||||
spack.hooks.on_install_failure(task.request.pkg.spec)
|
||||
|
||||
if self.fail_fast:
|
||||
raise InstallError(fail_fast_err)
|
||||
|
||||
@@ -1550,6 +1590,7 @@ def install(self):
|
||||
# Only terminate at this point if a single build request was
|
||||
# made.
|
||||
if task.explicit and single_explicit_spec:
|
||||
spack.hooks.on_install_failure(task.request.pkg.spec)
|
||||
raise
|
||||
|
||||
if task.explicit:
|
||||
@@ -1561,10 +1602,12 @@ def install(self):
|
||||
err = 'Failed to install {0} due to {1}: {2}'
|
||||
tty.error(err.format(pkg.name, exc.__class__.__name__,
|
||||
str(exc)))
|
||||
spack.hooks.on_install_failure(task.request.pkg.spec)
|
||||
raise
|
||||
|
||||
except (Exception, SystemExit) as exc:
|
||||
self._update_failed(task, True, exc)
|
||||
spack.hooks.on_install_failure(task.request.pkg.spec)
|
||||
|
||||
# Best effort installs suppress the exception and mark the
|
||||
# package as a failure.
|
||||
@@ -1662,6 +1705,7 @@ def build_process(pkg, kwargs):
|
||||
echo = spack.package.PackageBase._verbose
|
||||
|
||||
pkg.stage.keep = keep_stage
|
||||
|
||||
with pkg.stage:
|
||||
# Run the pre-install hook in the child process after
|
||||
# the directory is created.
|
||||
@@ -1679,6 +1723,7 @@ def build_process(pkg, kwargs):
|
||||
|
||||
# Do the real install in the source directory.
|
||||
with fs.working_dir(pkg.stage.source_path):
|
||||
|
||||
# Save the build environment in a file before building.
|
||||
dump_environment(pkg.env_path)
|
||||
|
||||
@@ -1699,25 +1744,48 @@ def build_process(pkg, kwargs):
|
||||
debug_level = tty.debug_level()
|
||||
|
||||
# Spawn a daemon that reads from a pipe and redirects
|
||||
# everything to log_path
|
||||
with log_output(pkg.log_path, echo, True,
|
||||
env=unmodified_env) as logger:
|
||||
# everything to log_path, and provide the phase for logging
|
||||
for i, (phase_name, phase_attr) in enumerate(zip(
|
||||
pkg.phases, pkg._InstallPhase_phases)):
|
||||
|
||||
for phase_name, phase_attr in zip(
|
||||
pkg.phases, pkg._InstallPhase_phases):
|
||||
# Keep a log file for each phase
|
||||
log_dir = os.path.dirname(pkg.log_path)
|
||||
log_file = "spack-build-%02d-%s-out.txt" % (
|
||||
i + 1, phase_name.lower()
|
||||
)
|
||||
log_file = os.path.join(log_dir, log_file)
|
||||
|
||||
with logger.force_echo():
|
||||
inner_debug_level = tty.debug_level()
|
||||
tty.set_debug(debug_level)
|
||||
tty.msg("{0} Executing phase: '{1}'"
|
||||
.format(pre, phase_name))
|
||||
tty.set_debug(inner_debug_level)
|
||||
try:
|
||||
# DEBUGGING TIP - to debug this section, insert an IPython
|
||||
# embed here, and run the sections below without log capture
|
||||
with log_output(log_file, echo, True,
|
||||
env=unmodified_env) as logger:
|
||||
|
||||
# Redirect stdout and stderr to daemon pipe
|
||||
phase = getattr(pkg, phase_attr)
|
||||
phase(pkg.spec, pkg.prefix)
|
||||
with logger.force_echo():
|
||||
inner_debug_level = tty.debug_level()
|
||||
tty.set_debug(debug_level)
|
||||
tty.msg("{0} Executing phase: '{1}'"
|
||||
.format(pre, phase_name))
|
||||
tty.set_debug(inner_debug_level)
|
||||
|
||||
echo = logger.echo
|
||||
# Redirect stdout and stderr to daemon pipe
|
||||
phase = getattr(pkg, phase_attr)
|
||||
|
||||
# Catch any errors to report to logging
|
||||
|
||||
phase(pkg.spec, pkg.prefix)
|
||||
spack.hooks.on_phase_success(pkg, phase_name, log_file)
|
||||
|
||||
except BaseException:
|
||||
combine_phase_logs(pkg.phase_log_files, pkg.log_path)
|
||||
spack.hooks.on_phase_error(pkg, phase_name, log_file)
|
||||
raise
|
||||
|
||||
# We assume loggers share echo True/False
|
||||
echo = logger.echo
|
||||
|
||||
# After log, we can get all output/error files from the package stage
|
||||
combine_phase_logs(pkg.phase_log_files, pkg.log_path)
|
||||
log(pkg)
|
||||
|
||||
# Run post install hooks before build stage is removed.
|
||||
@@ -1733,6 +1801,9 @@ def build_process(pkg, kwargs):
|
||||
_hms(pkg._total_time)))
|
||||
_print_installed_pkg(pkg.prefix)
|
||||
|
||||
# Send final status that install is successful
|
||||
spack.hooks.on_install_success(pkg.spec)
|
||||
|
||||
# preserve verbosity across runs
|
||||
return echo
|
||||
|
||||
|
@@ -354,7 +354,8 @@ def make_argument_parser(**kwargs):
|
||||
dest='help', action='store_const', const='long', default=None,
|
||||
help="show help for all commands (same as spack help --all)")
|
||||
parser.add_argument(
|
||||
'--color', action='store', default='auto',
|
||||
'--color', action='store',
|
||||
default=os.environ.get('SPACK_COLOR', 'auto'),
|
||||
choices=('always', 'never', 'auto'),
|
||||
help="when to colorize output (default: auto)")
|
||||
parser.add_argument(
|
||||
@@ -415,6 +416,7 @@ def make_argument_parser(**kwargs):
|
||||
help="print additional output during builds")
|
||||
parser.add_argument(
|
||||
'--stacktrace', action='store_true',
|
||||
default='SPACK_STACKTRACE' in os.environ,
|
||||
help="add stacktraces to all printed statements")
|
||||
parser.add_argument(
|
||||
'-V', '--version', action='store_true',
|
||||
@@ -528,6 +530,8 @@ def __call__(self, *argv, **kwargs):
|
||||
|
||||
Keyword Args:
|
||||
fail_on_error (optional bool): Don't raise an exception on error
|
||||
global_args (optional list): List of global spack arguments:
|
||||
simulates ``spack [global_args] [command] [*argv]``
|
||||
|
||||
Returns:
|
||||
(str): combined output and error as a string
|
||||
@@ -540,8 +544,10 @@ def __call__(self, *argv, **kwargs):
|
||||
self.returncode = None
|
||||
self.error = None
|
||||
|
||||
prepend = kwargs['global_args'] if 'global_args' in kwargs else []
|
||||
|
||||
args, unknown = self.parser.parse_known_args(
|
||||
[self.command_name] + list(argv))
|
||||
prepend + [self.command_name] + list(argv))
|
||||
|
||||
fail_on_error = kwargs.get('fail_on_error', True)
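The new global_args keyword is aimed at callers (mostly tests) that need to exercise top-level spack flags through SpackCommand; a hedged sketch, with the command and flag chosen only for illustration:

# Illustrative use of the new keyword; roughly equivalent to running
# `spack --color=never config get config`.
from spack.main import SpackCommand

config = SpackCommand('config')
output = config('get', 'config', global_args=['--color=never'])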
|
||||
|
||||
|
522
lib/spack/spack/monitor.py
Normal file
@@ -0,0 +1,522 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""Interact with a Spack Monitor Service. Derived from
|
||||
https://github.com/spack/spack-monitor/blob/main/script/spackmoncli.py
|
||||
"""
|
||||
|
||||
import base64
|
||||
import os
|
||||
import re
|
||||
|
||||
try:
|
||||
from urllib.request import Request, urlopen
|
||||
from urllib.error import URLError
|
||||
except ImportError:
|
||||
from urllib2 import urlopen, Request, URLError # type: ignore # novm
|
||||
|
||||
import spack
|
||||
import spack.hash_types as ht
|
||||
import spack.main
|
||||
import spack.store
|
||||
import spack.util.spack_json as sjson
|
||||
import spack.util.spack_yaml as syaml
|
||||
import llnl.util.tty as tty
|
||||
from copy import deepcopy
|
||||
|
||||
|
||||
# A global client to instantiate once
|
||||
cli = None
|
||||
|
||||
|
||||
def get_client(host, prefix="ms1", disable_auth=False, allow_fail=False, tags=None):
|
||||
"""
|
||||
Get a monitor client for a particular host and prefix.
|
||||
|
||||
If the client is not running, we exit early, unless allow_fail is set
|
||||
to true, indicating that we should continue the build even if the
|
||||
server is not present. Note that this client is defined globally as "cli"
|
||||
so we can instantiate it once (checking for credentials, etc.) and then
|
||||
always have access to it via spack.monitor.cli. Also note that
|
||||
typically, we call the monitor by way of hooks in spack.hooks.monitor.
|
||||
So if you want the monitor to have a new interaction with some part of
|
||||
the codebase, it's recommended to write a hook first, and then have
|
||||
the monitor use it.
|
||||
"""
|
||||
global cli
|
||||
cli = SpackMonitorClient(host=host, prefix=prefix, allow_fail=allow_fail,
|
||||
tags=tags)
|
||||
|
||||
# If we don't disable auth, environment credentials are required
|
||||
if not disable_auth:
|
||||
cli.require_auth()
|
||||
|
||||
# We will exit early if the monitoring service is not running
|
||||
info = cli.service_info()
|
||||
|
||||
# If we allow failure, the response may be None
|
||||
if info:
|
||||
tty.debug("%s v.%s has status %s" % (
|
||||
info['id'],
|
||||
info['version'],
|
||||
info['status'])
|
||||
)
|
||||
return cli
|
||||
|
||||
else:
|
||||
tty.debug("spack-monitor server not found, continuing as allow_fail is True.")
|
||||
|
||||
|
||||
def get_monitor_group(subparser):
|
||||
"""
|
||||
Retrieve the monitor group for the argument parser.
|
||||
|
||||
Since the monitor group is shared between commands, we provide a common
|
||||
function to generate the group for it. The user can pass the subparser, and
|
||||
the group is added, and returned.
|
||||
"""
|
||||
# Monitoring via https://github.com/spack/spack-monitor
|
||||
monitor_group = subparser.add_argument_group()
|
||||
monitor_group.add_argument(
|
||||
'--monitor', action='store_true', dest='use_monitor', default=False,
|
||||
help="interact with a montor server during builds.")
|
||||
monitor_group.add_argument(
|
||||
'--monitor-no-auth', action='store_true', dest='monitor_disable_auth',
|
||||
default=False, help="the monitoring server does not require auth.")
|
||||
monitor_group.add_argument(
|
||||
'--monitor-tags', dest='monitor_tags', default=None,
|
||||
help="One or more (comma separated) tags for a build.")
|
||||
monitor_group.add_argument(
|
||||
'--monitor-keep-going', action='store_true', dest='monitor_keep_going',
|
||||
default=False, help="continue the build if a request to monitor fails.")
|
||||
monitor_group.add_argument(
|
||||
'--monitor-host', dest='monitor_host', default="http://127.0.0.1",
|
||||
help="If using a monitor, customize the host.")
|
||||
monitor_group.add_argument(
|
||||
'--monitor-prefix', dest='monitor_prefix', default="ms1",
|
||||
help="The API prefix for the monitor service.")
|
||||
return monitor_group
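A command wired up with this group would then be invoked roughly as `spack install --monitor --monitor-host http://127.0.0.1 --monitor-prefix ms1 zlib` (package name illustrative), with SPACKMON_USER and SPACKMON_TOKEN exported in the environment unless --monitor-no-auth is passed.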
|
||||
|
||||
|
||||
class SpackMonitorClient:
|
||||
"""Client to interact with a spack monitor server.
|
||||
|
||||
We require the host url, along with the prefix to discover the
|
||||
service_info endpoint. If allow_fail is set to True, we will not exit
|
||||
on error with tty.die given that a request is not successful. The spack
|
||||
version is one of the fields to uniquely identify a spec, so we add it
|
||||
to the client on init.
|
||||
"""
|
||||
|
||||
def __init__(self, host=None, prefix="ms1", allow_fail=False, tags=None):
|
||||
self.host = host or "http://127.0.0.1"
|
||||
self.baseurl = "%s/%s" % (self.host, prefix.strip("/"))
|
||||
self.token = os.environ.get("SPACKMON_TOKEN")
|
||||
self.username = os.environ.get("SPACKMON_USER")
|
||||
self.headers = {}
|
||||
self.allow_fail = allow_fail
|
||||
self.spack_version = spack.main.get_version()
|
||||
self.capture_build_environment()
|
||||
self.tags = tags
|
||||
|
||||
# We keep a lookup of build_id by full_hash
|
||||
self.build_ids = {}
|
||||
|
||||
def load_build_environment(self, spec):
|
||||
"""
|
||||
Load a build environment from install_environment.json.
|
||||
|
||||
If we are running an analyze command, we will need to load previously
|
||||
used build environment metadata from install_environment.json to capture
|
||||
what was done during the build.
|
||||
"""
|
||||
if not hasattr(spec, "package") or not spec.package:
|
||||
tty.die("A spec must have a package to load the environment.")
|
||||
|
||||
pkg_dir = os.path.dirname(spec.package.install_log_path)
|
||||
env_file = os.path.join(pkg_dir, "install_environment.json")
|
||||
build_environment = read_json(env_file)
|
||||
if not build_environment:
|
||||
tty.warn(
|
||||
"install_environment.json not found in package folder. "
|
||||
" This means that the current environment metadata will be used."
|
||||
)
|
||||
else:
|
||||
self.build_environment = build_environment
|
||||
|
||||
def capture_build_environment(self):
|
||||
"""
|
||||
Capture the environment for the build.
|
||||
|
||||
This uses spack.util.environment.get_host_environment_metadata to do so.
|
||||
This is important because it's a unique identifier, along with the spec,
|
||||
for a Build. It should look something like this:
|
||||
|
||||
{'host_os': 'ubuntu20.04',
|
||||
'platform': 'linux',
|
||||
'host_target': 'skylake',
|
||||
'hostname': 'vanessa-ThinkPad-T490s',
|
||||
'spack_version': '0.16.1-1455-52d5b55b65',
|
||||
'kernel_version': '#73-Ubuntu SMP Mon Jan 18 17:25:17 UTC 2021'}
|
||||
|
||||
This is saved to a package install's metadata folder as
|
||||
install_environment.json, and can be loaded by the monitor for uploading
|
||||
data relevant to a later analysis.
|
||||
"""
|
||||
from spack.util.environment import get_host_environment_metadata
|
||||
self.build_environment = get_host_environment_metadata()
|
||||
|
||||
def require_auth(self):
|
||||
"""
|
||||
Require authentication.
|
||||
|
||||
The token and username must both be set.
|
||||
"""
|
||||
if not self.token or not self.username:
|
||||
tty.die("You are required to export SPACKMON_TOKEN and SPACKMON_USER")
|
||||
|
||||
def set_header(self, name, value):
|
||||
self.headers.update({name: value})
|
||||
|
||||
def set_basic_auth(self, username, password):
|
||||
"""
|
||||
A wrapper for adding basic authentication to the Request
|
||||
"""
|
||||
auth_str = "%s:%s" % (username, password)
|
||||
auth_header = base64.b64encode(auth_str.encode("utf-8"))
|
||||
self.set_header("Authorization", "Basic %s" % auth_header.decode("utf-8"))
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Reset and prepare for a new request.
|
||||
"""
|
||||
if "Authorization" in self.headers:
|
||||
self.headers = {"Authorization": self.headers['Authorization']}
|
||||
else:
|
||||
self.headers = {}
|
||||
|
||||
def prepare_request(self, endpoint, data, headers):
|
||||
"""
|
||||
Prepare a request given an endpoint, data, and headers.
|
||||
|
||||
If data is provided, urllib makes the request a POST
|
||||
"""
|
||||
# Always reset headers for new request.
|
||||
self.reset()
|
||||
|
||||
# Preserve previously used auth token
|
||||
headers = headers or self.headers
|
||||
|
||||
# The calling function can provide a full or partial url
|
||||
if not endpoint.startswith("http"):
|
||||
endpoint = "%s/%s" % (self.baseurl, endpoint)
|
||||
|
||||
# If we have data, the request will be POST
|
||||
if data:
|
||||
if not isinstance(data, str):
|
||||
data = sjson.dump(data)
|
||||
data = data.encode('ascii')
|
||||
|
||||
return Request(endpoint, data=data, headers=headers)
|
||||
|
||||
def issue_request(self, request, retry=True):
|
||||
"""
|
||||
Given a prepared request, issue it.
|
||||
|
||||
If we get an error, die. If
|
||||
there are times when we don't want to exit on error (but instead
|
||||
disable using the monitoring service) we could add that here.
|
||||
"""
|
||||
try:
|
||||
response = urlopen(request)
|
||||
except URLError as e:
|
||||
|
||||
# If we get an authorization error (401), retry once with auth
|
||||
if hasattr(e, "code") and e.code == 401 and retry:
|
||||
if self.authenticate_request(e):
|
||||
request = self.prepare_request(
|
||||
e.url,
|
||||
sjson.load(request.data.decode('utf-8')),
|
||||
self.headers
|
||||
)
|
||||
return self.issue_request(request, False)
|
||||
|
||||
# Otherwise, relay the message and exit on error
|
||||
msg = ""
|
||||
if hasattr(e, 'reason'):
|
||||
msg = e.reason
|
||||
elif hasattr(e, 'code'):
|
||||
msg = e.code
|
||||
|
||||
if self.allow_fail:
|
||||
tty.warning("Request to %s was not successful, but continuing." % e.url)
|
||||
return
|
||||
|
||||
tty.die(msg)
|
||||
|
||||
return response
|
||||
|
||||
def do_request(self, endpoint, data=None, headers=None, url=None):
|
||||
"""
|
||||
Do the actual request.
|
||||
|
||||
If data is provided, it is POST, otherwise GET.
|
||||
If an entire URL is provided, don't use the endpoint
|
||||
"""
|
||||
request = self.prepare_request(endpoint, data, headers)
|
||||
|
||||
# If we get an authorization error, issue_request retries once with auth
|
||||
response = self.issue_request(request)
|
||||
|
||||
# A 200/201 response indicates success
|
||||
if response.code in [200, 201]:
|
||||
return sjson.load(response.read().decode('utf-8'))
|
||||
|
||||
return response
|
||||
|
||||
def authenticate_request(self, originalResponse):
|
||||
"""
|
||||
Authenticate the request.
|
||||
|
||||
Given a response (an HTTPError 401), look for a Www-Authenticate
|
||||
header to parse. We return True/False to indicate if the request
|
||||
should be retried.
|
||||
"""
|
||||
authHeaderRaw = originalResponse.headers.get("Www-Authenticate")
|
||||
if not authHeaderRaw:
|
||||
return False
|
||||
|
||||
# If we have a username and password, set basic auth automatically
|
||||
if self.token and self.username:
|
||||
self.set_basic_auth(self.username, self.token)
|
||||
|
||||
headers = deepcopy(self.headers)
|
||||
if "Authorization" not in headers:
|
||||
tty.error(
|
||||
"This endpoint requires a token. Please set "
|
||||
"client.set_basic_auth(username, password) first "
|
||||
"or export them to the environment."
|
||||
)
|
||||
return False
|
||||
|
||||
# Prepare request to retry
|
||||
h = parse_auth_header(authHeaderRaw)
|
||||
headers.update({
|
||||
"service": h.Service,
|
||||
"Accept": "application/json",
|
||||
"User-Agent": "spackmoncli"}
|
||||
)
|
||||
|
||||
# Currently we don't set a scope (it defaults to build)
|
||||
authResponse = self.do_request(h.Realm, headers=headers)
|
||||
|
||||
# Request the token
|
||||
token = authResponse.get("token")
|
||||
if not token:
|
||||
return False
|
||||
|
||||
# Set the token to the original request and retry
|
||||
self.headers.update({"Authorization": "Bearer %s" % token})
|
||||
return True
|
||||
|
||||
# Functions correspond to endpoints
|
||||
def service_info(self):
|
||||
"""
|
||||
Get the service information endpoint
|
||||
"""
|
||||
# Base endpoint provides service info
|
||||
return self.do_request("")
|
||||
|
||||
def new_configuration(self, specs):
|
||||
"""
|
||||
Given a list of specs, generate a new configuration for each.
|
||||
|
||||
We return a lookup of specs with their package names. This assumes
|
||||
that we are only installing one version of each package. We aren't
|
||||
starting or creating any builds, so we don't need a build environment.
|
||||
"""
|
||||
configs = {}
|
||||
|
||||
# There should only be one spec generally (what cases would have >1?)
|
||||
for spec in specs:
|
||||
# Not sure if this is needed here, but I see it elsewhere
|
||||
if spec.name in spack.repo.path or spec.virtual:
|
||||
spec.concretize()
|
||||
as_dict = {"spec": spec.to_dict(hash=ht.full_hash),
|
||||
"spack_version": self.spack_version}
|
||||
response = self.do_request("specs/new/", data=sjson.dump(as_dict))
|
||||
configs[spec.package.name] = response.get('data', {})
|
||||
return configs
|
||||
|
||||
def new_build(self, spec):
|
||||
"""
|
||||
Create a new build.
|
||||
|
||||
This means sending the hash of the spec to be built,
|
||||
along with the build environment. Together these two sets of data can uniquely
|
||||
identify the build, and we will add objects (the binaries produced) to
|
||||
it. We return the build id to the calling client.
|
||||
"""
|
||||
return self.get_build_id(spec, return_response=True)
|
||||
|
||||
def get_build_id(self, spec, return_response=False, spec_exists=True):
|
||||
"""
|
||||
Retrieve a build id, either in the local cache, or query the server.
|
||||
"""
|
||||
full_hash = spec.full_hash()
|
||||
if full_hash in self.build_ids:
|
||||
return self.build_ids[full_hash]
|
||||
|
||||
# Prepare build environment data (including spack version)
|
||||
data = self.build_environment.copy()
|
||||
data['full_hash'] = full_hash
|
||||
|
||||
# If the build should be tagged, add it
|
||||
if self.tags:
|
||||
data['tags'] = self.tags
|
||||
|
||||
# If we allow the spec to not exist (meaning we create it) we need to
|
||||
# include the full spec.yaml here
|
||||
if not spec_exists:
|
||||
meta_dir = os.path.dirname(spec.package.install_log_path)
|
||||
spec_file = os.path.join(meta_dir, "spec.yaml")
|
||||
data['spec'] = syaml.load(read_file(spec_file))
|
||||
|
||||
response = self.do_request("builds/new/", data=sjson.dump(data))
|
||||
|
||||
# Add the build id to the lookup
|
||||
bid = self.build_ids[full_hash] = response['data']['build']['build_id']
|
||||
self.build_ids[full_hash] = bid
|
||||
|
||||
# If the function is called directly, the user might want output
|
||||
if return_response:
|
||||
return response
|
||||
return bid
|
||||
|
||||
def update_build(self, spec, status="SUCCESS"):
|
||||
"""
|
||||
Update a build with a new status.
|
||||
|
||||
This typically updates the relevant package to indicate a
|
||||
successful install. This endpoint can take a general status to update.
|
||||
"""
|
||||
data = {"build_id": self.get_build_id(spec), "status": status}
|
||||
return self.do_request("builds/update/", data=sjson.dump(data))
|
||||
|
||||
def fail_task(self, spec):
|
||||
"""Given a spec, mark it as failed. This means that Spack Monitor
|
||||
marks all dependencies as cancelled, unless they are already successful
|
||||
"""
|
||||
return self.update_build(spec, status="FAILED")
|
||||
|
||||
def send_analyze_metadata(self, pkg, metadata):
|
||||
"""
|
||||
Send spack analyzer metadata to the spack monitor server.
|
||||
|
||||
Given a dictionary of analyzers (with key as analyzer type, and
|
||||
value as the data) upload the analyzer output to Spack Monitor.
|
||||
Spack Monitor should either have a known understanding of the analyzer,
|
||||
or if not (the key is not recognized), it's assumed to be a dictionary
|
||||
of objects/files, each with attributes to be updated. E.g.,
|
||||
|
||||
{"analyzer-name": {"object-file-path": {"feature1": "value1"}}}
|
||||
"""
|
||||
# Prepare build environment data (including spack version)
|
||||
# Since the build might not have been generated, we include the spec
|
||||
data = {"build_id": self.get_build_id(pkg.spec, spec_exists=False),
|
||||
"metadata": metadata}
|
||||
return self.do_request("analyze/builds/", data=sjson.dump(data))
|
||||
|
||||
def send_phase(self, pkg, phase_name, phase_output_file, status):
|
||||
"""
|
||||
Send the result of a phase during install.
|
||||
|
||||
Given a package, phase name, and status, update the monitor endpoint
|
||||
to alert of the status of the stage. This includes parsing the package
|
||||
metadata folder for phase output and error files
|
||||
"""
|
||||
data = {"build_id": self.get_build_id(pkg.spec)}
|
||||
|
||||
# Send output specific to the phase (does this include error?)
|
||||
data.update({"status": status,
|
||||
"output": read_file(phase_output_file),
|
||||
"phase_name": phase_name})
|
||||
|
||||
return self.do_request("builds/phases/update/", data=sjson.dump(data))
|
||||
|
||||
def upload_specfile(self, filename):
|
||||
"""
|
||||
Upload a spec file to the spack monitor server.
|
||||
|
||||
Given a spec file (must be json) upload to the UploadSpec endpoint.
|
||||
This function is not used in the spack to server workflow, but could
|
||||
be useful if Spack Monitor is intended to send an already generated
|
||||
file in some kind of separate analysis. For the environment file, we
|
||||
parse out SPACK_* variables to include.
|
||||
"""
|
||||
# We load as json just to validate it
|
||||
spec = read_json(filename)
|
||||
data = {"spec": spec, "spack_verison": self.spack_version}
|
||||
return self.do_request("specs/new/", data=sjson.dump(data))
|
||||
|
||||
|
||||
# Helper functions
|
||||
|
||||
def parse_auth_header(authHeaderRaw):
|
||||
"""
|
||||
Parse an authentication header into relevant pieces
|
||||
"""
|
||||
regex = re.compile('([a-zA-Z]+)="(.+?)"')
|
||||
matches = regex.findall(authHeaderRaw)
|
||||
lookup = dict()
|
||||
for match in matches:
|
||||
lookup[match[0]] = match[1]
|
||||
return authHeader(lookup)
|
||||
|
||||
|
||||
class authHeader:
|
||||
def __init__(self, lookup):
|
||||
"""Given a dictionary of values, match them to class attributes"""
|
||||
for key in lookup:
|
||||
if key in ["realm", "service", "scope"]:
|
||||
setattr(self, key.capitalize(), lookup[key])
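A small worked example of the two helpers above (the header value is invented); the quoted key="value" pairs become capitalized attributes on the returned object:

# Illustration only.
hdr = 'Bearer realm="http://127.0.0.1/ms1/auth",service="spackmon",scope="build"'
parsed = parse_auth_header(hdr)
assert parsed.Realm == "http://127.0.0.1/ms1/auth"
assert parsed.Service == "spackmon"
assert parsed.Scope == "build"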
|
||||
|
||||
|
||||
def read_file(filename):
|
||||
"""
|
||||
Read a file, if it exists. Otherwise return None
|
||||
"""
|
||||
if not os.path.exists(filename):
|
||||
return
|
||||
with open(filename, 'r') as fd:
|
||||
content = fd.read()
|
||||
return content
|
||||
|
||||
|
||||
def write_file(content, filename):
|
||||
"""
|
||||
Write content to file
|
||||
"""
|
||||
with open(filename, 'w') as fd:
|
||||
fd.writelines(content)
|
||||
return content
|
||||
|
||||
|
||||
def write_json(obj, filename):
|
||||
"""
|
||||
Write a json file, if the output directory exists.
|
||||
"""
|
||||
if not os.path.exists(os.path.dirname(filename)):
|
||||
return
|
||||
return write_file(sjson.dump(obj), filename)
|
||||
|
||||
|
||||
def read_json(filename):
|
||||
"""
|
||||
Read a file and load into json, if it exists. Otherwise return None.
|
||||
"""
|
||||
if not os.path.exists(filename):
|
||||
return
|
||||
return sjson.load(read_file(filename))
|
@@ -15,6 +15,7 @@
|
||||
import contextlib
|
||||
import copy
|
||||
import functools
|
||||
import glob
|
||||
import hashlib
|
||||
import inspect
|
||||
import os
|
||||
@@ -1066,6 +1067,14 @@ def log_path(self):
|
||||
# Otherwise, return the current log path name.
|
||||
return os.path.join(self.stage.path, _spack_build_logfile)
|
||||
|
||||
@property
|
||||
def phase_log_files(self):
|
||||
"""Find sorted phase log files written to the staging directory"""
|
||||
logs_dir = os.path.join(self.stage.path, "spack-build-*-out.txt")
|
||||
log_files = glob.glob(logs_dir)
|
||||
log_files.sort()
|
||||
return log_files
|
||||
|
||||
@property
|
||||
def install_log_path(self):
|
||||
"""Return the build log file path on successful installation."""
|
||||
@@ -2319,8 +2328,13 @@ def do_activate(self, view=None, with_dependencies=True, verbose=True):
|
||||
|
||||
extensions_layout = view.extensions_layout
|
||||
|
||||
extensions_layout.check_extension_conflict(
|
||||
self.extendee_spec, self.spec)
|
||||
try:
|
||||
extensions_layout.check_extension_conflict(
|
||||
self.extendee_spec, self.spec)
|
||||
except spack.directory_layout.ExtensionAlreadyInstalledError as e:
|
||||
# already installed, let caller know
|
||||
tty.msg(e.message)
|
||||
return
|
||||
|
||||
# Activate any package dependencies that are also extensions.
|
||||
if with_dependencies:
|
||||
|
@@ -33,6 +33,7 @@
|
||||
build_env_path = os.path.join(lib_path, "env")
|
||||
module_path = os.path.join(lib_path, "spack")
|
||||
command_path = os.path.join(module_path, "cmd")
|
||||
analyzers_path = os.path.join(module_path, "analyzers")
|
||||
platform_path = os.path.join(module_path, 'platforms')
|
||||
compilers_path = os.path.join(module_path, "compilers")
|
||||
build_systems_path = os.path.join(module_path, 'build_systems')
|
||||
@@ -52,6 +53,8 @@
|
||||
user_config_path = os.path.expanduser('~/.spack')
|
||||
user_bootstrap_path = os.path.join(user_config_path, 'bootstrap')
|
||||
user_bootstrap_store = os.path.join(user_bootstrap_path, 'store')
|
||||
reports_path = os.path.join(user_config_path, "reports")
|
||||
|
||||
|
||||
opt_path = os.path.join(prefix, "opt")
|
||||
etc_path = os.path.join(prefix, "etc")
|
||||
|
@@ -19,6 +19,10 @@
|
||||
from spack.build_systems.aspell_dict import AspellDictPackage
|
||||
from spack.build_systems.autotools import AutotoolsPackage
|
||||
from spack.build_systems.cmake import CMakePackage
|
||||
from spack.build_systems.cached_cmake import (
|
||||
CachedCMakePackage, cmake_cache_option, cmake_cache_path,
|
||||
cmake_cache_string
|
||||
)
|
||||
from spack.build_systems.cuda import CudaPackage
|
||||
from spack.build_systems.oneapi import IntelOneApiPackage
|
||||
from spack.build_systems.oneapi import IntelOneApiLibraryPackage
|
||||
|
@@ -169,6 +169,7 @@ def target_names_from_modules(modules):
|
||||
for mod in modules:
|
||||
if 'craype-' in mod:
|
||||
name = mod[7:]
|
||||
name = name.split()[0]
|
||||
_n = name.replace('-', '_') # test for mic-knl/mic_knl
|
||||
is_target_name = (name in archspec.cpu.TARGETS or
|
||||
_n in archspec.cpu.TARGETS)
|
||||
|
@@ -918,8 +918,12 @@ def _read_config(self):
|
||||
@autospec
|
||||
def get(self, spec):
|
||||
"""Returns the package associated with the supplied spec."""
|
||||
if not self.exists(spec.name):
|
||||
raise UnknownPackageError(spec.name)
|
||||
# NOTE: we only check whether the package is None here, not whether it
|
||||
# actually exists, because we have to load it anyway, and that ends up
|
||||
# checking for existence. We avoid constructing FastPackageChecker,
|
||||
# which will stat all packages.
|
||||
if spec.name is None:
|
||||
raise UnknownPackageError(None, self)
|
||||
|
||||
if spec.namespace and spec.namespace != self.namespace:
|
||||
raise UnknownPackageError(spec.name, self.namespace)
|
||||
@@ -1064,7 +1068,16 @@ def all_package_classes(self):
|
||||
|
||||
def exists(self, pkg_name):
|
||||
"""Whether a package with the supplied name exists."""
|
||||
return pkg_name in self._pkg_checker
|
||||
if pkg_name is None:
|
||||
return False
|
||||
|
||||
# if the FastPackageChecker is already constructed, use it
|
||||
if self._fast_package_checker:
|
||||
return pkg_name in self._pkg_checker
|
||||
|
||||
# if not, check for the package.py file
|
||||
path = self.filename_for_package_name(pkg_name)
|
||||
return os.path.exists(path)
|
||||
|
||||
def last_mtime(self):
|
||||
"""Time a package file in this repo was last updated."""
|
||||
@@ -1331,7 +1344,7 @@ def __init__(self, name, repo=None):
|
||||
long_msg = None
|
||||
if name:
|
||||
if repo:
|
||||
msg = "Package '{0}' not found in repository '{1}'"
|
||||
msg = "Package '{0}' not found in repository '{1.root}'"
|
||||
msg = msg.format(name, repo)
|
||||
else:
|
||||
msg = "Package '{0}' not found.".format(name)
|
||||
|
@@ -22,6 +22,7 @@
|
||||
import spack.build_environment
|
||||
import spack.fetch_strategy
|
||||
import spack.package
|
||||
from spack.error import SpackError
|
||||
from spack.reporter import Reporter
|
||||
from spack.util.crypto import checksum
|
||||
from spack.util.executable import which
|
||||
@@ -60,6 +61,7 @@ class CDash(Reporter):
|
||||
def __init__(self, args):
|
||||
Reporter.__init__(self, args)
|
||||
tty.set_verbose(args.verbose)
|
||||
self.success = True
|
||||
self.template_dir = os.path.join('reports', 'cdash')
|
||||
self.cdash_upload_url = args.cdash_upload_url
|
||||
|
||||
@@ -159,13 +161,21 @@ def build_report_for_package(self, directory_name, package, duration):
|
||||
report_data[phase]['log'] = \
|
||||
'\n'.join(report_data[phase]['loglines'])
|
||||
errors, warnings = parse_log_events(report_data[phase]['loglines'])
|
||||
|
||||
# Convert errors to warnings if the package reported success.
|
||||
if package['result'] == 'success':
|
||||
warnings = errors + warnings
|
||||
errors = []
|
||||
|
||||
# Cap the number of errors and warnings at 50 each.
|
||||
errors = errors[:50]
|
||||
warnings = warnings[:50]
|
||||
nerrors = len(errors)
|
||||
|
||||
if phase == 'configure' and nerrors > 0:
|
||||
report_data[phase]['status'] = 1
|
||||
if nerrors > 0:
|
||||
self.success = False
|
||||
if phase == 'configure':
|
||||
report_data[phase]['status'] = 1
|
||||
|
||||
if phase == 'build':
|
||||
# Convert log output from ASCII to Unicode and escape for XML.
|
||||
@@ -186,11 +196,6 @@ def clean_log_event(event):
|
||||
event['source_file'])
|
||||
return event
|
||||
|
||||
# Convert errors to warnings if the package reported success.
|
||||
if package['result'] == 'success':
|
||||
warnings = errors + warnings
|
||||
errors = []
|
||||
|
||||
report_data[phase]['errors'] = []
|
||||
report_data[phase]['warnings'] = []
|
||||
for error in errors:
|
||||
@@ -254,7 +259,7 @@ def build_report(self, directory_name, input_data):
|
||||
for package in spec['packages']:
|
||||
self.build_report_for_package(
|
||||
directory_name, package, duration)
|
||||
self.print_cdash_link()
|
||||
self.finalize_report()
|
||||
|
||||
def test_report_for_package(self, directory_name, package, duration):
|
||||
if 'stdout' not in package:
|
||||
@@ -360,7 +365,7 @@ def test_report(self, directory_name, input_data):
|
||||
for package in spec['packages']:
|
||||
self.test_report_for_package(
|
||||
directory_name, package, duration)
|
||||
self.print_cdash_link()
|
||||
self.finalize_report()
|
||||
|
||||
def concretization_report(self, directory_name, msg):
|
||||
self.buildname = self.base_buildname
|
||||
@@ -381,7 +386,8 @@ def concretization_report(self, directory_name, msg):
|
||||
# errors so refer to this report with the base buildname instead.
|
||||
self.current_package_name = self.base_buildname
|
||||
self.upload(output_filename)
|
||||
self.print_cdash_link()
|
||||
self.success = False
|
||||
self.finalize_report()
|
||||
|
||||
def initialize_report(self, directory_name):
|
||||
if not os.path.exists(directory_name):
|
||||
@@ -430,7 +436,7 @@ def upload(self, filename):
|
||||
buildid = match.group(1)
|
||||
self.buildIds[self.current_package_name] = buildid
|
||||
|
||||
def print_cdash_link(self):
|
||||
def finalize_report(self):
|
||||
if self.buildIds:
|
||||
print("View your build results here:")
|
||||
for package_name, buildid in iteritems(self.buildIds):
|
||||
@@ -440,3 +446,5 @@ def print_cdash_link(self):
|
||||
build_url = build_url[0:build_url.find("submit.php")]
|
||||
build_url += "buildSummary.php?buildid={0}".format(buildid)
|
||||
print("{0}: {1}".format(package_name, build_url))
|
||||
if not self.success:
|
||||
raise SpackError("Errors encountered, see above for more details")
|
||||
|
@@ -111,6 +111,7 @@
|
||||
},
|
||||
'service-job-attributes': runner_selector_schema,
|
||||
'rebuild-index': {'type': 'boolean'},
|
||||
'broken-specs-url': {'type': 'string'},
|
||||
},
|
||||
)
|
||||
|
||||
|
@@ -12,6 +12,7 @@
|
||||
import sys
|
||||
import time
|
||||
import types
|
||||
import warnings
|
||||
from six import string_types
|
||||
|
||||
import archspec.cpu
|
||||
@@ -207,6 +208,9 @@ def __init__(self, asp=None):
|
||||
self.answers = []
|
||||
self.cores = []
|
||||
|
||||
# names of optimization criteria
|
||||
self.criteria = []
|
||||
|
||||
def print_cores(self):
|
||||
for core in self.cores:
|
||||
tty.msg(
|
||||
@@ -253,14 +257,11 @@ def __init__(self, cores=True, asp=None):
|
||||
# TODO: Find a way to vendor the concrete spec
|
||||
# in a cross-platform way
|
||||
with spack.bootstrap.ensure_bootstrap_configuration():
|
||||
generic_target = archspec.cpu.host().family
|
||||
spec_str = 'clingo-bootstrap@spack+python target={0}'.format(
|
||||
str(generic_target)
|
||||
)
|
||||
clingo_spec = spack.spec.Spec(spec_str)
|
||||
clingo_spec = spack.bootstrap.clingo_root_spec()
|
||||
clingo_spec._old_concretize()
|
||||
spack.bootstrap.make_module_available(
|
||||
'clingo', spec=clingo_spec, install=True)
|
||||
'clingo', spec=clingo_spec, install=True
|
||||
)
|
||||
import clingo
|
||||
self.out = asp or llnl.util.lang.Devnull()
|
||||
self.cores = cores
|
||||
@@ -357,6 +358,7 @@ def stringify(x):
|
||||
return x.string or str(x)
|
||||
|
||||
if result.satisfiable:
|
||||
# build spec from the best model
|
||||
builder = SpecBuilder(specs)
|
||||
min_cost, best_model = min(models)
|
||||
tuples = [
|
||||
@@ -364,8 +366,20 @@ def stringify(x):
|
||||
for sym in best_model
|
||||
]
|
||||
answers = builder.build_specs(tuples)
|
||||
|
||||
# add best spec to the results
|
||||
result.answers.append((list(min_cost), 0, answers))
|
||||
|
||||
# pull optimization criteria names out of the solution
|
||||
criteria = [
|
||||
(int(args[0]), args[1]) for name, args in tuples
|
||||
if name == "opt_criterion"
|
||||
]
|
||||
result.criteria = [t[1] for t in sorted(criteria, reverse=True)]
|
||||
|
||||
# record the number of models the solver considered
|
||||
result.nmodels = len(models)
|
||||
|
||||
elif cores:
|
||||
symbols = dict(
|
||||
(a.literal, a.symbol)
|
||||
@@ -396,6 +410,9 @@ class SpackSolverSetup(object):
|
||||
def __init__(self):
|
||||
self.gen = None # set by setup()
|
||||
self.possible_versions = {}
|
||||
self.versions_in_package_py = {}
|
||||
self.deprecated_versions = {}
|
||||
self.versions_from_externals = {}
|
||||
self.possible_virtuals = None
|
||||
self.possible_compilers = []
|
||||
self.variant_values_from_specs = set()
|
||||
@@ -449,13 +466,27 @@ def pkg_version_rules(self, pkg):
|
||||
# c) Numeric or string comparison
|
||||
v)
|
||||
|
||||
most_to_least_preferred = sorted(
|
||||
self.possible_versions[pkg.name], key=keyfn, reverse=True
|
||||
)
|
||||
# Compute which versions appear only in packages.yaml
|
||||
from_externals = self.versions_from_externals[pkg.name]
|
||||
from_package_py = self.versions_in_package_py[pkg.name]
|
||||
only_from_externals = from_externals - from_package_py
|
||||
|
||||
# These versions don't need a default weight, as they are
|
||||
# already weighted in a more favorable way when accounting
|
||||
# for externals. Assigning them a default weight would be
|
||||
# equivalent to stating that they are also declared in
|
||||
# the package.py file
|
||||
considered = self.possible_versions[pkg.name] - only_from_externals
|
||||
most_to_least_preferred = sorted(considered, key=keyfn, reverse=True)
|
||||
|
||||
for i, v in enumerate(most_to_least_preferred):
|
||||
self.gen.fact(fn.version_declared(pkg.name, v, i))
|
||||
|
||||
# Declare deprecated versions for this package, if any
|
||||
deprecated = self.deprecated_versions[pkg.name]
|
||||
for v in sorted(deprecated):
|
||||
self.gen.fact(fn.deprecated_version(pkg.name, v))
|
||||
|
||||
def spec_versions(self, spec):
|
||||
"""Return list of clauses expressing spec's version constraints."""
|
||||
spec = specify(spec)
|
||||
@@ -593,11 +624,16 @@ def pkg_rules(self, pkg, tests):
|
||||
values = []
|
||||
elif isinstance(values, spack.variant.DisjointSetsOfValues):
|
||||
union = set()
|
||||
for s in values.sets:
|
||||
# Encode the disjoint sets in the logic program
|
||||
for sid, s in enumerate(values.sets):
|
||||
for value in s:
|
||||
self.gen.fact(fn.variant_value_from_disjoint_sets(
|
||||
pkg.name, name, value, sid
|
||||
))
|
||||
union.update(s)
|
||||
values = union
|
||||
|
||||
# make sure that every variant has at least one posisble value
|
||||
# make sure that every variant has at least one possible value
|
||||
if not values:
|
||||
values = [variant.default]
|
||||
|
||||
@@ -778,6 +814,7 @@ def external_packages(self):
|
||||
self.gen.fact(
|
||||
fn.possible_external(condition_id, pkg_name, local_idx)
|
||||
)
|
||||
self.versions_from_externals[spec.name].add(spec.version)
|
||||
self.possible_versions[spec.name].add(spec.version)
|
||||
self.gen.newline()
|
||||
|
||||
@@ -986,12 +1023,19 @@ class Body(object):
|
||||
|
||||
def build_version_dict(self, possible_pkgs, specs):
|
||||
"""Declare any versions in specs not declared in packages."""
|
||||
self.possible_versions = collections.defaultdict(lambda: set())
|
||||
self.possible_versions = collections.defaultdict(set)
|
||||
self.versions_in_package_py = collections.defaultdict(set)
|
||||
self.versions_from_externals = collections.defaultdict(set)
|
||||
self.deprecated_versions = collections.defaultdict(set)
|
||||
|
||||
for pkg_name in possible_pkgs:
|
||||
pkg = spack.repo.get(pkg_name)
|
||||
for v in pkg.versions:
|
||||
for v, version_info in pkg.versions.items():
|
||||
self.versions_in_package_py[pkg_name].add(v)
|
||||
self.possible_versions[pkg_name].add(v)
|
||||
deprecated = version_info.get('deprecated', False)
|
||||
if deprecated:
|
||||
self.deprecated_versions[pkg_name].add(v)
|
||||
|
||||
for spec in specs:
|
||||
for dep in spec.traverse():
|
||||
@@ -1007,7 +1051,9 @@ def _supported_targets(self, compiler_name, compiler_version, targets):
|
||||
|
||||
for target in targets:
|
||||
try:
|
||||
target.optimization_flags(compiler_name, compiler_version)
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("ignore")
|
||||
target.optimization_flags(compiler_name, compiler_version)
|
||||
supported.append(target)
|
||||
except archspec.cpu.UnsupportedMicroarchitecture:
|
||||
continue
|
||||
@@ -1357,7 +1403,10 @@ def setup(self, driver, specs, tests=False):
|
||||
)
|
||||
for clause in self.spec_clauses(spec):
|
||||
self.gen.fact(clause)
|
||||
|
||||
if clause.name == 'variant_set':
|
||||
self.gen.fact(fn.variant_default_value_from_cli(
|
||||
*clause.args
|
||||
))
|
||||
self.gen.h1("Variant Values defined in specs")
|
||||
self.define_variant_values()
|
||||
|
||||
@@ -1514,6 +1563,10 @@ def reorder_flags(self):
|
||||
check_same_flags(spec.compiler_flags, flags)
|
||||
spec.compiler_flags.update(flags)
|
||||
|
||||
def deprecated(self, pkg, version):
|
||||
msg = 'using "{0}@{1}" which is a deprecated version'
|
||||
tty.warn(msg.format(pkg, version))
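So concretizing something like openssl@1.0.2, assuming that version were marked deprecated in its package.py, would be expected to emit a warning along the lines of `==> Warning: using "openssl@1.0.2" which is a deprecated version` (package and version chosen only for illustration).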
|
||||
|
||||
def build_specs(self, function_tuples):
|
||||
# Functions don't seem to be in particular order in output. Sort
|
||||
# them here so that directives that build objects (like node and
|
||||
|
@@ -19,13 +19,18 @@ version_declared(Package, Version) :- version_declared(Package, Version, _).
|
||||
1 { version(Package, Version) : version_declared(Package, Version) } 1
|
||||
:- node(Package).
|
||||
|
||||
version_weight(Package, Weight)
|
||||
% If we select a deprecated version, mark the package as deprecated
|
||||
deprecated(Package, Version) :- version(Package, Version), deprecated_version(Package, Version).
|
||||
|
||||
possible_version_weight(Package, Weight)
|
||||
:- version(Package, Version), version_declared(Package, Version, Weight),
|
||||
not preferred_version_declared(Package, Version, _).
|
||||
|
||||
version_weight(Package, Weight)
|
||||
possible_version_weight(Package, Weight)
|
||||
:- version(Package, Version), preferred_version_declared(Package, Version, Weight).
|
||||
|
||||
1 { version_weight(Package, Weight) : possible_version_weight(Package, Weight) } 1 :- node(Package).
|
||||
|
||||
% version_satisfies implies that exactly one of the satisfying versions
|
||||
% is the package's version, and vice versa.
|
||||
1 { version(Package, Version) : version_satisfies(Package, Constraint, Version) } 1
|
||||
@@ -35,6 +40,7 @@ version_satisfies(Package, Constraint)
|
||||
|
||||
#defined preferred_version_declared/3.
|
||||
#defined version_satisfies/3.
|
||||
#defined deprecated_version/2.
|
||||
|
||||
%-----------------------------------------------------------------------------
|
||||
% Spec conditions and imposed constraints
|
||||
@@ -350,6 +356,15 @@ variant_set(Package, Variant) :- variant_set(Package, Variant, _).
|
||||
% A variant cannot have a value that is not also a possible value
|
||||
:- variant_value(Package, Variant, Value), not variant_possible_value(Package, Variant, Value).
|
||||
|
||||
% Some multi valued variants accept multiple values from disjoint sets.
|
||||
% Ensure that we respect that constraint and we don't pick values from more
|
||||
% than one set at once
|
||||
:- variant_value(Package, Variant, Value1),
|
||||
variant_value(Package, Variant, Value2),
|
||||
variant_value_from_disjoint_sets(Package, Variant, Value1, Set1),
|
||||
variant_value_from_disjoint_sets(Package, Variant, Value2, Set2),
|
||||
Set1 != Set2.
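On the Python side these facts come from variants declared with DisjointSetsOfValues; a hedged sketch of what such a declaration could look like in a package.py (the package, values, and use of the disjoint_sets helper from spack.variant are illustrative assumptions, not taken from this changeset):

# Hypothetical package used only to illustrate disjoint value sets.
from spack import *
from spack.variant import disjoint_sets


class Mylib(Package):
    """Toy package: 'auto' may not be combined with an explicit manager."""

    homepage = "https://example.com/mylib"
    url = "https://example.com/mylib-1.0.tar.gz"

    version('1.0', sha256=64 * '0')

    variant(
        'process_managers',
        description='Process managers to enable',
        values=disjoint_sets(
            ('auto',), ('slurm', 'hydra', 'gforker')
        ).with_default('auto'),
    )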
|
||||
|
||||
% variant_set is an explicitly set variant value. If it's not 'set',
|
||||
% we revert to the default value. If it is set, we force the set value
|
||||
variant_value(Package, Variant, Value)
|
||||
@@ -357,32 +372,58 @@ variant_value(Package, Variant, Value)
|
||||
variant(Package, Variant),
|
||||
variant_set(Package, Variant, Value).
|
||||
|
||||
% prefer default values.
|
||||
% The rules below allow us to prefer default values for variants
|
||||
% whenever possible. If a variant is set in a spec, or if it is
|
||||
% specified in an external, we score it as if it was a default value.
|
||||
variant_not_default(Package, Variant, Value, 1)
|
||||
:- variant_value(Package, Variant, Value),
|
||||
not variant_default_value(Package, Variant, Value),
|
||||
not variant_set(Package, Variant, Value),
|
||||
not external_with_variant_set(Package, Variant, Value),
|
||||
node(Package).
|
||||
|
||||
% We are using the default value for a variant
|
||||
variant_not_default(Package, Variant, Value, 0)
|
||||
:- variant_value(Package, Variant, Value),
|
||||
variant_default_value(Package, Variant, Value),
|
||||
node(Package).
|
||||
|
||||
% The variant is set in the spec
|
||||
variant_not_default(Package, Variant, Value, 0)
|
||||
:- variant_value(Package, Variant, Value),
|
||||
variant_set(Package, Variant, Value),
|
||||
node(Package).
|
||||
|
||||
% The variant is set in an external spec
|
||||
external_with_variant_set(Package, Variant, Value)
|
||||
:- variant_value(Package, Variant, Value),
|
||||
condition_requirement(ID, "variant_value", Package, Variant, Value),
|
||||
possible_external(ID, Package, _),
|
||||
external(Package),
|
||||
node(Package).
|
||||
|
||||
% The default value for a variant in a package is what is written
|
||||
% in the package.py file, unless some preference is set in packages.yaml
|
||||
variant_not_default(Package, Variant, Value, 0)
|
||||
:- variant_value(Package, Variant, Value),
|
||||
external_with_variant_set(Package, Variant, Value),
|
||||
node(Package).
|
||||
|
||||
% The default value for a variant in a package is what is prescribed:
|
||||
%
|
||||
% 1. On the command line
|
||||
% 2. In packages.yaml (if there's no command line settings)
|
||||
% 3. In the package.py file (if there are no settings in
|
||||
% packages.yaml and the command line)
|
||||
%
|
||||
variant_default_value(Package, Variant, Value)
|
||||
:- variant_default_value_from_package_py(Package, Variant, Value),
|
||||
not variant_default_value_from_packages_yaml(Package, Variant, _).
|
||||
not variant_default_value_from_packages_yaml(Package, Variant, _),
|
||||
not variant_default_value_from_cli(Package, Variant, _).
|
||||
|
||||
variant_default_value(Package, Variant, Value)
|
||||
:- variant_default_value_from_packages_yaml(Package, Variant, Value).
|
||||
:- variant_default_value_from_packages_yaml(Package, Variant, Value),
|
||||
not variant_default_value_from_cli(Package, Variant, _).
|
||||
|
||||
variant_default_value(Package, Variant, Value) :- variant_default_value_from_cli(Package, Variant, Value).
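Concretely: if a user spec sets a variant on the command line (say `+mpi`), that value is what the solver treats as the default; a `variants:` entry in packages.yaml is only consulted when the command line is silent, and the package.py default only when neither of the other two sources says anything.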
|
||||
|
||||
% Treat 'none' in a special way - it cannot be combined with other
|
||||
% values even if the variant is multi-valued
|
||||
@@ -407,8 +448,10 @@ variant_single_value(Package, "dev_path")
#defined variant_single_value/2.
#defined variant_default_value/3.
#defined variant_possible_value/3.
#defined variant_default_value_from_cli/3.
#defined variant_default_value_from_packages_yaml/3.
#defined variant_default_value_from_package_py/3.
#defined variant_value_from_disjoint_sets/4.

%-----------------------------------------------------------------------------
% Platform semantics
@@ -449,6 +492,8 @@ node_os_inherit(Dependency, OS)
not node_os_set(Dependency).
node_os_inherit(Package) :- node_os_inherit(Package, _).

node_os(Package, OS) :- node_os_inherit(Package, OS).

% fall back to default if not set or inherited
node_os(Package, OS)
:- node(Package),
@@ -504,29 +549,19 @@ node_target_weight(Package, Weight)
node_target(Package, Target),
target_weight(Target, Package, Weight).

% compatibility rules for targets among nodes
node_target_match_pref(Dependency, Target)
:- depends_on(Package, Dependency),
node_target_match_pref(Package, Target),
not node_target_set(Dependency, _).

node_target_match_pref(Dependency, Target)
:- depends_on(Package, Dependency),
node_target_set(Package, Target),
not node_target_match_pref(Package, Target),
not node_target_set(Dependency, _).

node_target_match_pref(Dependency, Target)
:- depends_on(Package, Dependency),
root(Package), node_target(Package, Target),
not node_target_match_pref(Package, _).

node_target_match(Package, 1)
:- node_target(Package, Target), node_target_match_pref(Package, Target).

derive_target_from_parent(Parent, Package)
:- depends_on(Parent, Package), not package_target_weight(_, Package, _).
:- depends_on(Parent, Package),
not package_target_weight(_, Package, _).

% compatibility rules for targets among nodes
node_target_match(Parent, Dependency)
:- depends_on(Parent, Dependency),
node_target(Parent, Target),
node_target(Dependency, Target).

node_target_mismatch(Parent, Dependency)
:- depends_on(Parent, Dependency),
not node_target_match(Parent, Dependency).

#defined node_target_set/2.
#defined package_target_weight/3.
@@ -572,32 +607,16 @@ node_compiler_version(Package, Compiler, Version) :- node_compiler_version_set(P
not compiler_supports_os(Compiler, Version, OS),
not allow_compiler(Compiler, Version).

% If the compiler is what was prescribed from command line etc.
% or is the same as a root node, there is a version match
% If a package and one of its dependencies don't have the
% same compiler there's a mismatch.
compiler_match(Package, Dependency)
:- depends_on(Package, Dependency),
node_compiler_version(Package, Compiler, Version),
node_compiler_version(Dependency, Compiler, Version).

% Compiler prescribed in the root spec
node_compiler_version_match_pref(Package, Compiler, V)
:- node_compiler_set(Package, Compiler),
node_compiler_version(Package, Compiler, V),
not external(Package).

% Compiler inherited from a parent node
node_compiler_version_match_pref(Dependency, Compiler, V)
:- depends_on(Package, Dependency),
node_compiler_version_match_pref(Package, Compiler, V),
node_compiler_version(Dependency, Compiler, V),
not node_compiler_set(Dependency, Compiler).

% Compiler inherited from the root package
node_compiler_version_match_pref(Dependency, Compiler, V)
:- depends_on(Package, Dependency),
node_compiler_version(Package, Compiler, V), root(Package),
node_compiler_version(Dependency, Compiler, V),
not node_compiler_set(Dependency, Compiler).

compiler_version_match(Package, 1)
:- node_compiler_version(Package, Compiler, V),
node_compiler_version_match_pref(Package, Compiler, V).
compiler_mismatch(Package, Dependency)
:- depends_on(Package, Dependency),
not compiler_match(Package, Dependency).

#defined node_compiler_set/2.
#defined node_compiler_version_set/3.
@@ -676,67 +695,101 @@ no_flags(Package, FlagType)
%-----------------------------------------------------------------------------
% How to optimize the spec (high to low priority)
%-----------------------------------------------------------------------------
% Each criterion below has:
% 1. an opt_criterion(ID, Name) fact that describes the criterion, and
% 2. a `#minimize{ 0@2 : #true }.` statement that ensures the criterion
% is displayed (clingo doesn't display sums over empty sets by default)

% Minimize the number of deprecated versions being used
opt_criterion(16, "deprecated versions used").
#minimize{ 0@16 : #true }.
#minimize{ 1@16,Package : deprecated(Package, _)}.

% The highest priority is to minimize the:
% 1. Version weight
% 2. Number of variants with a non default value, if not set
% for the root(Package)
#minimize { Weight@15 : root(Package),version_weight(Package, Weight)}.
opt_criterion(15, "version weight").
#minimize{ 0@15 : #true }.
#minimize { Weight@15 : root(Package),version_weight(Package, Weight) }.

opt_criterion(14, "number of non-default variants (roots)").
#minimize{ 0@14 : #true }.
#minimize {
Weight@14,Package,Variant,Value
: variant_not_default(Package, Variant, Value, Weight), root(Package)
}.

% If the value is a multivalued variant there could be multiple
% values set as default. Since a default value has a weight of 0 we
% need to maximize their number below to ensure they're all set
opt_criterion(13, "multi-valued variants").
#minimize{ 0@13 : #true }.
#maximize {
1@13,Package,Variant,Value
: variant_not_default(Package, Variant, Value, Weight),
not variant_single_value(Package, Variant),
root(Package)
}.
opt_criterion(12, "preferred providers for roots").
#minimize{ 0@12 : #true }.
#minimize{
Weight@13,Provider
Weight@12,Provider
: provider_weight(Provider, Weight), root(Provider)
}.

% Try to use default variants or variants that have been set
opt_criterion(11, "number of non-default variants (non-roots)").
#minimize{ 0@11 : #true }.
#minimize {
Weight@11,Package,Variant,Value
: variant_not_default(Package, Variant, Value, Weight), not root(Package)
}.

% Minimize the weights of the providers, i.e. use as much as
% possible the most preferred providers
opt_criterion(9, "number of non-default providers (non-roots)").
#minimize{ 0@9 : #true }.
#minimize{
Weight@9,Provider
: provider_weight(Provider, Weight), not root(Provider)
}.

% If the value is a multivalued variant there could be multiple
% values set as default. Since a default value has a weight of 0 we
% need to maximize their number below to ensure they're all set
opt_criterion(8, "count of non-root multi-valued variants").
#minimize{ 0@8 : #true }.
#maximize {
1@8,Package,Variant,Value
: variant_not_default(Package, Variant, Value, Weight),
: variant_not_default(Package, Variant, Value, _),
not variant_single_value(Package, Variant),
not root(Package)
}.

% Try to maximize the number of compiler matches in the DAG,
% while minimizing the number of nodes. This is done because
% a maximization on the number of matches for compilers is highly
% correlated to a preference to have as many nodes as possible
#minimize{ 1@7,Package : node(Package) }.
#maximize{ Weight@7,Package : compiler_version_match(Package, Weight) }.
% Try to minimize the number of compiler mismatches in the DAG.
opt_criterion(7, "compiler mismatches").
#minimize{ 0@7 : #true }.
#minimize{ 1@7,Package,Dependency : compiler_mismatch(Package, Dependency) }.

% Choose more recent versions for nodes
opt_criterion(6, "version badness").
#minimize{ 0@6 : #true }.
#minimize{
Weight@6,Package : version_weight(Package, Weight)
}.

% Try to use preferred compilers
opt_criterion(5, "non-preferred compilers").
#minimize{ 0@5 : #true }.
#minimize{ Weight@5,Package : compiler_weight(Package, Weight) }.

% Maximize the number of matches for targets in the DAG, try
% Minimize the number of mismatches for targets in the DAG, try
% to select the preferred target.
#maximize{ Weight@4,Package : node_target_match(Package, Weight) }.
opt_criterion(4, "target mismatches").
#minimize{ 0@4 : #true }.
#minimize{ 1@4,Package,Dependency : node_target_mismatch(Package, Dependency) }.

opt_criterion(3, "non-preferred targets").
#minimize{ 0@3 : #true }.
#minimize{ Weight@3,Package : node_target_weight(Package, Weight) }.
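
The opt_criterion(ID, Name) facts and the #minimize{ 0@ID : #true }. statements above only name each priority level and force it to be reported; the actual costs come back from clingo as one value per level. The following sketch is a hypothetical, self-contained illustration of that pattern using the clingo Python API on a made-up two-level program; it is not part of this changeset and the real solver driver in Spack differs.

# Hypothetical sketch (not from this changeset): pair clingo's per-priority
# optimization costs with opt_criterion names, mirroring the convention above.
import clingo

TOY_PROGRAM = """
opt_criterion(2, "toy high-priority cost").
opt_criterion(1, "toy low-priority cost").
#minimize{ 0@2 : #true }.
#minimize{ 0@1 : #true }.
{ a; b }.
:- not a, not b.
#minimize{ 3@2 : a }.
#minimize{ 5@1 : b }.
"""

def solve_toy():
    ctl = clingo.Control()
    ctl.add("base", [], TOY_PROGRAM)
    ctl.ground([("base", [])])

    results = {}

    def on_model(model):
        # model.cost has one entry per priority level (higher levels are
        # reported first); the names come from the opt_criterion/2 facts.
        crits = sorted(
            ((s.arguments[0].number, str(s.arguments[1]))
             for s in model.symbols(atoms=True) if s.name == "opt_criterion"),
            reverse=True)
        results["criteria"] = [name for _, name in crits]
        results["costs"] = list(model.cost)

    ctl.solve(on_model=on_model)
    return results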
@@ -8,6 +8,9 @@
|
||||
%
|
||||
% This section determines what parts of the model are printed at the end
|
||||
%==============================================================================
|
||||
|
||||
% Spec-related functions.
|
||||
% Used to build the result of the solve.
|
||||
#show node/1.
|
||||
#show depends_on/3.
|
||||
#show version/2.
|
||||
@@ -21,12 +24,10 @@
|
||||
#show node_flag_compiler_default/1.
|
||||
#show node_flag_source/2.
|
||||
#show no_flags/2.
|
||||
|
||||
#show variant_not_default/4.
|
||||
#show provider_weight/2.
|
||||
#show version_weight/2.
|
||||
#show compiler_version_match/2.
|
||||
#show compiler_weight/2.
|
||||
#show node_target_match/2.
|
||||
#show node_target_weight/2.
|
||||
#show external_spec_selected/2.
|
||||
|
||||
% names of optimization criteria
|
||||
#show opt_criterion/2.
|
||||
|
||||
% deprecated packages
|
||||
#show deprecated/2.
|
||||
|
@@ -76,10 +76,8 @@
|
||||
specs to avoid ambiguity. Both are provided because ~ can cause shell
|
||||
expansion when it is the first character in an id typed on the command line.
|
||||
"""
|
||||
import base64
|
||||
import sys
|
||||
import collections
|
||||
import hashlib
|
||||
import itertools
|
||||
import operator
|
||||
import os
|
||||
@@ -108,6 +106,7 @@
|
||||
import spack.store
|
||||
import spack.util.crypto
|
||||
import spack.util.executable
|
||||
import spack.util.hash
|
||||
import spack.util.module_cmd as md
|
||||
import spack.util.prefix
|
||||
import spack.util.spack_json as sjson
|
||||
@@ -203,7 +202,7 @@ def __call__(self, match):
|
||||
return clr.colorize(re.sub(_separators, insert_color(), str(spec)) + '@.')
|
||||
|
||||
|
||||
@lang.key_ordering
|
||||
@lang.lazy_lexicographic_ordering
|
||||
class ArchSpec(object):
|
||||
def __init__(self, spec_or_platform_tuple=(None, None, None)):
|
||||
""" Architecture specification a package should be built with.
|
||||
@@ -252,8 +251,10 @@ def _autospec(self, spec_like):
|
||||
return spec_like
|
||||
return ArchSpec(spec_like)
|
||||
|
||||
def _cmp_key(self):
|
||||
return self.platform, self.os, self.target
|
||||
def _cmp_iter(self):
|
||||
yield self.platform
|
||||
yield self.os
|
||||
yield self.target
|
||||
|
||||
def _dup(self, other):
|
||||
self.platform = other.platform
|
||||
@@ -534,7 +535,7 @@ def __contains__(self, string):
|
||||
return string in str(self) or string in self.target
|
||||
|
||||
|
||||
@lang.key_ordering
|
||||
@lang.lazy_lexicographic_ordering
|
||||
class CompilerSpec(object):
|
||||
"""The CompilerSpec field represents the compiler or range of compiler
|
||||
versions that a package should be built with. CompilerSpecs have a
|
||||
@@ -623,8 +624,9 @@ def copy(self):
|
||||
clone.versions = self.versions.copy()
|
||||
return clone
|
||||
|
||||
def _cmp_key(self):
|
||||
return (self.name, self.versions)
|
||||
def _cmp_iter(self):
|
||||
yield self.name
|
||||
yield self.versions
|
||||
|
||||
def to_dict(self):
|
||||
d = syaml.syaml_dict([('name', self.name)])
|
||||
@@ -648,7 +650,7 @@ def __repr__(self):
|
||||
return str(self)
|
||||
|
||||
|
||||
@lang.key_ordering
|
||||
@lang.lazy_lexicographic_ordering
|
||||
class DependencySpec(object):
|
||||
"""DependencySpecs connect two nodes in the DAG, and contain deptypes.
|
||||
|
||||
@@ -686,10 +688,10 @@ def add_type(self, type):
|
||||
self.deptypes + dp.canonical_deptype(type)
|
||||
)
|
||||
|
||||
def _cmp_key(self):
|
||||
return (self.parent.name if self.parent else None,
|
||||
self.spec.name if self.spec else None,
|
||||
self.deptypes)
|
||||
def _cmp_iter(self):
|
||||
yield self.parent.name if self.parent else None
|
||||
yield self.spec.name if self.spec else None
|
||||
yield self.deptypes
|
||||
|
||||
def __str__(self):
|
||||
return "%s %s--> %s" % (self.parent.name if self.parent else None,
|
||||
@@ -747,8 +749,15 @@ def copy(self):
|
||||
clone[name] = value
|
||||
return clone
|
||||
|
||||
def _cmp_key(self):
|
||||
return tuple((k, tuple(v)) for k, v in sorted(six.iteritems(self)))
|
||||
def _cmp_iter(self):
|
||||
for k, v in sorted(self.items()):
|
||||
yield k
|
||||
|
||||
def flags():
|
||||
for flag in v:
|
||||
yield flag
|
||||
|
||||
yield flags
|
||||
|
||||
def __str__(self):
|
||||
sorted_keys = [k for k in sorted(self.keys()) if self[k] != []]
|
||||
@@ -1016,7 +1025,7 @@ def __init__(self, spec, name, query_parameters):
|
||||
)
|
||||
|
||||
|
||||
@lang.key_ordering
|
||||
@lang.lazy_lexicographic_ordering(set_hash=False)
|
||||
class Spec(object):
|
||||
|
||||
#: Cache for spec's prefix, computed lazily in the corresponding property
|
||||
@@ -1060,7 +1069,7 @@ def __init__(self, spec_like=None,
|
||||
|
||||
self._hash = None
|
||||
self._build_hash = None
|
||||
self._cmp_key_cache = None
|
||||
self._dunder_hash = None
|
||||
self._package = None
|
||||
|
||||
# Most of these are internal implementation details that can be
|
||||
@@ -1382,7 +1391,16 @@ def traverse_edges(self, visited=None, d=0, deptype='all',
|
||||
cover = kwargs.get('cover', 'nodes')
|
||||
direction = kwargs.get('direction', 'children')
|
||||
order = kwargs.get('order', 'pre')
|
||||
deptype = dp.canonical_deptype(deptype)
|
||||
|
||||
# we don't want to run canonical_deptype every time through
|
||||
# traverse, because it is somewhat expensive. This ensures we
|
||||
# canonicalize only once.
|
||||
canonical_deptype = kwargs.get("canonical_deptype", None)
|
||||
if canonical_deptype is None:
|
||||
deptype = dp.canonical_deptype(deptype)
|
||||
kwargs["canonical_deptype"] = deptype
|
||||
else:
|
||||
deptype = canonical_deptype
|
||||
|
||||
# Make sure kwargs have legal values; raise ValueError if not.
|
||||
def validate(name, val, allowed_values):
|
||||
@@ -1486,13 +1504,7 @@ def _spec_hash(self, hash):
|
||||
# this when we move to using package hashing on all specs.
|
||||
node_dict = self.to_node_dict(hash=hash)
|
||||
yaml_text = syaml.dump(node_dict, default_flow_style=True)
|
||||
sha = hashlib.sha1(yaml_text.encode('utf-8'))
|
||||
b32_hash = base64.b32encode(sha.digest()).lower()
|
||||
|
||||
if sys.version_info[0] >= 3:
|
||||
b32_hash = b32_hash.decode('utf-8')
|
||||
|
||||
return b32_hash
|
||||
return spack.util.hash.b32_hash(yaml_text)
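
The inline sha1/base32 logic removed above presumably now lives in spack.util.hash.b32_hash. A minimal sketch of that helper, reconstructed from the removed lines (the real module is lib/spack/spack/util/hash.py and may differ):

# Sketch of spack.util.hash.b32_hash as implied by the removed lines above.
import base64
import hashlib
import sys


def b32_hash(content):
    """Return a lowercase base32 string of the sha1 digest of ``content``."""
    sha = hashlib.sha1(content.encode('utf-8'))
    b32 = base64.b32encode(sha.digest()).lower()
    if sys.version_info[0] >= 3:
        b32 = b32.decode('utf-8')  # b32encode returns bytes on Python 3
    return b32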
|
||||
|
||||
def _cached_hash(self, hash, length=None):
|
||||
"""Helper function for storing a cached hash on the spec.
|
||||
@@ -1548,7 +1560,7 @@ def full_hash(self, length=None):
|
||||
|
||||
def dag_hash_bit_prefix(self, bits):
|
||||
"""Get the first <bits> bits of the DAG hash as an integer type."""
|
||||
return base32_prefix_bits(self.dag_hash(), bits)
|
||||
return spack.util.hash.base32_prefix_bits(self.dag_hash(), bits)
|
||||
|
||||
def to_node_dict(self, hash=ht.dag_hash):
|
||||
"""Create a dictionary representing the state of this Spec.
|
||||
@@ -1642,7 +1654,13 @@ def to_node_dict(self, hash=ht.dag_hash):
|
||||
d['patches'] = variant._patches_in_order_of_appearance
|
||||
|
||||
if hash.package_hash:
|
||||
d['package_hash'] = self.package.content_hash()
|
||||
package_hash = self.package.content_hash()
|
||||
|
||||
# Full hashes are in bytes
|
||||
if (not isinstance(package_hash, six.text_type)
|
||||
and isinstance(package_hash, six.binary_type)):
|
||||
package_hash = package_hash.decode('utf-8')
|
||||
d['package_hash'] = package_hash
|
||||
|
||||
deps = self.dependencies_dict(deptype=hash.deptype)
|
||||
if deps:
|
||||
@@ -3175,25 +3193,23 @@ def satisfies(self, other, deps=True, strict=False, strict_deps=False):
|
||||
if other.concrete:
|
||||
return self.concrete and self.dag_hash() == other.dag_hash()
|
||||
|
||||
# A concrete provider can satisfy a virtual dependency.
|
||||
if not self.virtual and other.virtual:
|
||||
try:
|
||||
pkg = spack.repo.get(self.fullname)
|
||||
except spack.repo.UnknownEntityError:
|
||||
# If we can't get package info on this spec, don't treat
|
||||
# it as a provider of this vdep.
|
||||
return False
|
||||
|
||||
if pkg.provides(other.name):
|
||||
for provided, when_specs in pkg.provided.items():
|
||||
if any(self.satisfies(when_spec, deps=False, strict=strict)
|
||||
for when_spec in when_specs):
|
||||
if provided.satisfies(other):
|
||||
return True
|
||||
return False
|
||||
|
||||
# Otherwise, first thing we care about is whether the name matches
|
||||
# If the names are different, we need to consider virtuals
|
||||
if self.name != other.name and self.name and other.name:
|
||||
# A concrete provider can satisfy a virtual dependency.
|
||||
if not self.virtual and other.virtual:
|
||||
try:
|
||||
pkg = spack.repo.get(self.fullname)
|
||||
except spack.repo.UnknownEntityError:
|
||||
# If we can't get package info on this spec, don't treat
|
||||
# it as a provider of this vdep.
|
||||
return False
|
||||
|
||||
if pkg.provides(other.name):
|
||||
for provided, when_specs in pkg.provided.items():
|
||||
if any(self.satisfies(when, deps=False, strict=strict)
|
||||
for when in when_specs):
|
||||
if provided.satisfies(other):
|
||||
return True
|
||||
return False
|
||||
|
||||
# namespaces either match, or other doesn't require one.
|
||||
@@ -3349,7 +3365,7 @@ def _dup(self, other, deps=True, cleardeps=True, caches=None):
|
||||
before possibly copying the dependencies of ``other`` onto
|
||||
``self``
|
||||
caches (bool or None): preserve cached fields such as
|
||||
``_normal``, ``_hash``, and ``_cmp_key_cache``. By
|
||||
``_normal``, ``_hash``, and ``_dunder_hash``. By
|
||||
default this is ``False`` if DAG structure would be
|
||||
changed by the copy, ``True`` if it's an exact copy.
|
||||
|
||||
@@ -3423,13 +3439,13 @@ def _dup(self, other, deps=True, cleardeps=True, caches=None):
|
||||
if caches:
|
||||
self._hash = other._hash
|
||||
self._build_hash = other._build_hash
|
||||
self._cmp_key_cache = other._cmp_key_cache
|
||||
self._dunder_hash = other._dunder_hash
|
||||
self._normal = other._normal
|
||||
self._full_hash = other._full_hash
|
||||
else:
|
||||
self._hash = None
|
||||
self._build_hash = None
|
||||
self._cmp_key_cache = None
|
||||
self._dunder_hash = None
|
||||
self._normal = False
|
||||
self._full_hash = None
|
||||
|
||||
@@ -3548,18 +3564,17 @@ def __contains__(self, spec):
|
||||
else:
|
||||
return any(s.satisfies(spec) for s in self.traverse(root=False))
|
||||
|
||||
def sorted_deps(self):
|
||||
"""Return a list of all dependencies sorted by name."""
|
||||
deps = self.flat_dependencies()
|
||||
return tuple(deps[name] for name in sorted(deps))
|
||||
def eq_dag(self, other, deptypes=True, vs=None, vo=None):
|
||||
"""True if the full dependency DAGs of specs are equal."""
|
||||
if vs is None:
|
||||
vs = set()
|
||||
if vo is None:
|
||||
vo = set()
|
||||
|
||||
def _eq_dag(self, other, vs, vo, deptypes):
|
||||
"""Recursive helper for eq_dag and ne_dag. Does the actual DAG
|
||||
traversal."""
|
||||
vs.add(id(self))
|
||||
vo.add(id(other))
|
||||
|
||||
if self.ne_node(other):
|
||||
if not self.eq_node(other):
|
||||
return False
|
||||
|
||||
if len(self._dependencies) != len(other._dependencies):
|
||||
@@ -3587,58 +3602,38 @@ def _eq_dag(self, other, vs, vo, deptypes):
|
||||
continue
|
||||
|
||||
# Recursive check for equality
|
||||
if not s._eq_dag(o, vs, vo, deptypes):
|
||||
if not s.eq_dag(o, deptypes, vs, vo):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def eq_dag(self, other, deptypes=True):
|
||||
"""True if the full dependency DAGs of specs are equal."""
|
||||
return self._eq_dag(other, set(), set(), deptypes)
|
||||
|
||||
def ne_dag(self, other, deptypes=True):
|
||||
"""True if the full dependency DAGs of specs are not equal."""
|
||||
return not self.eq_dag(other, set(), set(), deptypes)
|
||||
|
||||
def _cmp_node(self):
|
||||
"""Comparison key for just *this node* and not its deps."""
|
||||
# Name or namespace None will lead to invalid comparisons for abstract
|
||||
# specs. Replace them with the empty string, which is not a valid spec
|
||||
# name nor namespace so it will not create spurious equalities.
|
||||
return (self.name or '',
|
||||
self.namespace or '',
|
||||
tuple(self.versions),
|
||||
self.variants,
|
||||
self.architecture,
|
||||
self.compiler,
|
||||
self.compiler_flags)
|
||||
"""Yield comparable elements of just *this node* and not its deps."""
|
||||
yield self.name
|
||||
yield self.namespace
|
||||
yield self.versions
|
||||
yield self.variants
|
||||
yield self.compiler
|
||||
yield self.compiler_flags
|
||||
yield self.architecture
|
||||
|
||||
def eq_node(self, other):
|
||||
"""Equality with another spec, not including dependencies."""
|
||||
return self._cmp_node() == other._cmp_node()
|
||||
return (other is not None) and lang.lazy_eq(
|
||||
self._cmp_node, other._cmp_node
|
||||
)
|
||||
|
||||
def ne_node(self, other):
|
||||
"""Inequality with another spec, not including dependencies."""
|
||||
return self._cmp_node() != other._cmp_node()
|
||||
def _cmp_iter(self):
|
||||
"""Lazily yield components of self for comparison."""
|
||||
for item in self._cmp_node():
|
||||
yield item
|
||||
|
||||
def _cmp_key(self):
|
||||
"""This returns a key for the spec *including* DAG structure.
|
||||
|
||||
The key is the concatenation of:
|
||||
1. A tuple describing this node in the DAG.
|
||||
2. The hash of each of this node's dependencies' cmp_keys.
|
||||
"""
|
||||
if self._cmp_key_cache:
|
||||
return self._cmp_key_cache
|
||||
|
||||
dep_tuple = tuple(
|
||||
(d.spec.name, hash(d.spec), tuple(sorted(d.deptypes)))
|
||||
for name, d in sorted(self._dependencies.items()))
|
||||
|
||||
key = (self._cmp_node(), dep_tuple)
|
||||
if self._concrete:
|
||||
self._cmp_key_cache = key
|
||||
return key
|
||||
def deps():
|
||||
for _, dep in sorted(self._dependencies.items()):
|
||||
yield dep.spec.name
|
||||
yield tuple(sorted(dep.deptypes))
|
||||
yield hash(dep.spec)
|
||||
yield deps
|
||||
|
||||
def colorized(self):
|
||||
return colorize_spec(self)
|
||||
@@ -3878,7 +3873,9 @@ def write_attribute(spec, attribute, color):
|
||||
'Format string terminated while reading attribute.'
|
||||
'Missing terminating }.'
|
||||
)
|
||||
return out.getvalue()
|
||||
|
||||
formatted_spec = out.getvalue()
|
||||
return formatted_spec.strip()
|
||||
|
||||
def old_format(self, format_string='$_$@$%@+$+$=', **kwargs):
|
||||
"""
|
||||
@@ -4134,12 +4131,12 @@ def cformat(self, *args, **kwargs):
|
||||
kwargs.setdefault('color', None)
|
||||
return self.format(*args, **kwargs)
|
||||
|
||||
def dep_string(self):
|
||||
return ''.join(" ^" + dep.format() for dep in self.sorted_deps())
|
||||
|
||||
def __str__(self):
|
||||
ret = self.format() + self.dep_string()
|
||||
return ret.strip()
|
||||
sorted_nodes = [self] + sorted(
|
||||
self.traverse(root=False), key=lambda x: x.name
|
||||
)
|
||||
spec_str = " ^".join(d.format() for d in sorted_nodes)
|
||||
return spec_str.strip()
|
||||
|
||||
def install_status(self):
|
||||
"""Helper for tree to print DB install status."""
|
||||
@@ -4330,6 +4327,22 @@ def clear_cached_hashes(self):
|
||||
if hasattr(self, attr):
|
||||
setattr(self, attr, None)
|
||||
|
||||
def __hash__(self):
|
||||
# If the spec is concrete, we leverage the DAG hash and just use
|
||||
# a 64-bit prefix of it. The DAG hash has the advantage that it's
|
||||
# computed once per concrete spec, and it's saved -- so if we
|
||||
# read concrete specs we don't need to recompute the whole hash.
|
||||
# This is good for large, unchanging specs.
|
||||
if self.concrete:
|
||||
if not self._dunder_hash:
|
||||
self._dunder_hash = self.dag_hash_bit_prefix(64)
|
||||
return self._dunder_hash
|
||||
|
||||
# This is the normal hash for lazy_lexicographic_ordering. It's
|
||||
# slow for large specs because it traverses the whole spec graph,
|
||||
# so we hope it only runs on abstract specs, which are small.
|
||||
return hash(lang.tuplify(self._cmp_iter))
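
A sketch of the lang.tuplify helper this __hash__ relies on: it presumably walks a _cmp_iter-style generator and expands any yielded zero-argument callables (like the nested deps and flags generators above) into nested tuples so the result is hashable. The real helper lives in llnl.util.lang; this is only an assumption for illustration.

# Assumed shape of llnl.util.lang.tuplify, inferred from how _cmp_iter is
# consumed here; the real implementation may differ.
def tuplify(seq_fn):
    """Turn a lazy comparison generator into a nested, hashable tuple."""
    out = []
    for item in seq_fn():
        # nested comparison sections are yielded as callables that yield more
        out.append(tuplify(item) if callable(item) else item)
    return tuple(out)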
|
||||
|
||||
|
||||
class LazySpecCache(collections.defaultdict):
|
||||
"""Cache for Specs that uses a spec_like as key, and computes lazily
|
||||
@@ -4624,7 +4637,8 @@ def spec(self, name):
|
||||
break
|
||||
|
||||
elif self.accept(HASH):
|
||||
# Get spec by hash and confirm it matches what we already have
|
||||
# Get spec by hash and confirm it matches any constraints we
|
||||
# already read in
|
||||
hash_spec = self.spec_by_hash()
|
||||
if hash_spec.satisfies(spec):
|
||||
spec._dup(hash_spec)
|
||||
@@ -4739,16 +4753,6 @@ def save_dependency_spec_yamls(
|
||||
fd.write(dep_spec.to_yaml(hash=ht.build_hash))
|
||||
|
||||
|
||||
def base32_prefix_bits(hash_string, bits):
|
||||
"""Return the first <bits> bits of a base32 string as an integer."""
|
||||
if bits > len(hash_string) * 5:
|
||||
raise ValueError("Too many bits! Requested %d bit prefix of '%s'."
|
||||
% (bits, hash_string))
|
||||
|
||||
hash_bytes = base64.b32decode(hash_string, casefold=True)
|
||||
return spack.util.crypto.prefix_bits(hash_bytes, bits)
|
||||
|
||||
|
||||
class SpecParseError(spack.error.SpecError):
|
||||
"""Wrapper for ParseError for when we're parsing specs."""
|
||||
def __init__(self, parse_error):
|
||||
|
@@ -239,6 +239,19 @@ def _store_layout():
|
||||
layout = llnl.util.lang.LazyReference(_store_layout)
|
||||
|
||||
|
||||
def reinitialize():
|
||||
"""Restore globals to the same state they would have at start-up"""
|
||||
global store
|
||||
global root, unpadded_root, db, layout
|
||||
|
||||
store = llnl.util.lang.Singleton(_store)
|
||||
|
||||
root = llnl.util.lang.LazyReference(_store_root)
|
||||
unpadded_root = llnl.util.lang.LazyReference(_store_unpadded_root)
|
||||
db = llnl.util.lang.LazyReference(_store_db)
|
||||
layout = llnl.util.lang.LazyReference(_store_layout)
|
||||
|
||||
|
||||
def retrieve_upstream_dbs():
|
||||
other_spack_instances = spack.config.get('upstreams', {})
|
||||
|
||||
|
26  lib/spack/spack/test/bootstrap.py  (new file)
@@ -0,0 +1,26 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
import pytest
|
||||
|
||||
import spack.bootstrap
|
||||
import spack.store
|
||||
|
||||
|
||||
@pytest.mark.regression('22294')
|
||||
def test_store_is_restored_correctly_after_bootstrap(mutable_config, tmpdir):
|
||||
# Prepare a custom store path. This should be in a writeable location
|
||||
# since Spack needs to initialize the DB.
|
||||
user_path = str(tmpdir.join('store'))
|
||||
# Reassign global variables in spack.store to the value
|
||||
# they would have at Spack startup.
|
||||
spack.store.reinitialize()
|
||||
# Set the custom user path
|
||||
spack.config.set('config:install_tree:root', user_path)
|
||||
|
||||
# Test that within the context manager we use the bootstrap store
|
||||
# and that outside we restore the correct location
|
||||
with spack.bootstrap.ensure_bootstrap_configuration():
|
||||
assert spack.store.root == spack.paths.user_bootstrap_store
|
||||
assert spack.store.root == user_path
|
@@ -11,11 +11,12 @@
|
||||
import spack.build_environment
|
||||
import spack.config
|
||||
import spack.spec
|
||||
import spack.util.spack_yaml as syaml
|
||||
from spack.paths import build_env_path
|
||||
from spack.build_environment import dso_suffix, _static_to_shared_library
|
||||
from spack.build_environment import determine_number_of_jobs
|
||||
from spack.util.executable import Executable
|
||||
from spack.util.environment import EnvironmentModifications
|
||||
|
||||
from llnl.util.filesystem import LibraryList, HeaderList
|
||||
|
||||
|
||||
@@ -299,6 +300,45 @@ def normpaths(paths):
|
||||
delattr(dep_pkg, 'libs')
|
||||
|
||||
|
||||
def test_external_prefixes_last(mutable_config, mock_packages, working_env,
|
||||
monkeypatch):
|
||||
# Sanity check: under normal circumstances paths associated with
|
||||
# dt-diamond-left would appear first. We'll mark it as external in
|
||||
# the test to check if the associated paths are placed last.
|
||||
assert 'dt-diamond-left' < 'dt-diamond-right'
|
||||
|
||||
cfg_data = syaml.load_config("""\
|
||||
dt-diamond-left:
|
||||
externals:
|
||||
- spec: dt-diamond-left@1.0
|
||||
prefix: /fake/path1
|
||||
buildable: false
|
||||
""")
|
||||
spack.config.set("packages", cfg_data)
|
||||
top = spack.spec.Spec('dt-diamond').concretized()
|
||||
|
||||
def _trust_me_its_a_dir(path):
|
||||
return True
|
||||
monkeypatch.setattr(
|
||||
os.path, 'isdir', _trust_me_its_a_dir
|
||||
)
|
||||
|
||||
env_mods = EnvironmentModifications()
|
||||
spack.build_environment.set_build_environment_variables(
|
||||
top.package, env_mods, False)
|
||||
|
||||
env_mods.apply_modifications()
|
||||
link_dir_var = os.environ['SPACK_LINK_DIRS']
|
||||
link_dirs = link_dir_var.split(':')
|
||||
external_lib_paths = set(['/fake/path1/lib', '/fake/path1/lib64'])
|
||||
# The external lib paths should be the last two entries of the list and
|
||||
# should not appear anywhere before the last two entries
|
||||
assert (set(os.path.normpath(x) for x in link_dirs[-2:]) ==
|
||||
external_lib_paths)
|
||||
assert not (set(os.path.normpath(x) for x in link_dirs[:-2]) &
|
||||
external_lib_paths)
|
||||
|
||||
|
||||
def test_parallel_false_is_not_propagating(config, mock_packages):
|
||||
class AttributeHolder(object):
|
||||
pass
|
||||
@@ -339,3 +379,22 @@ def test_setting_dtags_based_on_config(
|
||||
|
||||
dtags_to_add = modifications['SPACK_DTAGS_TO_ADD'][0]
|
||||
assert dtags_to_add.value == expected_flag
|
||||
|
||||
|
||||
def test_build_jobs_sequential_is_sequential():
|
||||
assert determine_number_of_jobs(
|
||||
parallel=False, command_line=8, config_default=8, max_cpus=8) == 1
|
||||
|
||||
|
||||
def test_build_jobs_command_line_overrides():
|
||||
assert determine_number_of_jobs(
|
||||
parallel=True, command_line=10, config_default=1, max_cpus=1) == 10
|
||||
assert determine_number_of_jobs(
|
||||
parallel=True, command_line=10, config_default=100, max_cpus=100) == 10
|
||||
|
||||
|
||||
def test_build_jobs_defaults():
|
||||
assert determine_number_of_jobs(
|
||||
parallel=True, command_line=None, config_default=1, max_cpus=10) == 1
|
||||
assert determine_number_of_jobs(
|
||||
parallel=True, command_line=None, config_default=100, max_cpus=10) == 10
|
||||
|
@@ -622,3 +622,24 @@ def test_filter_enable_new_dtags(wrapper_flags):
|
||||
result = cc(*(test_args + ['-Wl,--enable-new-dtags']), output=str)
|
||||
result = result.strip().split('\n')
|
||||
assert '-Wl,--enable-new-dtags' not in result
|
||||
|
||||
|
||||
@pytest.mark.regression('22643')
|
||||
def test_linker_strips_loopopt(wrapper_flags):
|
||||
with set_env(SPACK_TEST_COMMAND='dump-args'):
|
||||
# ensure that -loopopt=0 is not present in ld mode
|
||||
result = ld(*(test_args + ["-loopopt=0"]), output=str)
|
||||
result = result.strip().split('\n')
|
||||
assert '-loopopt=0' not in result
|
||||
|
||||
# ensure that -loopopt=0 is not present in ccld mode
|
||||
result = cc(*(test_args + ["-loopopt=0"]), output=str)
|
||||
result = result.strip().split('\n')
|
||||
assert '-loopopt=0' not in result
|
||||
|
||||
# ensure that -loopopt=0 *is* present in cc mode
|
||||
# The "-c" argument is needed for cc to be detected
|
||||
# as compile only (cc) mode.
|
||||
result = cc(*(test_args + ["-loopopt=0", "-c", "x.c"]), output=str)
|
||||
result = result.strip().split('\n')
|
||||
assert '-loopopt=0' in result
|
||||
|
176  lib/spack/spack/test/cmd/analyze.py  (new file)
@@ -0,0 +1,176 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import os
|
||||
import pytest
|
||||
|
||||
import spack.config
|
||||
import spack.package
|
||||
import spack.cmd.install
|
||||
|
||||
from spack.spec import Spec
|
||||
import spack.util.spack_json as sjson
|
||||
from spack.main import SpackCommand
|
||||
|
||||
install = SpackCommand('install')
|
||||
analyze = SpackCommand('analyze')
|
||||
|
||||
|
||||
def test_test_package_not_installed(mock_fetch, install_mockery_mutable_config):
|
||||
# We cannot run an analysis for a package not installed
|
||||
out = analyze('run', 'libdwarf', fail_on_error=False)
|
||||
assert "==> Error: Spec 'libdwarf' matches no installed packages.\n" in out
|
||||
|
||||
|
||||
def test_analyzer_get_install_dir(mock_fetch, install_mockery_mutable_config):
|
||||
"""
|
||||
Test that we cannot get an analyzer directory without a spec package.
|
||||
"""
|
||||
spec = Spec('libdwarf').concretized()
|
||||
assert 'libdwarf' in spack.analyzers.analyzer_base.get_analyzer_dir(spec)
|
||||
|
||||
# Case 1: spec is missing attribute for package
|
||||
with pytest.raises(SystemExit):
|
||||
spack.analyzers.analyzer_base.get_analyzer_dir(None)
|
||||
|
||||
class Packageless(object):
|
||||
package = None
|
||||
|
||||
# Case 2: spec has package attribute, but it's None
|
||||
with pytest.raises(SystemExit):
|
||||
spack.analyzers.analyzer_base.get_analyzer_dir(Packageless())
|
||||
|
||||
|
||||
def test_malformed_analyzer(mock_fetch, install_mockery_mutable_config):
|
||||
"""
|
||||
Test that an analyzer missing needed attributes is invalid.
|
||||
"""
|
||||
from spack.analyzers.analyzer_base import AnalyzerBase
|
||||
|
||||
# Missing attribute description
|
||||
class MyAnalyzer(AnalyzerBase):
|
||||
name = "my_analyzer"
|
||||
outfile = "my_analyzer_output.txt"
|
||||
|
||||
spec = Spec('libdwarf').concretized()
|
||||
with pytest.raises(SystemExit):
|
||||
MyAnalyzer(spec)
|
||||
|
||||
|
||||
def test_analyze_output(tmpdir, mock_fetch, install_mockery_mutable_config):
|
||||
"""
|
||||
Test that an analyzer errors if requested name does not exist.
|
||||
"""
|
||||
install('libdwarf')
|
||||
install('python@3.8')
|
||||
analyzer_dir = tmpdir.join('analyzers')
|
||||
|
||||
# An analyzer that doesn't exist should not work
|
||||
out = analyze('run', '-a', 'pusheen', 'libdwarf', fail_on_error=False)
|
||||
assert '==> Error: Analyzer pusheen does not exist\n' in out
|
||||
|
||||
# We will output to this analyzer directory
|
||||
analyzer_dir = tmpdir.join('analyzers')
|
||||
out = analyze('run', '-a', 'install_files', '-p', str(analyzer_dir), 'libdwarf')
|
||||
|
||||
# Ensure that if we run again without overwrite, we don't re-run
|
||||
out = analyze('run', '-a', 'install_files', '-p', str(analyzer_dir), 'libdwarf')
|
||||
assert "skipping" in out
|
||||
|
||||
# With overwrite it should run
|
||||
out = analyze('run', '-a', 'install_files', '-p', str(analyzer_dir),
|
||||
'--overwrite', 'libdwarf')
|
||||
assert "==> Writing result to" in out
|
||||
|
||||
|
||||
def _run_analyzer(name, package, tmpdir):
|
||||
"""
|
||||
A shared function to test that an analyzer runs.
|
||||
|
||||
We return the output file for further inspection.
|
||||
"""
|
||||
analyzer = spack.analyzers.get_analyzer(name)
|
||||
analyzer_dir = tmpdir.join('analyzers')
|
||||
out = analyze('run', '-a', analyzer.name, '-p', str(analyzer_dir), package)
|
||||
|
||||
assert "==> Writing result to" in out
|
||||
assert "/%s/%s\n" % (analyzer.name, analyzer.outfile) in out
|
||||
|
||||
# The output file should exist
|
||||
output_file = out.strip('\n').split(' ')[-1].strip()
|
||||
assert os.path.exists(output_file)
|
||||
return output_file
|
||||
|
||||
|
||||
def test_installfiles_analyzer(tmpdir, mock_fetch, install_mockery_mutable_config):
|
||||
"""
|
||||
test the install files analyzer
|
||||
"""
|
||||
install('libdwarf')
|
||||
output_file = _run_analyzer("install_files", "libdwarf", tmpdir)
|
||||
|
||||
# Ensure it's the correct content
|
||||
with open(output_file, 'r') as fd:
|
||||
content = sjson.load(fd.read())
|
||||
|
||||
basenames = set()
|
||||
for key, attrs in content.items():
|
||||
basenames.add(os.path.basename(key))
|
||||
|
||||
# Check for a few expected files
|
||||
for key in ['.spack', 'libdwarf', 'packages', 'repo.yaml', 'repos']:
|
||||
assert key in basenames
|
||||
|
||||
|
||||
def test_environment_analyzer(tmpdir, mock_fetch, install_mockery_mutable_config):
|
||||
"""
|
||||
test the environment variables analyzer.
|
||||
"""
|
||||
install('libdwarf')
|
||||
output_file = _run_analyzer("environment_variables", "libdwarf", tmpdir)
|
||||
with open(output_file, 'r') as fd:
|
||||
content = sjson.load(fd.read())
|
||||
|
||||
# Check a few expected keys
|
||||
for key in ['SPACK_CC', 'SPACK_COMPILER_SPEC', 'SPACK_ENV_PATH']:
|
||||
assert key in content
|
||||
|
||||
# The analyzer should return no result if the output file does not exist.
|
||||
spec = Spec('libdwarf').concretized()
|
||||
env_file = os.path.join(spec.package.prefix, '.spack', 'spack-build-env.txt')
|
||||
assert os.path.exists(env_file)
|
||||
os.remove(env_file)
|
||||
analyzer = spack.analyzers.get_analyzer("environment_variables")
|
||||
analyzer_dir = tmpdir.join('analyzers')
|
||||
result = analyzer(spec, analyzer_dir).run()
|
||||
assert "environment_variables" in result
|
||||
assert not result['environment_variables']
|
||||
|
||||
|
||||
def test_list_analyzers():
|
||||
"""
|
||||
test that listing analyzers shows all the possible analyzers.
|
||||
"""
|
||||
from spack.analyzers import analyzer_types
|
||||
|
||||
# all cannot be an analyzer
|
||||
assert "all" not in analyzer_types
|
||||
|
||||
# All types should be present!
|
||||
out = analyze('list-analyzers')
|
||||
for analyzer_type in analyzer_types:
|
||||
assert analyzer_type in out
|
||||
|
||||
|
||||
def test_configargs_analyzer(tmpdir, mock_fetch, install_mockery_mutable_config):
|
||||
"""
|
||||
test the config args analyzer.
|
||||
|
||||
Since we don't have any, this should return an empty result.
|
||||
"""
|
||||
install('libdwarf')
|
||||
analyzer_dir = tmpdir.join('analyzers')
|
||||
out = analyze('run', '-a', 'config_args', '-p', str(analyzer_dir), 'libdwarf')
|
||||
assert out == ''
|
@@ -6,9 +6,9 @@
|
||||
from six.moves import cPickle
|
||||
import pytest
|
||||
|
||||
from spack.main import SpackCommand, SpackCommandError
|
||||
from spack.main import SpackCommand
|
||||
|
||||
info = SpackCommand('build-env')
|
||||
build_env = SpackCommand('build-env')
|
||||
|
||||
|
||||
@pytest.mark.parametrize('pkg', [
|
||||
@@ -17,17 +17,24 @@
|
||||
])
|
||||
@pytest.mark.usefixtures('config')
|
||||
def test_it_just_runs(pkg):
|
||||
info(*pkg)
|
||||
build_env(*pkg)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('pkg,error_cls', [
|
||||
('zlib libszip', SpackCommandError),
|
||||
('', IndexError)
|
||||
@pytest.mark.usefixtures('config')
|
||||
def test_error_when_multiple_specs_are_given():
|
||||
output = build_env('libelf libdwarf', fail_on_error=False)
|
||||
assert 'only takes one spec' in output
|
||||
|
||||
|
||||
@pytest.mark.parametrize('args', [
|
||||
('--', '/bin/bash', '-c', 'echo test'),
|
||||
('--',),
|
||||
(),
|
||||
])
|
||||
@pytest.mark.usefixtures('config')
|
||||
def test_it_just_fails(pkg, error_cls):
|
||||
with pytest.raises(error_cls):
|
||||
info(pkg)
|
||||
def test_build_env_requires_a_spec(args):
|
||||
output = build_env(*args, fail_on_error=False)
|
||||
assert 'requires a spec' in output
|
||||
|
||||
|
||||
_out_file = 'env.out'
|
||||
@@ -36,7 +43,7 @@ def test_it_just_fails(pkg, error_cls):
|
||||
@pytest.mark.usefixtures('config')
|
||||
def test_dump(tmpdir):
|
||||
with tmpdir.as_cwd():
|
||||
info('--dump', _out_file, 'zlib')
|
||||
build_env('--dump', _out_file, 'zlib')
|
||||
with open(_out_file) as f:
|
||||
assert(any(line.startswith('PATH=') for line in f.readlines()))
|
||||
|
||||
@@ -44,7 +51,7 @@ def test_dump(tmpdir):
|
||||
@pytest.mark.usefixtures('config')
|
||||
def test_pickle(tmpdir):
|
||||
with tmpdir.as_cwd():
|
||||
info('--pickle', _out_file, 'zlib')
|
||||
build_env('--pickle', _out_file, 'zlib')
|
||||
environment = cPickle.load(open(_out_file, 'rb'))
|
||||
assert(type(environment) == dict)
|
||||
assert('PATH' in environment)
|
||||
|
@@ -125,10 +125,6 @@ def test_buildcache_create_fails_on_noargs(tmpdir):
|
||||
buildcache('create', '-d', str(tmpdir), '--unsigned')
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
os.environ.get('SPACK_TEST_SOLVER') == 'clingo',
|
||||
reason='Test for Clingo are run in a container with root permissions'
|
||||
)
|
||||
def test_buildcache_create_fail_on_perm_denied(
|
||||
install_mockery, mock_fetch, monkeypatch, tmpdir):
|
||||
"""Ensure that buildcache create fails on permission denied error."""
|
||||
|
@@ -1404,3 +1404,55 @@ def test_ci_generate_temp_storage_url(tmpdir, mutable_mock_env_path,
|
||||
# Cleanup job should be 2nd to last, just before rebuild-index
|
||||
assert('stage' in cleanup_job)
|
||||
assert(cleanup_job['stage'] == stages[-2])
|
||||
|
||||
|
||||
def test_ci_generate_read_broken_specs_url(tmpdir, mutable_mock_env_path,
|
||||
env_deactivate, install_mockery,
|
||||
mock_packages, monkeypatch):
|
||||
"""Verify that `broken-specs-url` works as intended"""
|
||||
spec_a = Spec('a')
|
||||
spec_a.concretize()
|
||||
a_full_hash = spec_a.full_hash()
|
||||
|
||||
spec_flattendeps = Spec('flatten-deps')
|
||||
spec_flattendeps.concretize()
|
||||
flattendeps_full_hash = spec_flattendeps.full_hash()
|
||||
|
||||
# Mark 'a' as broken (but not 'flatten-deps')
|
||||
broken_spec_a_path = str(tmpdir.join(a_full_hash))
|
||||
with open(broken_spec_a_path, 'w') as bsf:
|
||||
bsf.write('')
|
||||
|
||||
# Test that `spack ci generate` notices this broken spec and fails.
|
||||
filename = str(tmpdir.join('spack.yaml'))
|
||||
with open(filename, 'w') as f:
|
||||
f.write("""\
|
||||
spack:
|
||||
specs:
|
||||
- flatten-deps
|
||||
- a
|
||||
mirrors:
|
||||
some-mirror: https://my.fake.mirror
|
||||
gitlab-ci:
|
||||
broken-specs-url: "{0}"
|
||||
mappings:
|
||||
- match:
|
||||
- archive-files
|
||||
runner-attributes:
|
||||
tags:
|
||||
- donotcare
|
||||
image: donotcare
|
||||
""".format(tmpdir.strpath))
|
||||
|
||||
with tmpdir.as_cwd():
|
||||
env_cmd('create', 'test', './spack.yaml')
|
||||
with ev.read('test'):
|
||||
# Check output of the 'generate' subcommand
|
||||
output = ci_cmd('generate', output=str, fail_on_error=False)
|
||||
assert('known to be broken' in output)
|
||||
|
||||
ex = '({0})'.format(a_full_hash)
|
||||
assert(ex in output)
|
||||
|
||||
ex = '({0})'.format(flattendeps_full_hash)
|
||||
assert(ex not in output)
|
||||
|
@@ -4,11 +4,9 @@
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
import argparse
|
||||
import multiprocessing
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
import spack.cmd
|
||||
import spack.cmd.common.arguments as arguments
|
||||
import spack.config
|
||||
@@ -16,53 +14,32 @@
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def parser():
|
||||
def job_parser():
|
||||
# --jobs needs to write to a command_line config scope, so this is the only
|
||||
# scope we create.
|
||||
p = argparse.ArgumentParser()
|
||||
arguments.add_common_arguments(p, ['jobs'])
|
||||
yield p
|
||||
# Cleanup the command line scope if it was set during tests
|
||||
spack.config.config.clear_caches()
|
||||
if 'command_line' in spack.config.config.scopes:
|
||||
spack.config.config.scopes['command_line'].clear()
|
||||
scopes = [spack.config.InternalConfigScope('command_line', {'config': {}})]
|
||||
|
||||
with spack.config.use_configuration(*scopes):
|
||||
yield p
|
||||
|
||||
|
||||
@pytest.fixture(params=[1, 2, 4, 8, 16, 32])
|
||||
def ncores(monkeypatch, request):
|
||||
"""Mocks having a machine with n cores for the purpose of
|
||||
computing config:build_jobs.
|
||||
"""
|
||||
def _cpu_count():
|
||||
return request.param
|
||||
|
||||
# Patch multiprocessing.cpu_count() to return the value we need
|
||||
monkeypatch.setattr(multiprocessing, 'cpu_count', _cpu_count)
|
||||
# Patch the configuration parts that have been cached already
|
||||
monkeypatch.setitem(spack.config.config_defaults['config'],
|
||||
'build_jobs', min(16, request.param))
|
||||
monkeypatch.setitem(
|
||||
spack.config.config.scopes, '_builtin',
|
||||
spack.config.InternalConfigScope(
|
||||
'_builtin', spack.config.config_defaults
|
||||
))
|
||||
return request.param
|
||||
def test_setting_jobs_flag(job_parser):
|
||||
namespace = job_parser.parse_args(['-j', '24'])
|
||||
assert namespace.jobs == 24
|
||||
assert spack.config.get('config:build_jobs', scope='command_line') == 24
|
||||
|
||||
|
||||
@pytest.mark.parametrize('cli_args,requested', [
|
||||
(['-j', '24'], 24),
|
||||
# Here we report the default if we have enough cores, as the cap
|
||||
# on the available number of cores will be taken care of in the test
|
||||
([], 16)
|
||||
])
|
||||
def test_setting_parallel_jobs(parser, cli_args, ncores, requested):
|
||||
expected = min(requested, ncores)
|
||||
namespace = parser.parse_args(cli_args)
|
||||
assert namespace.jobs == expected
|
||||
assert spack.config.get('config:build_jobs') == expected
|
||||
def test_omitted_job_flag(job_parser):
|
||||
namespace = job_parser.parse_args([])
|
||||
assert namespace.jobs is None
|
||||
assert spack.config.get('config:build_jobs') is None
|
||||
|
||||
|
||||
def test_negative_integers_not_allowed_for_parallel_jobs(parser):
|
||||
def test_negative_integers_not_allowed_for_parallel_jobs(job_parser):
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
parser.parse_args(['-j', '-2'])
|
||||
job_parser.parse_args(['-j', '-2'])
|
||||
|
||||
assert 'expected a positive integer' in str(exc_info.value)
|
||||
|
||||
@@ -119,3 +96,21 @@ def test_multiple_env_match_raises_error(mock_packages, mutable_mock_env_path):
|
||||
spack.cmd.matching_spec_from_env(spack.cmd.parse_specs(['a'])[0])
|
||||
|
||||
assert 'matches multiple specs' in exc_info.value.message
|
||||
|
||||
|
||||
@pytest.mark.usefixtures('config')
|
||||
def test_root_and_dep_match_returns_root(mock_packages, mutable_mock_env_path):
|
||||
e = ev.create('test')
|
||||
e.add('b@0.9')
|
||||
e.add('a foobar=bar') # Depends on b, should choose b@1.0
|
||||
e.concretize()
|
||||
with e:
|
||||
# This query matches the root b and b as a dependency of a. In that
|
||||
# case the root instance should be preferred.
|
||||
env_spec1 = spack.cmd.matching_spec_from_env(
|
||||
spack.cmd.parse_specs(['b'])[0])
|
||||
assert env_spec1.satisfies('@0.9')
|
||||
|
||||
env_spec2 = spack.cmd.matching_spec_from_env(
|
||||
spack.cmd.parse_specs(['b@1.0'])[0])
|
||||
assert env_spec2
|
||||
|
@@ -9,6 +9,7 @@
|
||||
import pytest
|
||||
|
||||
import llnl.util.filesystem as fs
|
||||
import llnl.util.link_tree
|
||||
|
||||
import spack.hash_types as ht
|
||||
import spack.modules
|
||||
@@ -200,6 +201,19 @@ def setup_error(pkg, env):
|
||||
assert "Warning: couldn't get environment settings" in err
|
||||
|
||||
|
||||
def test_activate_adds_transitive_run_deps_to_path(
|
||||
install_mockery, mock_fetch, monkeypatch):
|
||||
env('create', 'test')
|
||||
install = SpackCommand('install')
|
||||
|
||||
e = ev.read('test')
|
||||
with e:
|
||||
install('depends-on-run-env')
|
||||
|
||||
cmds = spack.environment.activate(e)
|
||||
assert 'DEPENDENCY_ENV_VAR=1' in cmds
|
||||
|
||||
|
||||
def test_env_install_same_spec_twice(install_mockery, mock_fetch):
|
||||
env('create', 'test')
|
||||
|
||||
@@ -1084,7 +1098,7 @@ def noop(*args):
|
||||
|
||||
def test_env_updates_view_install(
|
||||
tmpdir, mock_stage, mock_fetch, install_mockery):
|
||||
view_dir = tmpdir.mkdir('view')
|
||||
view_dir = tmpdir.join('view')
|
||||
env('create', '--with-view=%s' % view_dir, 'test')
|
||||
with ev.read('test'):
|
||||
add('mpileaks')
|
||||
@@ -1095,12 +1109,13 @@ def test_env_updates_view_install(
|
||||
|
||||
def test_env_view_fails(
|
||||
tmpdir, mock_packages, mock_stage, mock_fetch, install_mockery):
|
||||
view_dir = tmpdir.mkdir('view')
|
||||
view_dir = tmpdir.join('view')
|
||||
env('create', '--with-view=%s' % view_dir, 'test')
|
||||
with ev.read('test'):
|
||||
add('libelf')
|
||||
add('libelf cflags=-g')
|
||||
with pytest.raises(RuntimeError, match='merge blocked by file'):
|
||||
with pytest.raises(llnl.util.link_tree.MergeConflictError,
|
||||
match='merge blocked by file'):
|
||||
install('--fake')
|
||||
|
||||
|
||||
@@ -1113,7 +1128,7 @@ def test_env_without_view_install(
|
||||
with pytest.raises(spack.environment.SpackEnvironmentError):
|
||||
test_env.default_view
|
||||
|
||||
view_dir = tmpdir.mkdir('view')
|
||||
view_dir = tmpdir.join('view')
|
||||
|
||||
with ev.read('test'):
|
||||
add('mpileaks')
|
||||
@@ -1148,7 +1163,7 @@ def test_env_config_view_default(
|
||||
|
||||
def test_env_updates_view_install_package(
|
||||
tmpdir, mock_stage, mock_fetch, install_mockery):
|
||||
view_dir = tmpdir.mkdir('view')
|
||||
view_dir = tmpdir.join('view')
|
||||
env('create', '--with-view=%s' % view_dir, 'test')
|
||||
with ev.read('test'):
|
||||
install('--fake', 'mpileaks')
|
||||
@@ -1158,7 +1173,7 @@ def test_env_updates_view_install_package(
|
||||
|
||||
def test_env_updates_view_add_concretize(
|
||||
tmpdir, mock_stage, mock_fetch, install_mockery):
|
||||
view_dir = tmpdir.mkdir('view')
|
||||
view_dir = tmpdir.join('view')
|
||||
env('create', '--with-view=%s' % view_dir, 'test')
|
||||
install('--fake', 'mpileaks')
|
||||
with ev.read('test'):
|
||||
@@ -1170,7 +1185,7 @@ def test_env_updates_view_add_concretize(
|
||||
|
||||
def test_env_updates_view_uninstall(
|
||||
tmpdir, mock_stage, mock_fetch, install_mockery):
|
||||
view_dir = tmpdir.mkdir('view')
|
||||
view_dir = tmpdir.join('view')
|
||||
env('create', '--with-view=%s' % view_dir, 'test')
|
||||
with ev.read('test'):
|
||||
install('--fake', 'mpileaks')
|
||||
@@ -1185,7 +1200,7 @@ def test_env_updates_view_uninstall(
|
||||
|
||||
def test_env_updates_view_uninstall_referenced_elsewhere(
|
||||
tmpdir, mock_stage, mock_fetch, install_mockery):
|
||||
view_dir = tmpdir.mkdir('view')
|
||||
view_dir = tmpdir.join('view')
|
||||
env('create', '--with-view=%s' % view_dir, 'test')
|
||||
install('--fake', 'mpileaks')
|
||||
with ev.read('test'):
|
||||
@@ -1202,7 +1217,7 @@ def test_env_updates_view_uninstall_referenced_elsewhere(
|
||||
|
||||
def test_env_updates_view_remove_concretize(
|
||||
tmpdir, mock_stage, mock_fetch, install_mockery):
|
||||
view_dir = tmpdir.mkdir('view')
|
||||
view_dir = tmpdir.join('view')
|
||||
env('create', '--with-view=%s' % view_dir, 'test')
|
||||
install('--fake', 'mpileaks')
|
||||
with ev.read('test'):
|
||||
@@ -1220,7 +1235,7 @@ def test_env_updates_view_remove_concretize(
|
||||
|
||||
def test_env_updates_view_force_remove(
|
||||
tmpdir, mock_stage, mock_fetch, install_mockery):
|
||||
view_dir = tmpdir.mkdir('view')
|
||||
view_dir = tmpdir.join('view')
|
||||
env('create', '--with-view=%s' % view_dir, 'test')
|
||||
with ev.read('test'):
|
||||
install('--fake', 'mpileaks')
|
||||
@@ -2454,3 +2469,18 @@ def test_does_not_rewrite_rel_dev_path_when_keep_relative_is_set(tmpdir):
|
||||
print(e.dev_specs)
|
||||
assert e.dev_specs['mypkg1']['path'] == '../build_folder'
|
||||
assert e.dev_specs['mypkg2']['path'] == '/some/other/path'
|
||||
|
||||
|
||||
@pytest.mark.regression('23440')
|
||||
def test_custom_version_concretize_together(tmpdir):
|
||||
# Custom versions should be permitted in specs when
|
||||
# concretizing together
|
||||
e = ev.create('custom_version')
|
||||
e.concretization = 'together'
|
||||
|
||||
# Concretize a first time using 'mpich' as the MPI provider
|
||||
e.add('hdf5@myversion')
|
||||
e.add('mpich')
|
||||
e.concretize()
|
||||
|
||||
assert any('hdf5@myversion' in spec for _, spec in e.concretized_specs())
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
@@ -32,6 +32,7 @@
|
||||
mirror = SpackCommand('mirror')
|
||||
uninstall = SpackCommand('uninstall')
|
||||
buildcache = SpackCommand('buildcache')
|
||||
find = SpackCommand('find')
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
@@ -119,7 +120,9 @@ def test_install_dirty_flag(arguments, expected):
|
||||
|
||||
|
||||
def test_package_output(tmpdir, capsys, install_mockery, mock_fetch):
|
||||
"""Ensure output printed from pkgs is captured by output redirection."""
|
||||
"""
|
||||
Ensure output printed from pkgs is captured by output redirection.
|
||||
"""
|
||||
# we can't use output capture here because it interferes with Spack's
|
||||
# logging. TODO: see whether we can get multiple log_outputs to work
|
||||
# when nested AND in pytest
|
||||
@@ -140,12 +143,15 @@ def test_package_output(tmpdir, capsys, install_mockery, mock_fetch):
|
||||
@pytest.mark.disable_clean_stage_check
|
||||
def test_install_output_on_build_error(mock_packages, mock_archive, mock_fetch,
|
||||
config, install_mockery, capfd):
|
||||
"""
|
||||
This test used to assume receiving full output, but since we've updated
|
||||
spack to generate logs on the level of phases, it will only return the
|
||||
last phase, install.
|
||||
"""
|
||||
# capfd interferes with Spack's capturing
|
||||
with capfd.disabled():
|
||||
out = install('build-error', fail_on_error=False)
|
||||
assert 'ProcessError' in out
|
||||
assert 'configure: error: in /path/to/some/file:' in out
|
||||
assert 'configure: error: cannot run C compiled programs.' in out
|
||||
out = install('-v', 'build-error', fail_on_error=False)
|
||||
assert 'Installing build-error' in out
|
||||
|
||||
|
||||
@pytest.mark.disable_clean_stage_check
|
||||
@@ -172,20 +178,17 @@ def test_install_with_source(
|
||||
@pytest.mark.disable_clean_stage_check
|
||||
def test_show_log_on_error(mock_packages, mock_archive, mock_fetch,
|
||||
config, install_mockery, capfd):
|
||||
"""Make sure --show-log-on-error works."""
|
||||
"""
|
||||
Make sure --show-log-on-error works.
|
||||
"""
|
||||
with capfd.disabled():
|
||||
out = install('--show-log-on-error', 'build-error',
|
||||
fail_on_error=False)
|
||||
assert isinstance(install.error, spack.build_environment.ChildError)
|
||||
assert install.error.pkg.name == 'build-error'
|
||||
assert 'Full build log:' in out
|
||||
|
||||
print(out)
|
||||
|
||||
# Message shows up for ProcessError (1) and output (1)
|
||||
errors = [line for line in out.split('\n')
|
||||
if 'configure: error: cannot run C compiled programs' in line]
|
||||
assert len(errors) == 2
|
||||
assert '==> Installing build-error' in out
|
||||
assert 'See build log for details:' in out
|
||||
|
||||
|
||||
def test_install_overwrite(
|
||||
@@ -710,8 +713,116 @@ def test_install_only_dependencies_of_all_in_env(
|
||||
assert os.path.exists(dep.prefix)
|
||||
|
||||
|
||||
def test_install_no_add_in_env(tmpdir, mock_fetch, install_mockery,
                               mutable_mock_env_path):
    # To test the behavior of the --no-add option, we create this environment:
    #
    # mpileaks
    # ^callpath
    # ^dyninst
    # ^libelf@0.8.13  # or latest, really
    # ^libdwarf
    # ^mpich
    # libelf@0.8.10
    # a~bvv
    # ^b
    # a
    # ^b
    e = ev.create('test')
    e.add('mpileaks')
    e.add('libelf@0.8.10')  # so env has both root and dep libelf specs
    e.add('a')
    e.add('a ~bvv')
    e.concretize()
    env_specs = e.all_specs()

    a_spec = None
    b_spec = None
    mpi_spec = None

    # First find and remember some target concrete specs in the environment
    for e_spec in env_specs:
        if e_spec.satisfies(Spec('a ~bvv')):
            a_spec = e_spec
        elif e_spec.name == 'b':
            b_spec = e_spec
        elif e_spec.satisfies(Spec('mpi')):
            mpi_spec = e_spec

    assert(a_spec)
    assert(a_spec.concrete)

    assert(b_spec)
    assert(b_spec.concrete)
    assert(b_spec not in e.roots())

    assert(mpi_spec)
    assert(mpi_spec.concrete)

    # Activate the environment
    with e:
        # Assert using --no-add with a spec not in the env fails
        inst_out = install(
            '--no-add', 'boost', fail_on_error=False, output=str)

        assert('no such spec exists in environment' in inst_out)

        # Ensure using --no-add with an ambiguous spec fails
        with pytest.raises(ev.SpackEnvironmentError) as err:
            inst_out = install(
                '--no-add', 'a', output=str)

        assert('a matches multiple specs in the env' in str(err))

        # Install an unambiguous dependency spec (that already exists as a
        # dep in the environment) using --no-add and make sure it gets
        # installed (w/ deps), but is not added to the environment.
        install('--no-add', 'dyninst')

        find_output = find('-l', output=str)
        assert('dyninst' in find_output)
        assert('libdwarf' in find_output)
        assert('libelf' in find_output)
        assert('callpath' not in find_output)

        post_install_specs = e.all_specs()
        assert all([s in env_specs for s in post_install_specs])

        # Make sure we can install a concrete dependency spec from a
        # spec.yaml file on disk, using the ``--no-add`` option, and that the
        # spec is installed but not added as a root.
        mpi_spec_yaml_path = tmpdir.join('{0}.yaml'.format(mpi_spec.name))
        with open(mpi_spec_yaml_path.strpath, 'w') as fd:
            fd.write(mpi_spec.to_yaml(hash=ht.full_hash))

        install('--no-add', '-f', mpi_spec_yaml_path.strpath)
        assert(mpi_spec not in e.roots())

        find_output = find('-l', output=str)
        assert(mpi_spec.name in find_output)

        # Install an unambiguous dependency spec (that already exists as a
        # dep in the environment) without --no-add and make sure it is added
        # as a root of the environment as well as installed.
        assert(b_spec not in e.roots())

        install('b')

        assert(b_spec in e.roots())
        assert(b_spec not in e.uninstalled_specs())

        # Install a novel spec (without --no-add) and make sure it is added
        # as a root and installed.
        install('bowtie')

        assert(any([s.name == 'bowtie' for s in e.roots()]))
        assert(not any([s.name == 'bowtie' for s in e.uninstalled_specs()]))

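# Hedged CLI sketch of the --no-add semantics covered above, assuming an
# activated environment named 'test' in which 'dyninst' already appears as a
# dependency and 'bowtie' does not appear at all:
#
#   spack -e test install --no-add dyninst   # installed, but not added as a root
#   spack -e test install bowtie             # installed and added as a new root
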
def test_install_help_does_not_show_cdash_options(capsys):
    """Make sure `spack install --help` does not describe CDash arguments"""
    with pytest.raises(SystemExit):
        install('--help')
        captured = capsys.readouterr()

@@ -739,6 +850,25 @@ def test_cdash_auth_token(tmpdir, install_mockery, capfd):
            assert 'Using CDash auth token from environment' in out

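# Hedged sketch of how the auth-token message above is typically triggered;
# the environment-variable name is an assumption based on the test's setup,
# which is not shown in this hunk:
#
#   SPACK_CDASH_AUTH_TOKEN=<token> spack install --log-format=cdash <pkg>
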
@pytest.mark.disable_clean_stage_check
def test_cdash_configure_warning(tmpdir, mock_fetch, install_mockery, capfd):
    # capfd interferes with Spack's capturing of e.g., Build.xml output
    with capfd.disabled():
        with tmpdir.as_cwd():
            # Test would fail if install raised an error.
            install(
                '--log-file=cdash_reports',
                '--log-format=cdash',
                'configure-warning')
            # Verify Configure.xml exists with expected contents.
            report_dir = tmpdir.join('cdash_reports')
            assert report_dir in tmpdir.listdir()
            report_file = report_dir.join('Configure.xml')
            assert report_file in report_dir.listdir()
            content = report_file.open().read()
            assert 'foo: No such file or directory' in content

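# Hedged sketch of the on-disk layout the test above expects after running
# with '--log-format=cdash --log-file=cdash_reports' ('configure-warning' is
# a mock package whose configure step emits a warning):
#
#   cdash_reports/
#       Configure.xml    # contains the 'foo: No such file or directory' text
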
def test_compiler_bootstrap(
        install_mockery_mutable_config, mock_packages, mock_fetch,
        mock_archive, mutable_config, monkeypatch):

@@ -754,7 +884,9 @@ def test_compiler_bootstrap(
def test_compiler_bootstrap_from_binary_mirror(
        install_mockery_mutable_config, mock_packages, mock_fetch,
        mock_archive, mutable_config, monkeypatch, tmpdir):
    """Make sure installing compiler from buildcache registers compiler"""

    # Create a temp mirror directory for buildcache usage
    mirror_dir = tmpdir.join('mirror_dir')
Some files were not shown because too many files have changed in this diff.