# Compare commits

1381 Commits: `v0.13.0 ... releases/v`
SHA1:

```
a8d6533b09 93bd27dc19 ddc79ce4df e9e2a84be1 eb3792ec65 ef1b8c1916 5a6f8cf671 750ca36a8d
476961782d ac51bfb530 a57689084d 9c62115101 328d512341 0762d8356d 322a12e801 a69674c73e
e2f5f668a9 dc59fc7ab8 473424ad60 3c1379c985 b0dc57a939 b99102f68c 7a68a4d851 a3bcd88f8d
740f8fe1a9 430ca7c7cf 55f5afaf3c 6b559912c1 9b5805a5cd c6c1d01ab6 b9688a8c35 ed2781973c
99bb88aead a85cce05a1 e2b1737a42 ff0abb9838 3826cdf139 30b4704522 09e13cf7cf 296d58ef6b
ec720bf28d 1e42f0a545 62683eb4bf 901bed48ec cc8d9eee8e 1c8f792bb5 9a1ce36e44 59a7963785
733f9f8cfa fa0a5e44aa 5406e1f43d f3a1a8c6fe b02981f10c 654914d53e 3753424a87 32a3d59bfa
97d46dc36f 388960f044 629c69d383 0ea6bab984 50b5e6b94b 1a479b5e9e 3104bfe93e f961f64215
9f2cee43cf 82be8965f2 16a9464b5c 6764a9464d 6d8e0699f4 0bc4a778bd 4a5c64ab6e abfdabd34e
b399761106 065cbf1bbe 08b5264607 fabd8693b9 38fc441a36 48375adf17 a03b252522 2e387ef585
01bda12692 c5134ff4f2 19af86592b ea9aa2c7cb 0d84ee6c68 e4968a495d c784799895 b5b19084de
8b72cb64dd dd0e18d7b8 d1929b2ea7 a1e3a1653a f5b5036ad5 40410102d2 b538885b08 d9b8a9a8b3
545c436d33 0ea7b83e71 186ca13cf0 674c4379ee 308a6bc601 43f8e9a260 98895297b5 64887e4821
4e2a5388ac 9a5fa90c25 d98b98aae6 4d62dda17a a02aadb4db 22a56a89c7 a47ab9c6b2 ef9ab2ebc4
0be09abd9c 66c3f89b78 47e8084fab d9d863330b e258326133 7a60a04402 9b221d26b8 4cd1a5660e
65133daad7 8825335056 d1cbc14022 f4d91cc3ec c969fffbf0 1d986c3790 f9b857f1b0 3331b70f20
0c8738a860 31f9b66e18 dc46e219d1 129256de52 44c01e6b70 f46f238a33 d3b1248c2d eac6c53afb
e9862671c9 2dac7b60a9 14025e89f6 f2aca86502 2f4881d582 26ad754f42 4c215d1fed a92543803c
0ea220db83 11340a332d 3ee0295fb2 6f95967e95 53238af2dc 813bfcfcbd 893f76da53 975acd4dfb
c0bae73d2f c77b6923b8 4bca09cc89 e488b36d59 c8c53cc7e7 5b2d1a6a54 74018df678 db9ccc21b0
1f54627cee 0ec908cd13 d08f53c5fb 6f6e1a8c4a 02f3fc6d7a 82f76c44a0 f396106d1c fa28602092
f95348074b 86a3d58159 76bf5c53fa 1270b4d0df 342200774b 9f89dce52f 1d7fc47672 80123e0d1a
21156e6243 5ea7031019 17f19ad407 6c93ef5088 8783f16e36 921cb6c860 ccbdd3c902 30d487509b
e781d8eb72 33edadb041 03d32c868a d6e4b4773a 38c3b55f19 b8c2141760 bbd256e2fe 1cc7a3533b
60847abf0e 8071369ffe b9156895ec b261b80ebd d3db3c6a2b 0dbd3b21c8 fc190f397a 8624bf65f9
e833ef9cce 1b95c33c82 978d960158 df1e9317b8 7f9cd886f3 7d96e496b2 e75b8c1230 41a2a5db2c
8f877fdc65 bab407a28a 8308d2d59d a6dc3fe40b 12ec8e340f f547f2ce10 00de09268e ee573540a0
d7de6cf055 ad7c11c482 ff71844bf6 98c4d15655 4f3287a9dc 45ee708596 959d9d2365 7c88bb937d
c92201e6ab bcccf0d4e7 8313de5be0 4db0eb4917 8b5cc1e76f bff3a087ec bd24d53ceb 2c0bfd071f
e5e6791d97 4a7e4655ac 3f5d889d2b b688bb549b baafa7ec84 3c28e72d2f e65aa7569e 978a3bb7ef
8c05221bc6 c607288a7c d524abad20 e0dfc3ddbf 8beb42e749 4fa29ceb50 690ed2fe98 7996bc809a
580c8f5b7e 46ae3f0570 725336ee48 0d0ab60150 9046deae80 413de215b2 1c5f72dbaa 4b7f057a4b
03ce5effa1 eb88dfee9a 1d25564230 4dc67b79aa 681ad2ac44 268f42bf7e 856734b804 7252066a32
06c85ea5bf 3dd844e597 fb482ae0c9 a337d27874 de84bd3f24 5243d270d4 38303a6a79 a7b43f1015
90f3635afd 7e50cec5a4 ca66ab6901 3b2c534e73 b5a3ee636b 78ce1c7ce4 c8cb480eb7 cf120d7441
947dabc356 2c63ea49d1 630611a786 78f16d703d c56c4b334d 582e7ce2c8 c2885990b8 5c33f638d7
e8b6c40b31 7706b76d3a 1a41ec766a 35df75ee77 490508d324 357786ce6b 4e32505770 37a6b8d54f
c77eeca61e 745a843911 cb3dbea198 8aa1eba2e0 1ed0efec7d bd442e884a ad914c28d7 d00a3eda6b
22df37c328 716978e00c 42c829adb4 4d173579cb f274d89c33 85b6e3e6d4 1c5838be5c 3ba5df3763
30b30e11dc 6d2e6e1f4d 7a10478708 e22ac814b0 0311b63e0b a8d5c6ccf2 3e5427078d 3a3a58a106
010f9451c9 f9f28e8fba 4da8f7fcef 69e5683ba4 5397d500c8 b442b21751 d1d5f5f9e7 f685d538d8
2c3e2669f2 fd9b1fb6de 4cb8294800 5bfb0eb447 1203134253 fecb26763d 8c1845581d 8c1aee1b78
ef9cb97376 e4cac22462 291c110700 4d3bd1116c 7de3ea4e19 710fabd68a f9889526f2 7d9a0fa180
22c9f5cbd8 458c9a22bf b43f658c39 6b19582198 f7e2e84647 759f6b6d13 e62c82de7f 88289cd2c7
1a846abbe8 f66f56287d f745b790f3 bb0b88f38a 588c87c665 c2460dbcd2 3b38a45a76 29a906d20c
0a92b54701 2dab92742d 58a905ec76 1f6f812696 1a0c31703a bce4bec059 ba25bb3050 57cc7831bf
ffdde40f56 29a01f488c 731148e0e1 52d1f5b839 1974ad4e7f d4d82281d1 997a0f4207 b34e7ad28f
603e2794db 1bbe0a1f74 0ec9377852 19ff3c0f67 4accc78409 7d444f08e7 42633b0869 74c8f25826
5ad44477b2 94971d519c 30dd7ae176 dcaa50c9d0 3d9787a8bf 44f60f9fec 0c0aba579a ab36008635
412c336113 0c9ac8d8d1 04e6fd60f3 6f4c90378a b7ee2d02e4 c029c8ff89 d83309493f 47ee690076
09e318fc84 9635ff3d20 ed501eaab2 12a99f4a2d b072caadec 7b2895109c b2adcdb389 1e0408d05a
ee35d949f9 23a7feb917 a5b2347cfe 23a759cda0 39035e4517 3519a17624 a2f8a2321d 85ef1be780
488e25ea34 bd60e0f137 da189b8d1d ed4d544e8f b142914b24 30c8e1d8a0 549aae7f72 52a7f94d9e
4e3617fe1d 1af36f0cdc 53d891a501 60ed6d2012 f7ec09d30b 9d7e482497 69feea280d e710656310
2b0b340aab ea0a549db3 492b600d29 0605fc1557 94def872ee f17ce36da2 2b6106524a 635fc62de0
da091c0cf5 16da648d03 48a12c8773 52ab2421bb 67c6d99219 12b0340d2f 3f5bed2e36 f58004e436
6ab2c3caa3 d86816bc1a 7badd69d1e d0523ca087 e01c39019c 0ce4eef256 9ffa053f18 893b0792e4
b9629c36f2 0f3ae864a5 b7b27d3862 eb79c82cba 0e86961b84 71f47b3b26 02e077a7fa 87c55b79c5
06e100d5cc f8be90cf3f d2098d337a a26e5caa57 cb3f4081a0 c700200959 f569577747 1fd335d654
1e09de73b7 a57edb7029 71243f3f7b 35db2d05b7 bb29932362 f8e0f4251d 9a6b3b3a07 80745128b3
dcd8d7a620 12a261523e 95ddd3d6f2 33d35768e6 6fd2c66022 1c9337a5e4 4d7d657366 d646c8d8d5
d58390dbf7 36fca2c29a 4a5835ef2c ca6e75c9f6 b0fce56d5b 04a6a55cf8 031fdfd7ca 80ea96312f
1a385a5178 c9e01ff9d7 5053dfa259 11f2b61261 8011fedd9c 910df8cb4e 74266ea789 7a0990903f
ffbb0efc81 2eadfa24e9 8283d87f6a 4d794d63b5 796722aeee 900161d182 5eed196f74 54ecc4e504
5db5040871 d10505678f f23a136d83 ca26eb6923 dbb149cd7f 818b00e302 ba22af0de0 d8b4bee0cd
cacd57d340 c6a6911821 9010d4fce5 4bb76cf5e8 c52bcdd080 4d752e63e5 94bc6b06b1 bdc9b89d86
563e261fa4 38cd15d0e0 808c80d65a 7a61d1dbd1 0f68ed73c6 876305adf0 7319516749 c763b68b7c
f22855f6e7 362e79bb3a 5aeab7dbe5 8ad0be96aa b7f0493563 8b1bf2d613 a48fb69601 3cd6938d80
027142bcfc c2778d8898 e6e06aa2f3 b5d729a829 946a80bd3d a2f961bd6f 0232c820ab f8acb95ad3
726a662c2c b69cf08cfb e7add79cba 9f6ac938b7 e78f3ed040 ef68791210 8ffb9605d7 4b1c356a27
ae6afaf8e1 ed9ab0668f df8ee438e5 7ddbc9bc87 30694aa501 a18ab8f765 eefccd5d22 a04faa8f6b
240a9e6284 7a88c17d3a 5c7a3e2d14 4c65324084 6d8ec8890a 2e5c9eb2e3 4366977540 c51db51bfb
41c40d4ba4 713d7a3793 e2e9f02d81 b33d108c1b 737d2f55a8 c41dfb0a75 35a696a965 a76ef717bd
fa2e094c81 6c4c26f538 0a666af326 487731e207 8660b0b5b8 5b1297378c d78c52c291 0d435630db
673a725576 eb7a4e1029 5afc407faa 465577847d 6edc7a2558 8917d58e7a 9c4479624c d1c708bdf3
257e71d87a 52df2309cb b982015930 ff23672591 4bd4ed0f84 75760e44f3 ffb6102a42 9ae80a56a3
592a144feb 6b3e173331 4a84155caa f79649d2e3 26ffbe3d21 de93830b9a 2028687efe 8a82c930bb
11501532d3 3e8b4a9b24 a24498f7ba c40ee08c8d e4f56378b1 7546ca6d4d 777812df4e be3482de09
a5ae5aca36 08d0267c9a eddb42ed43 d2ed01a12c e49a5adf04 e04a9031b0 0950f5ffa3 37b11d0468
f3c0d23a3c 627fd1949d 47750c8800 7cde359eb8 adffa45264 c0672eb641 f7f4d1a02e f0532e27da
8c8f3f228c 90f4860402 4d85979ec4 378016d350 d83d7ce694 c22329c5e8 66bb19a593 c9729aaab7
b9dc263801 f7b9592eb9 100593b83a 729e43ffac 39a09691c5 e46548c3b8 70184c6ea2 c3c1cf13e7
ba0e144b07 8e70fa52df 19c1312eb4 e26f517daf 85fc6448f8 456aae2627 3f190a432e 6e828206b6
9ed34f686f 4beb9fc5d3 de73121ebd 7a97dc3770 9192f046d2 1ac0c51dad ffc91bd86e 3017584c48
9cc013cc0f 58cb4e5241 5ddbd2fa6c 5b979aee81 026534dadb b70a5245f2 e92f6c70a6 9ff6079986
9a0fd8db9a 62cd7e4490 3bf7998bb5 4af6303086 98ad6e39b5 cc96758fdc 65ef6d5dcb b2e9696052
e7dc8a2bea e839432472 8e8235043d 0699f8ac9d 25f217f5f2 4d6462247e 2dafeaf819 855f9afa6e
bd345e16b8 12692424a7 c0d5c360d5 8ee75e19bd 2166a91ec5 6c957b6b62 08898a2ad7 3315700da9
c7f1e2835b 2aa8132afd 61b4ad1837 d333e14721 1e2c9d960c 6a084a8289 37c1d2d004 ee8bc0df98
748c552992 7652d1a4c1 55d5b435c8 231e237764 8616a26406 e22d3250dd e3939b0c72 f013687397
79ddf6cf0d be6d7db2a8 d87ededddc b3a5f2e3c3 98577e3af5 a85b9070cb 91ea90c253 5bdba98837
cbf8553406 48befd67b5 37eac1a226 639156130b 587c650b88 d71428622b a69b3c85b0 98b498c671
64209dda97 c15e55c668 d7f2a32887 78b84e4ade 9b90d7e801 c83e365c59 0fb3280011 6c9467e8c6
bb517fdb84 eb8fc4f3be 779ac9fe3e 99dee90372 643cc95055 9573fa2299 ae8a72c0da e2ec1c76ce
fa0d710626 ded5cbec70 0950442ea0 013ff54731 2a3bf87484 3690bc44bd b83365c945 249c1d9d81
a10253a667 8fe2c0a4fe 96063f9168 8522d1f0cf 80495d83ed 87d0ac804e 497fddfcb9 1b93320848
abf8e09fe5 73dbda8c5a 3dd59c569e 3bb9d0feb4 f408535f2d 97afe560d5 d01ceabca2 71d10d76dc
72eef17ff9 48fb5b8b17 a5cab03d10 e89915eacc dc17d548c8 530a67c280 01edc28fde ae52f41448
5940ec0d74 af249d3cf6 531f370e0d 81b147cc0a a3799b2c7b 0e9c8d236c f73cdac731 33335c9d0a
92c2c47f72 8da6fcf324 67603d71bc 135cf4835f 52894f5c71 5052ce803d 99a19596c4 52ebc19b4e
4eb54b6358 fa5e8aa876 09b17ea147 0607579fd3 201a62d957 5a1b6e1cfc 6e12c49c28 ae3eac0b19
3511245ca4 9583bde739 a24b973fa4 60a22f1a7a 5db646d768 07f522bed6 6a0d15a5c9 dc69256a59
a52df46e2c cd8be44f62 f657f73e94 1a41380649 8bda848cb9 18c2029fef 189eb5b883 d831db37ca
5eca4f1470 a4ff92e42d 9ceec7e219 e366d5b559 9ece934084 f80491826b 9f1d728646 60580f5871
410bce91d4 34128ca70d 5e9fb6b309 986df8c233 0f0e2c4397 72390ecd62 43712b4982 2f4521d418
f40520c6bf 33028c8955 f757b6f4e5 7e668b1153 43e9fe95ea f589f69a6c 0167ceb104 83baf35246
268914e7a2 988b377382 da501564cb 5db615feca 2e19a9343b c659408368 3e5c926a95 481a920fe8
3344bb0a96 2520806df2 0f5724e908 8c2305e867 8199f22e7c b62ba7609d 754dd6eb1f b2cc50aa6a
b64f458102 03203a86b4 0d7b9de3be ddfca1b2f2 d98b433a3c c2f45fb4da fec3a852c7 695c09a8b7
53f73e280a 1f9d37c346 7ec1917d9b 47615566ea aac45154fd d07e988fbc a254f7011e 34adebc7cf
6f7a58b142 69a91b5b6b ee0b9bfa5b 927f496306 f2979da2af 77482e102a b0a7c8487d d4326038b9
bb3bae4bae ec3d979899 5c2afd0535 e7a03d07b6 3cd6ca02d2 ec060f3d9c 2c31776007 917224cb3c
5b3b522861 33f603734d 59d222c172 34f0195de8 fb6d29adfa f96216cf40 122264609e dd67868ef6
9e8204d075 c627e5a6fc 0592c58030 da9a562182 23faffa2d0 919a5421b7 b33b8a3e29 1d06949306
aa8d5d2e8c 2266aab5c5 c72568d01b bf9661f18a 0c1599403d cb258daf88 4593b7b693 3547776921
aca67581e8 99d2bcf64e 16a61d1422 4e24c4ca5f 4fb03f7222 8d21e39f00 fdc3176920 4f7c28e9cd
ab250a2e3a 0a1d2a45f4 db1026cb62 891ccbea3f c1826d9d32 129e548ac0 71ec2db3a9 e77a557ca1
cec03c2db4 1a662f428e a9f6caf863 c88b95bd28 e78865b3ab b6a6f28482 9ece63242a 31b45d40d9
969bc948b5 15bb5da99f d705e96a63 9d9737f765 01d35a4f3b 66a169e2a5 350a5422ed d173ec6a35
842e9d9375 99c9365974 65a292fbbf 06042ec7ec 5d523dd689 dd26cc4004 5e49b76dd7 246799d4ce
67e45096c8 5e20bd86e4 f069a3feda b0f59484d3 f84ad57fe7 c3eafde7bf a288449f0b 831133a4c3
9a0b1b6aef fcc9a668bf 2da2fc8bc5 dbdc6c4a68 e9ee9eaf50 55ee2aecdf fe4ccdd555 ad5d612925
b3d6eb79f5 3fee65cb92 46b68263d8 03a5771b9d ba156dfcd8 e5f04f9abc ba7ac53b75 e0e9fb4462
10f4445309 e9f027210f 728f5a76f3 bb33b6b8e6 ba87a7f53a 92ee0659e2 c7dbb4f103 5ddf5e2e7b
111dc8f823 fb52f1c7e2 88680febc9 a5d872f510 7e2d01b238 cb38725a88 4f8a2eeca2 a93a613668
bca59f8d83 1b624b9d45 6d127e29de 543f37ea51 71eaffa46b 2dffbec486 c36d9f297f b44df29316
fba963f1d1 9c5b583508 8e3ff9b39c c7568c9146 fdd7a0e236 daaeb412a4 1c8a62aa86 c850e80bea
aec5874b94 c5da94eb58 46617b6f12 94c8bf0322 ab98ef1765 92ca639905 0f464b8d9f e865856c84
cacfc3a6e1 4dca63f007 008fe37941 e974f48be0 1b24dfb8ba 0f46c3452f 6d7250f59c d7db42e201
85ce22a0b8 484dab6bac 206e7bd5aa 7125d7eebe 399336ae89 3cc8c93883 9e397042a7 515f1466c8
f07131a390 d207dceb3a 51ee8aa639 c80792fc06 07325f5fd0 cfe9550bfe 1450c30656 ad1ad83664
f6781d65f0 11e3e332bd 6bc2012ff1 3cfab76d25 7764fd0833 eb0561735d 18cf751d13 260a4c4904
8b85a6ca14 b75f2e2c24 327de3b3d4 8ea4692ee9 77c206185b 7a81c37bde 1291ca3410 52d0940b42
07cda58bcc ad2cc38f22 7d3d03dea9 577391e62b c171ff6335 b4049c484f 5c8e34160d 7872435753
0aab6068ff 9c10b80591 552ffed171 3eb1a0226d 70e4377348 c349759acd 87e757ee93 448e09e5b2
289aa6ad8a be9c551add 53c2b618da 0cb5460a52 b8c18068c2 3a94c032c7 bb6a203fe1 fa37387373
2f2dd879f7 4c4aba5f9e 959f52dbf5 8b1d800d53 c3c5abb9cc 42a0061cb0 c7472e0cbe e5588880d5
0a14a0c48d 836b6016e4 2c8a867a81 ff64004a7b 99650d6caf b95961d26b da883f2270 48420d0c24
6c55a7c85f f53abbecd3 b23d2efd99 0684a58d16 73038a3f51 bed7e40dcc 9a6a19d464 49b9365205
71cd91e4c1 100fcc2c8e 2f069b571d fb3a3ba95e 9cd778f152 1e52aa750a 5e9ca0f939 ace47cecbe
af9281b843 ad4986be94 894267e02e f25cf16b11 63874db562 38ae7ab6cd 4e4e3d192e 0d1e9c6e06
d43364d679 d19eb21181 33059daff4 ea42d67a9a 66cf530459 513fe55fc3 f25a4ab089 d00be588e3
bb0667ceaf 99dfff4475 e433a5dc0b e0746a2a07 e5b3202352 ab62ad463d eb22abc521 686e613e94
d3c1547dba 02f27fc45d 15207bae43 c8c8dca5b5 aa83c483f7 3dbafb5ee7 46bfcbbd3d 5510bba1fd
74e04b7e20 c587c76537 40c77bf158 94c9e5f9a0 e670476024 7a6c7f23a0 8490587c76 2fb5df0284
46871416b1 d79d11b6d4 11e65661ac 28163cb34f 7e84fe966c 00bf299e46 ab2b3f30a5 cd04538bb9
b8916ecc08 3f861e18b0 045e9c905f 1ed8de8757 43b0356e14 b280034380 dc36b4737d a2ef7896b7
ff9b03edc3 d6274e1a1d 9ae4f42ab5 1a6bba1632 4d37db02da 56de86dda6 53b2add34f da386b560a
d0de9f8d9d 1ae824737e 2b6c4245d0 ca19fb86c1 57aa7ba045 cea7403b67 826e8c6394 b1965fa913
cbfdc85d26 9e12b4e95a 5a9389a528 e5b38c525e fdb85744e3 3eaa790efa dc51d676a2 03fb2d61c6
38da69346c 3eb19b142f 6fc3e73f8d 4934448065 bd9907809e e10f8fe531 901da240f3 86202a5ad0
60070759a0 1ca91d728a ff330631ed 834a8be933 550e9907f5 3db3395ed1 d01a8f7cd6 eee3a63a3e
9a4797b74c e27d7479a3 2584639e34 148566ebd5 ec6b2f7bb1 b189d10f1f c1ee069d1b 3c2542f387
2eb65f8a5d 3f7fd4037f f4a1666b1a ae6229dee2 119af6d71b a2627da7d9 fd01ac0ff1 ff307aa86c
c9c267518f 719ebd18c3 8139d7cb69 9936c8269f 0616c6ada1 76c15f11e0 344e4e0d1c be56fe0677
d748df73a3 45eecbdbc2 12da0e7152 6d6716e302 0d35ae7f5f 52afcfac4d 6b7a51587b 4ed56573c4
52eba8fb00 95bc677445 e292bef411 ea5c7917ac 2bd1a0ca4c abce878fad f9dc41f806 a684efe826
6dabdb7a82 6ffbc9c091 8cbea5f767 7c018bc2c8 10f509b182 27ab045dc1 7a0bed7893 68a34a11bc
74c81c0fc3 35f0003c08 aa2972172a 54e1d57647 a6ccb5b2fa b77974e986 b4501c4586 ecfe84def7
fbb5cf944b 385e41d70b 527d639b0e 4f5d3770c7 0341c38dff 18243695c9 7e2e93b787 dd7ebcf2d9
0f3ffad430 0b93e95049 80f63f7f9b 6b27d87e7a 3bd7d7f211 880a8d40ce 1114ae9375 bbb5284b42
19410461b7 903b0071cf edf9548310 0784ec1a6d bcda14f825 1d8391a60c 7cdb241f80 aa7c229c8f
c42ebb0ccc 9331a943ce c175be4420 ed037d0c5d ed89825557 e12a738d84 573789f067 c43e6839d4
a8228e1aec 2e029fc2e5 24ec9f0ce3 f6e26c402b 854f45a136 2cea0633fa d670765b97 01a0d554f5
2a9d6b9fbf eb286bb80f 1cc69e1ce0 338a532e07 30c9609c4e 944d7b3d62 b727f922a4 835df4b2e4
0f816561db 3bdab6f686 390ffb80e7 42b8355269 94de86aeb8 43b18dada4 63afd0d2cf 93e87474fb
6ec39b6c81 4b56933643 536486f0e5 14fdaca4b4 b4a51084ab a334007808 e7f574aad5 738f2e14ce
3d60c3ec10 187706f108 a97faeb3c7 230c6aa326 cf1c38162e 4626c28c27 ad6908e35d 7af668f452
ccd20c9973 9a0ab3a87a 4d345429ab d76ec7b2da c99d367a11 e29a6c532c 944ef2f690 0a5c184a5d
3cf21e6edc aad8ea172c bc930f310f 95e0d118be 330cd56517 2264e30d99 4367e16740 e5b1dbf4b4
1b3f546ba4 ffb4e02a53 8786cb468a 802c3c0c51 f4e6f28e6d 33a34f9619 b4e027e918 283b3e601c
cacd0fb139 ba69ebc5f7 c11c3f2710 7b86251bef 1d760e79a6 c0a2e8651b ea6e279bbc 82428133d6
929ec39a63 57f040753d 59a6226f74 c107b7531c d68b554cd3
```
```
@@ -4,13 +4,13 @@ coverage:
  range: 60...90
  status:
    project:
      default: yes
      default:
        threshold: 0.3%

ignore:
  - lib/spack/spack/test/.*
  - lib/spack/docs/.*
  - lib/spack/external/.*
  - share/spack/qa/.*
  - share/spack/spack-completion.bash

comment: off
```
```
@@ -9,6 +9,7 @@ omit =
    lib/spack/spack/test/*
    lib/spack/docs/*
    lib/spack/external/*
    share/spack/qa/*

[report]
# Regexes for lines to exclude from consideration
```
.gitattributes (vendored, new file, 1 line)

```
@@ -0,0 +1 @@
*.py diff=python
```
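Setting `diff=python` tells Git to use its built-in Python hunk-header pattern, so diff hunks are labeled with the enclosing `def` or `class` rather than a bare line range. A quick way to confirm the attribute resolves (the path below is just an example file from the repo):

```bash
# Show which diff driver Git applies to a tracked Python file
git check-attr diff -- lib/spack/spack/spec.py
# expected: lib/spack/spack/spec.py: diff: python
```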
```
@@ -1,6 +1,6 @@
#!/usr/bin/env python
#
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
```
.github/workflows/linux_build_tests.yaml (vendored, new file, 57 lines)

```
@@ -0,0 +1,57 @@
name: linux builds

on:
  push:
    branches:
      - master
      - develop
  pull_request:
    branches:
      - master
      - develop
    paths-ignore:
      # Don't run if we only modified packages in the built-in repository
      - 'var/spack/repos/builtin/**'
      - '!var/spack/repos/builtin/packages/lz4/**'
      - '!var/spack/repos/builtin/packages/mpich/**'
      - '!var/spack/repos/builtin/packages/tut/**'
      - '!var/spack/repos/builtin/packages/py-setuptools/**'
      - '!var/spack/repos/builtin/packages/openjpeg/**'
      - '!var/spack/repos/builtin/packages/r-rcpp/**'

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      max-parallel: 4
      matrix:
        package: [lz4, mpich, tut, py-setuptools, openjpeg, r-rcpp]
    steps:
      - uses: actions/checkout@v1
      - name: Cache ccache's store
        uses: actions/cache@v1
        with:
          path: ~/.ccache
          key: ccache-build-${{ matrix.package }}
          restore-keys: |
            ccache-build-${{ matrix.package }}
      - name: Setup Python
        uses: actions/setup-python@v1
        with:
          python-version: 3.8
      - name: Install System Packages
        run: |
          sudo apt-get -yqq install ccache gfortran perl perl-base r-base r-base-core r-base-dev findutils openssl libssl-dev libpciaccess-dev
          R --version
          perl --version
      - name: Copy Configuration
        run: |
          ccache -M 300M && ccache -z
          # Set up external deps for build tests, b/c they take too long to compile
          cp share/spack/qa/configuration/*.yaml etc/spack/
      - name: Run the build test
        run: |
          . share/spack/setup-env.sh
          SPEC=${{ matrix.package }} share/spack/qa/run-build-tests
          ccache -s
```
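The build-test steps above do not depend on GitHub-only machinery, so the same run can be sketched locally; a minimal version, assuming a checkout of the repo and one of the matrix packages (`lz4` here):

```bash
# Mirror the "Copy Configuration" and "Run the build test" steps locally
. share/spack/setup-env.sh
cp share/spack/qa/configuration/*.yaml etc/spack/  # pre-configured external deps
SPEC=lz4 share/spack/qa/run-build-tests
```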
.github/workflows/minimum_python_versions.yaml (vendored, new file, 30 lines)

```
@@ -0,0 +1,30 @@
name: python version check

on:
  push:
    branches:
      - master
      - develop
  pull_request:
    branches:
      - master
      - develop
jobs:
  validate:

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v1
      - name: Setup Python
        uses: actions/setup-python@v1
        with:
          python-version: 3.7
      - name: Install Python Packages
        run: |
          pip install --upgrade pip
          pip install --upgrade vermin
      - name: Minimum Version (Spack's Core)
        run: vermin --backport argparse -t=2.6- -t=3.5- -v lib/spack/spack/ lib/spack/llnl/ bin/
      - name: Minimum Version (Repositories)
        run: vermin --backport argparse -t=2.6- -t=3.5- -v var/spack/repos
```
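The same check runs locally before pushing; a minimal sketch using the exact targets from the workflow:

```bash
pip install --upgrade vermin
# Flag any construct that requires newer than Python 2.6 or 3.5
vermin --backport argparse -t=2.6- -t=3.5- -v lib/spack/spack/ lib/spack/llnl/ bin/
```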
```
@@ -1,14 +0,0 @@
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

generate ci jobs:
  script:
    - "./bin/generate-gitlab-ci-yml.sh"
  tags:
    - "spack-pre-ci"
  artifacts:
    paths:
      - ci-generation
    when: always
```
.travis.yml

```
@@ -18,12 +18,12 @@ jobs:
  fast_finish: true
  include:
    - stage: 'style checks'
      python: '3.7'
      python: '3.8'
      os: linux
      language: python
      env: TEST_SUITE=flake8
    # Shell integration with module files
    - python: '3.7'
    - python: '3.8'
      os: linux
      language: python
      env: [ TEST_SUITE=bootstrap ]
@@ -46,10 +46,14 @@ jobs:
      language: python
      env: TEST_SUITE=unit
    - python: '3.7'
      os: linux
      language: python
      env: TEST_SUITE=unit
    - python: '3.8'
      os: linux
      language: python
      env: [ TEST_SUITE=unit, COVERAGE=true ]
    - python: '3.7'
    - python: '3.8'
      os: linux
      language: python
      env: TEST_SUITE=doc
@@ -57,61 +61,10 @@
      language: generic
      env: [ TEST_SUITE=unit, PYTHON_VERSION=2.7, COVERAGE=true ]
      if: type != pull_request
    # mpich (AutotoolsPackage)
    - stage: 'build tests'
      python: '2.7'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=mpich' ]
    # astyle (MakefilePackage)
    - python: '3.7'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=astyle' ]
    # tut (WafPackage)
    - python: '3.7'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=tut' ]
    # py-setuptools (PythonPackage)
    - python: '3.7'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=py-setuptools' ]
    # perl-dbi (PerlPackage)
    # - python: '2.7'
    #   os: linux
    #   language: python
    #   env: [ TEST_SUITE=build, 'SPEC=perl-dbi' ]
    # openjpeg (CMakePackage + external cmake)
    - python: '3.7'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=openjpeg' ]
    # r-rcpp (RPackage + external R)
    - python: '3.7'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=r-rcpp' ]
    # mpich (AutotoolsPackage)
    - python: '3.7'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=mpich' ]
    - python: '3.6'
      stage: 'docker build'
      os: linux
      language: python
      env: TEST_SUITE=docker
  allow_failures:
    - env: TEST_SUITE=docker

stages:
  - 'style checks'
  - 'unit tests + documentation'
  - 'build tests'
  - name: 'docker build'
    if: type = push AND branch IN (develop, master)


#=============================================================================
@@ -155,7 +108,6 @@ cache:
  pip: true
  ccache: true
  directories:
    - ~/.mirror
    - ~/.ccache

# Work around Travis's lack of support for Python on OSX
@@ -173,7 +125,7 @@ install:
  - pip install --upgrade pip
  - pip install --upgrade six
  - pip install --upgrade setuptools
  - pip install --upgrade codecov
  - pip install --upgrade codecov coverage==4.5.4
  - pip install --upgrade flake8
  - pip install --upgrade pep8-naming
  - if [[ "$TEST_SUITE" == "doc" ]]; then
@@ -188,17 +140,9 @@ before_script:
  # Need this to be able to compute the list of changed files
  - git fetch origin ${TRAVIS_BRANCH}:${TRAVIS_BRANCH}

  # Set up external deps for build tests, b/c they take too long to compile
  - if [[ "$TEST_SUITE" == "build" ]]; then
      cp share/spack/qa/configuration/*.yaml etc/spack/;
    fi

#=============================================================================
# Building
#=============================================================================
services:
  - docker

script:
  - share/spack/qa/run-$TEST_SUITE-tests
```
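Each Travis stage ultimately dispatches to one script per suite (`share/spack/qa/run-$TEST_SUITE-tests`), so a suite can plausibly be run the same way outside Travis; a hedged sketch, assuming the `unit` suite and that the script has no Travis-only prerequisites:

```bash
# Travis runs: share/spack/qa/run-$TEST_SUITE-tests
TEST_SUITE=unit share/spack/qa/run-unit-tests
```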
CHANGELOG.md

@@ -1,3 +1,206 @@

# v0.14.3 (2020-07-10)

This is a minor release on the `0.14` series. The latest release of
Spack is `0.15.1`. This release includes bugfixes backported to the
`0.14` series from `0.15.0` and `0.15.1`. These include:

* Spack has a public mirror for source files to prevent downtime when sites go down (#17077)
* Spack setup scripts no longer hang when sourced in .*rc files on Cray (#17386)
* Spack commands no longer fail in an incomplete Spack environment (#16473)
* Improved detection of config.guess and config.sub files (#16854, #17149, #17333, #17356)
* GCC builds on aarch64 architectures and with the spec `%gcc +binutils` (#17280, #9024)
* Better cleaning of the build environment (#8623)
* The `spack versions` command no longer has the potential to cause a fork bomb (#16749)

# v0.14.2 (2020-04-15)

This is a minor release on the `0.14` series. It includes performance
improvements and bug fixes:

* Improvements to how `spack install` handles foreground/background (#15723)
* Major performance improvements for reading the package DB (#14693, #15777)
* No longer check for the old `index.yaml` database file (#15298)
* Properly activate environments with '-h' in the name (#15429)
* External packages have correct `.prefix` in environments/views (#15475)
* Improvements to computing env modifications from sourcing files (#15791)
* Bugfix on Cray machines when getting the `TERM` env variable (#15630)
* Avoid adding spurious `LMOD` env vars to Intel modules (#15778)
* Don't output [+] for mock installs run during tests (#15609)

# v0.14.1 (2020-03-20)

This is a bugfix release on top of `v0.14.0`. Specific fixes include:

* several bugfixes for parallel installation (#15339, #15341, #15220, #15197)
* `spack load` now works with packages that have been renamed (#14348)
* bugfix for `suite-sparse` installation (#15326)
* deduplicate identical suffixes added to module names (#14920)
* fix issues with `configure_args` during module refresh (#11084)
* increased test coverage and test fixes (#15237, #15354, #15346)
* remove some unused code (#15431)

# v0.14.0 (2020-02-23)

`v0.14.0` is a major feature release, with 3 highlighted features:

1. **Distributed builds.** Multiple Spack instances will now coordinate
   properly with each other through locks. This works on a single node
   (where you've called `spack` several times) or across multiple nodes
   with a shared filesystem. For example, with SLURM, you could build
   `trilinos` and its dependencies on 2 24-core nodes, with 3 Spack
   instances per node and 8 build jobs per instance, with `srun -N 2 -n 6
   spack install -j 8 trilinos`. This requires a filesystem with locking
   enabled, but not MPI or any other library for parallelism. (A
   single-node sketch follows this list.)

2. **Build pipelines.** You can also build in parallel through GitLab
   CI. Simply create a Spack environment and push it to GitLab to build
   on GitLab runners. Pipeline support is now integrated into a single
   `spack ci` command, so setting it up is easier than ever. See the
   [Pipelines section](https://spack.readthedocs.io/en/v0.14.0/pipelines.html)
   in the docs.

3. **Container builds.** The new `spack containerize` command allows you
   to create a Docker or Singularity recipe from any Spack environment.
   There are options to customize the build if you need them. See the
   [Container Images section](https://spack.readthedocs.io/en/latest/containers.html)
   in the docs. (A minimal recipe sketch follows this list.)
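Hedged sketches of items 1 and 3 above: the single-node form of distributed builds relies only on the lock coordination the notes describe, and the environment file assumes the `container` schema from the linked Container Images docs (`myenv` and `zlib` are just placeholder names):

```bash
# 1. Distributed build on one node: two coordinating instances, 8 jobs each
spack install -j 8 trilinos &
spack install -j 8 trilinos &
wait

# 3. Generate a Docker recipe from a minimal environment
mkdir -p myenv && cat > myenv/spack.yaml <<'EOF'
spack:
  specs:
  - zlib
  container:
    format: docker
EOF
cd myenv && spack containerize > Dockerfile
```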
In addition, there are several other new commands, many bugfixes and
improvements, and `spack load` no longer requires modules, so you can use
it the same way on your laptop or on your supercomputer.

Spack grew by over 300 packages since our last release in November 2019,
and the project grew to over 500 contributors. Thanks to all of you for
making yet another great release possible. Detailed notes below.

## Major new core features

* Distributed builds: spack instances coordinate and build in parallel (#13100)
* New `spack ci` command to manage CI pipelines (#12854)
* Generate container recipes from environments: `spack containerize` (#14202)
* `spack load` now works without using modules (#14062, #14628; sketch after this list)
* Garbage collect old/unused installations with `spack gc` (#13534)
* Configuration files all set environment modifications the same way (#14372,
  [docs](https://spack.readthedocs.io/en/v0.14.0/configuration.html#environment-modifications))
* `spack commands --format=bash` auto-generates completion (#14393, #14607; sketch after this list)
* Packages can specify alternate fetch URLs in case one fails (#13881)
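Sketches for the two features flagged above; the command forms assume only what the notes state and are not exhaustive:

```bash
# Auto-generate and source bash tab-completion for spack commands
spack commands --format=bash > ~/.spack-completion.bash
. ~/.spack-completion.bash

# Load a package into the current shell without module files;
# --only dependencies sets up just its dependency tree
spack load zlib
spack load --only dependencies zlib
```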
## Improvements

* Improved locking for concurrency with environments (#14676, #14621, #14692)
* `spack test` sends args to `pytest`, supports better listing (#14319)
* Better support for aarch64 and the cascadelake microarchitecture (#13825, #13780, #13820)
* Archspec is now a separate library (see https://github.com/archspec/archspec)
* Many improvements to the `spack buildcache` command (#14237, #14346,
  #14466, #14467, #14639, #14642, #14659, #14696, #14698, #14714, #14732,
  #14929, #15003, #15086, #15134)

## Selected Bugfixes

* Compilers now require an exact match on version (#8735, #14730, #14752)
* Bugfix for patches that specified specific versions (#13989)
* `spack find -p` now works in environments (#10019, #13972)
* Dependency queries work correctly in `spack find` (#14757)
* Bugfixes for locking chains of upstream Spack instances (#13364)
* Fixes for PowerPC clang optimization flags (#14196)
* Fix for an issue with compilers and specific microarchitectures (#13733, #14798)

## New commands and options

* `spack ci` (#12854)
* `spack containerize` (#14202)
* `spack gc` (#13534)
* `spack load` accepts `--only package`, `--only dependencies` (#14062, #14628)
* `spack commands --format=bash` (#14393)
* `spack commands --update-completion` (#14607)
* `spack install --with-cache` has a new option: `--no-check-signature` (#11107)
* `spack test` now has `--list`, `--list-long`, and `--list-names` (#14319)
* `spack install --help-cdash` moves CDash help out of the main help (#13704)

## Deprecations

* `spack release-jobs` has been rolled into `spack ci`
* `spack bootstrap` will be removed in a future version, as it is no longer
  needed to set up modules (see the `spack load` improvements above)

## Documentation

* New section on building container images with Spack (see
  [docs](https://spack.readthedocs.io/en/latest/containers.html))
* New section on using the `spack ci` command to build pipelines (see
  [docs](https://spack.readthedocs.io/en/latest/pipelines.html))
* Document how to add conditional dependencies (#14694)
* Document how to use Spack to replace Homebrew/Conda (#13083, see
  [docs](https://spack.readthedocs.io/en/latest/workflows.html#using-spack-to-replace-homebrew-conda))

## Important package changes

* 3,908 total packages (345 added since 0.13.0)
* Added a first cut at a TensorFlow package (#13112)
* We now build R without "recommended" packages, manage them w/Spack (#12015)
* Elpa and OpenBLAS now leverage microarchitecture support (#13655, #14380)
* Fix `octave` compiler wrapper usage (#14726)
* Enforce that packages in `builtin` aren't missing dependencies (#13949)


# v0.13.4 (2020-02-07)

This release contains several bugfixes:

* bugfixes for invoking python in various environments (#14349, #14496, #14569)
* brought tab completion up to date (#14392)
* bugfix for removing extensions from views in order (#12961)
* bugfix for nondeterministic hashing of specs with externals (#14390)

# v0.13.3 (2019-12-23)

This release contains more major performance improvements for Spack
environments, as well as bugfixes for mirrors and a `python` issue with
RHEL8.

* mirror bugfixes: symlinks, duplicate patches, and exception handling (#13789)
* don't try to fetch `BundlePackages` (#13908)
* avoid re-fetching patches already added to a mirror (#13908)
* allow repeated invocations of `spack mirror create` on the same dir (#13908)
* bugfix for RHEL8 when `python` is unavailable (#14252)
* improve concretization performance in environments (#14190)
* improve installation performance in environments (#14263)

# v0.13.2 (2019-12-04)

This release contains major performance improvements for Spack environments, as
well as some bugfixes and minor changes.

* allow missing modules if they are blacklisted (#13540)
* speed up environment activation (#13557)
* mirror path works for unknown versions (#13626)
* environments: don't try to modify run-env if a spec is not installed (#13589)
* use semicolons instead of newlines in module/python command (#13904)
* verify.py: os.path.exists exception handling (#13656)
* Document use of the maintainers field (#13479)
* bugfix for config caching (#13755)
* hwloc: added a 'master' version pointing at the HEAD of the master branch (#13734)
* config option to allow gpg warning suppression (#13744)
* fix for relative symlinks when relocating binary packages (#13727)
* allow binary relocation of strings in relative binaries (#13724)

# v0.13.1 (2019-11-05)

This is a bugfix release on top of `v0.13.0`. Specific fixes include:

* `spack find` now displays variants and other spec constraints
* bugfix: uninstall should find concrete specs by DAG hash (#13598)
* environments: make shell modifications partially unconditional (#13523)
* binary distribution: relocate text files properly in relative binaries (#13578)
* bugfix: fetch prefers local mirrors over remote resources (#13545)
* environments: only write when necessary (#13546)
* bugfix: spack.util.url.join() now handles absolute paths correctly (#13488)
* sbang: use utf-8 encoding when patching (#13490)
* Specs with quoted flags containing spaces are parsed correctly (#13521)
* targets: print a warning message before downgrading (#13513)
* Travis CI: test Python 3.8 (#13347)
* Documentation: Database.query methods share docstrings (#13515)
* cuda: fix conflict statements for x86-64 targets (#13472)
* cpu: fix clang flags for generic x86_64 (#13491)
* syaml_int type should use int.__repr__ rather than str.__repr__ (#13487)
* elpa: prefer 2016.05.004 until sse/avx/avx2 issues are resolved (#13530)
* trilinos: temporarily constrain netcdf@:4.7.1 (#13526)

# v0.13.0 (2019-10-25)

`v0.13.0` is our biggest Spack release yet, with *many* new major features.
```
@@ -68,10 +68,6 @@ PackageName: py
PackageHomePage: https://pypi.python.org/pypi/py
PackageLicenseDeclared: MIT

PackageName: pyqver
PackageHomePage: https://github.com/ghewgill/pyqver
PackageLicenseDeclared: BSD-3-Clause

PackageName: pytest
PackageHomePage: https://pypi.python.org/pypi/pytest
PackageLicenseDeclared: MIT
```
```
@@ -1,4 +1,4 @@
Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.

Permission is hereby granted, free of charge, to any person obtaining a
```
```
@@ -1,6 +1,7 @@
# <img src="https://cdn.rawgit.com/spack/spack/develop/share/spack/logo/spack-logo.svg" width="64" valign="middle" alt="Spack"/> Spack

[](https://travis-ci.org/spack/spack)
[](https://github.com/spack/spack/actions)
[](https://codecov.io/gh/spack/spack)
[](https://spack.readthedocs.io)
[](https://spackpm.herokuapp.com)
```
```
@@ -1,91 +0,0 @@
#! /usr/bin/env bash

# Remember where we are initially, it's the repo cloned by gitlab-ci
original_directory=$(pwd)
. "${original_directory}/share/spack/setup-env.sh"

# Create a temporary working directory
temp_dir=$(mktemp -d)
trap 'rm -rf "$temp_dir"' INT TERM QUIT EXIT

if [ -z "${DOWNSTREAM_CI_REPO}" ] ; then
    echo "ERROR: missing variable: DOWNSTREAM_CI_REPO" >&2
    exit 1
fi

if [ -z "${SPACK_RELEASE_ENVIRONMENT_PATH}" ] ; then
    echo "ERROR: missing variable: SPACK_RELEASE_ENVIRONMENT_PATH" >&2
    exit 1
fi

if [ -z "${CDASH_AUTH_TOKEN}" ] ; then
    echo "WARNING: missing variable: CDASH_AUTH_TOKEN" >&2
else
    token_file="${temp_dir}/cdash_auth_token"
    echo ${CDASH_AUTH_TOKEN} > ${token_file}
fi

if [ -z "${SPACK_RELEASE_ENVIRONMENT_REPO}" ] ; then
    echo "Assuming spack repo contains environment" >&2
    env_repo_dir=${original_directory}
else
    echo "Cloning ${SPACK_RELEASE_ENVIRONMENT_REPO} into ${temp_dir}/envrepo" >&2
    cd ${temp_dir}
    git clone ${SPACK_RELEASE_ENVIRONMENT_REPO} envrepo
    cd envrepo
    env_repo_dir=$(pwd)
fi

current_branch="$CI_COMMIT_REF_NAME"

# Because we want to see the generated gitlab-ci file as an artifact,
# we need to write it within the spack repo cloned by gitlab-ci.
gen_ci_dir="${original_directory}/ci-generation"
gen_ci_file="${gen_ci_dir}/.gitlab-ci.yml"
mkdir -p ${gen_ci_dir}

env_dir="${env_repo_dir}/${SPACK_RELEASE_ENVIRONMENT_PATH}"

if [ ! -f "${env_dir}/spack.yaml" ] ; then
    echo "ERROR: Cannot find spack environment file in ${env_dir}"
    exit 1
fi

cd $env_dir

# The next command generates the .gitlab-ci.yml (and optionally creates a
# buildgroup in cdash)
RELEASE_JOBS_ARGS=("--output-file" "${gen_ci_file}")
if [ ! -z "${token_file}" ]; then
    RELEASE_JOBS_ARGS+=("--cdash-credentials" "${token_file}")
fi

spack release-jobs "${RELEASE_JOBS_ARGS[@]}"

if [[ $? -ne 0 ]]; then
    echo "spack release-jobs command failed"
    exit 1
fi

cd "$original_directory"
mv .git "$temp_dir/original-git-dir"
git init .

git config user.email "robot@spack.io"
git config user.name "Spack Build Bot"

cp ${gen_ci_file} "${original_directory}/.gitlab-ci.yml"
git add .

echo "git commit"
commit_message="Auto-generated commit testing"
commit_message="${commit_message} ${current_branch} (${CI_COMMIT_SHA})"
git commit --message="${commit_message}"

echo "git push"
git remote add origin "$DOWNSTREAM_CI_REPO"
git push --force origin "master:multi-ci-${current_branch}"

rm -rf .git
mv "$temp_dir/original-git-dir" .git
git reset --hard HEAD
```
@@ -1,13 +0,0 @@
#!/bin/bash

# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

set -x

SPACK_BIN_DIR="${CI_PROJECT_DIR}/bin"
export PATH="${SPACK_BIN_DIR}:${PATH}"

spack buildcache update-index -d "$MIRROR_URL"
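For reference, the same index update can be run by hand against any mirror URL. A sketch with a hypothetical local file mirror (only the URL differs from the script above):

   # "-d" points the command at the mirror whose buildcache index should be rebuilt.
   $ spack buildcache update-index -d file:///tmp/local_mirror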
@@ -1,399 +0,0 @@
#!/bin/bash

# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

###
### This script represents a gitlab-ci job, corresponding to a single release
### spec. As such this script must first decide whether or not the spec it
### has been assigned is up to date on the remote binary mirror. If it is
### not (i.e. the source code has changed in a way that caused a change in the
### full_hash of the spec), this script will build the package, create a
### binary cache for it, and then push all related files to the remote binary
### mirror. This script also optionally communicates with a remote CDash
### instance to share status on the package build process.
###
### The following environment variables are (possibly) used within this script
### in order for the various elements to function properly.
###
### First are two defaults we rely on from gitlab:
###
### CI_PROJECT_DIR
### CI_JOB_NAME
###
### The following must be set up in the variables section of gitlab:
###
### AWS_ACCESS_KEY_ID
### AWS_SECRET_ACCESS_KEY
### SPACK_SIGNING_KEY
###
### SPACK_S3_UPLOAD_MIRROR_URL // only required in the short term for the cloud case
###
### The following variables are defined by the ci generation process and are
### required:
###
### SPACK_ENABLE_CDASH
### SPACK_ROOT_SPEC
### SPACK_MIRROR_URL
### SPACK_JOB_SPEC_PKG_NAME
### SPACK_COMPILER_ACTION
###
### Finally, these variables are optionally defined by the ci generation
### process, and may or may not be present:
###
### SPACK_CDASH_BASE_URL
### SPACK_CDASH_PROJECT
### SPACK_CDASH_PROJECT_ENC
### SPACK_CDASH_BUILD_NAME
### SPACK_CDASH_SITE
### SPACK_RELATED_BUILDS
### SPACK_JOB_SPEC_BUILDGROUP
###

shopt -s expand_aliases

export FORCE_UNSAFE_CONFIGURE=1

TEMP_DIR="${CI_PROJECT_DIR}/jobs_scratch_dir"

JOB_LOG_DIR="${TEMP_DIR}/logs"
SPEC_DIR="${TEMP_DIR}/specs"
LOCAL_MIRROR="${CI_PROJECT_DIR}/local_mirror"
BUILD_CACHE_DIR="${LOCAL_MIRROR}/build_cache"
SPACK_BIN_DIR="${CI_PROJECT_DIR}/bin"

if [ "${SPACK_ENABLE_CDASH}" == "True" ] ; then
    CDASH_UPLOAD_URL="${SPACK_CDASH_BASE_URL}/submit.php?project=${SPACK_CDASH_PROJECT_ENC}"
    DEP_JOB_RELATEBUILDS_URL="${SPACK_CDASH_BASE_URL}/api/v1/relateBuilds.php"
    declare -a JOB_DEPS_PKG_NAMES
fi

export SPACK_ROOT=${CI_PROJECT_DIR}
# export PATH="${SPACK_BIN_DIR}:${PATH}"
export GNUPGHOME="${CI_PROJECT_DIR}/opt/spack/gpg"

. "${CI_PROJECT_DIR}/share/spack/setup-env.sh"

mkdir -p ${JOB_LOG_DIR}
mkdir -p ${SPEC_DIR}

cleanup() {
    set +x

    if [ -z "$exit_code" ] ; then

        exit_code=$1
        if [ -z "$exit_code" ] ; then
            exit_code=0
        fi

        restore_io

        if [ "$( type -t finalize )" '=' 'function' ] ; then
            finalize "$JOB_LOG_DIR/cdash_log.txt"
        fi

        # We can clean these out later on, once we have a good sense for
        # how the logging infrastructure is working
        # rm -rf "$JOB_LOG_DIR"
    fi

    \exit $exit_code
}

alias exit='cleanup'

begin_logging() {
    trap "cleanup 1; \\exit \$exit_code" INT TERM QUIT
    trap "cleanup 0; \\exit \$exit_code" EXIT

    rm -rf "$JOB_LOG_DIR/cdash_log.txt"

    # NOTE: Here, some redirects are set up
    exec 3>&1 # fd 3 is now a dup of stdout
    exec 4>&2 # fd 4 is now a dup of stderr

    # stdout and stderr are joined and redirected to the log
    exec &> "$JOB_LOG_DIR/cdash_log.txt"

    set -x
}

restore_io() {
    exec >&-
    exec 2>&-

    exec >&3
    exec 2>&4

    exec 3>&-
    exec 4>&-
}

finalize() {
    # If you define a finalize function:
    # - it will always be called at the very end of the script
    # - the log file will be passed in as the first argument, and
    # - the code in this function will not be logged.
    echo "The full log file is located at $1"
    # TODO: send this log data to cdash!
}

check_error()
{
    local last_exit_code=$1
    local last_cmd=$2
    if [[ ${last_exit_code} -ne 0 ]]; then
        echo "${last_cmd} exited with code ${last_exit_code}"
        echo "TERMINATING JOB"
        exit 1
    else
        echo "${last_cmd} completed successfully"
    fi
}

extract_build_id()
{
    LINES_TO_SEARCH=$1
    regex="buildSummary\.php\?buildid=([[:digit:]]+)"
    SINGLE_LINE_OUTPUT=$(echo ${LINES_TO_SEARCH} | tr -d '\n')

    if [[ ${SINGLE_LINE_OUTPUT} =~ ${regex} ]]; then
        echo "${BASH_REMATCH[1]}"
    else
        echo "NONE"
    fi
}

get_relate_builds_post_data()
{
  cat <<EOF
{
  "project": "${1}",
  "buildid": ${2},
  "relatedid": ${3},
  "relationship": "depends on"
}
EOF
}

gen_full_specs_for_job_and_deps() {
    SPEC_YAML_PATH="${SPEC_DIR}/${SPACK_JOB_SPEC_PKG_NAME}.yaml"
    local spec_names_to_save="${SPACK_JOB_SPEC_PKG_NAME}"

    if [ "${SPACK_ENABLE_CDASH}" == "True" ] ; then
        IFS=';' read -ra DEPS <<< "${SPACK_RELATED_BUILDS}"
        for i in "${DEPS[@]}"; do
            depPkgName="${i}"
            spec_names_to_save="${spec_names_to_save} ${depPkgName}"
            JOB_DEPS_PKG_NAMES+=("${depPkgName}")
        done
    fi

    if [ "${SPACK_COMPILER_ACTION}" == "FIND_ANY" ]; then
        # This corresponds to a bootstrapping phase where we need to
        # rely on any available compiler to build the package (i.e. the
        # compiler needed to be stripped from the spec), and thus we need
        # to concretize the root spec again.
        spack -d buildcache save-yaml --specs "${spec_names_to_save}" --root-spec "${SPACK_ROOT_SPEC}" --yaml-dir "${SPEC_DIR}"
    else
        # In this case, either we're relying on Spack to install a missing
        # compiler bootstrapped in a previous phase, or else we only had one
        # phase (like a site which already knows what compilers are available
        # on its runners), so we don't want to concretize that root spec
        # again. The reason we need this in the first case (bootstrapped
        # compiler) is that we can't concretize a spec at this point if we're
        # going to ask spack to "install_missing_compilers".
        tmp_dir=$(mktemp -d)
        TMP_YAML_PATH="${tmp_dir}/root.yaml"
        ROOT_SPEC_YAML=$(spack python -c "import base64 ; import zlib ; print(str(zlib.decompress(base64.b64decode('${SPACK_ROOT_SPEC}')).decode('utf-8')))")
        echo "${ROOT_SPEC_YAML}" > "${TMP_YAML_PATH}"
        spack -d buildcache save-yaml --specs "${spec_names_to_save}" --root-spec-yaml "${TMP_YAML_PATH}" --yaml-dir "${SPEC_DIR}"
        rm -rf ${tmp_dir}
    fi
}

begin_logging

echo "Running job for spec: ${CI_JOB_NAME}"

# This should create the directory we referred to as GNUPGHOME earlier
spack gpg list

# Importing the secret key using gpg2 directly should allow us to
# both sign and verify
set +x
KEY_IMPORT_RESULT=`echo ${SPACK_SIGNING_KEY} | base64 --decode | gpg2 --import`
check_error $? "gpg2 --import"
set -x

spack gpg list --trusted
spack gpg list --signing

# To have spack install missing compilers, we need to add a custom
# configuration scope, then we pass that to the package installation
# command
CUSTOM_CONFIG_SCOPE_DIR="${TEMP_DIR}/config_scope"
mkdir -p "${CUSTOM_CONFIG_SCOPE_DIR}"
CUSTOM_CONFIG_SCOPE_ARG=""

if [ "${SPACK_COMPILER_ACTION}" == "INSTALL_MISSING" ]; then
    echo "Make sure bootstrapped compiler will be installed"
    custom_config_file_path="${CUSTOM_CONFIG_SCOPE_DIR}/config.yaml"
    cat <<CONFIG_STUFF > "${custom_config_file_path}"
config:
  install_missing_compilers: true
CONFIG_STUFF
    CUSTOM_CONFIG_SCOPE_ARG="-C ${CUSTOM_CONFIG_SCOPE_DIR}"
    # Configure the binary mirror where, if needed, this job's compiler
    # was installed in binary package form, then tell spack to
    # install_missing_compilers.
elif [ "${SPACK_COMPILER_ACTION}" == "FIND_ANY" ]; then
    echo "Just find any available compiler"
    spack compiler find
else
    echo "No compiler action to be taken"
fi

# Finally, list the compilers spack knows about
echo "Compiler Configurations:"
spack config get compilers

# Write full-deps yamls for this job spec and its dependencies
gen_full_specs_for_job_and_deps

# Make the build_cache directory if it doesn't exist
mkdir -p "${BUILD_CACHE_DIR}"

# Get buildcache name so we can write a CDash build id file in the right place.
# If we're unable to get the buildcache name, we may have encountered a problem
# concretizing the spec, or some other issue that will eventually cause the job
# to fail.
JOB_BUILD_CACHE_ENTRY_NAME=`spack -d buildcache get-buildcache-name --spec-yaml "${SPEC_YAML_PATH}"`
if [[ $? -ne 0 ]]; then
    echo "ERROR, unable to get buildcache entry name for job ${CI_JOB_NAME}"
    exit 1
fi

if [ "${SPACK_ENABLE_CDASH}" == "True" ] ; then
    # Whether we have to build the spec or download it pre-built, we expect to find
    # the cdash build id file sitting in this location afterwards.
    JOB_CDASH_ID_FILE="${BUILD_CACHE_DIR}/${JOB_BUILD_CACHE_ENTRY_NAME}.cdashid"
fi

# Finally, we can check the spec we have been tasked with building against
# the built binary on the remote mirror to see if it needs to be rebuilt
spack -d buildcache check --spec-yaml "${SPEC_YAML_PATH}" --mirror-url "${SPACK_MIRROR_URL}" --rebuild-on-error

if [[ $? -ne 0 ]]; then
    # Configure mirror
    spack mirror add local_artifact_mirror "file://${LOCAL_MIRROR}"

    if [ "${SPACK_ENABLE_CDASH}" == "True" ] ; then
        JOB_CDASH_ID="NONE"

        # Install package, using the buildcache from the local mirror to
        # satisfy dependencies.
        BUILD_ID_LINE=`spack -d -k -v "${CUSTOM_CONFIG_SCOPE_ARG}" install --keep-stage --cdash-upload-url "${CDASH_UPLOAD_URL}" --cdash-build "${SPACK_CDASH_BUILD_NAME}" --cdash-site "${SPACK_CDASH_SITE}" --cdash-track "${SPACK_JOB_SPEC_BUILDGROUP}" -f "${SPEC_YAML_PATH}" | grep "buildSummary\\.php"`
        check_error $? "spack install"

        # By parsing the output of the "spack install" command, we can get the
        # buildid generated for us by CDash
        JOB_CDASH_ID=$(extract_build_id "${BUILD_ID_LINE}")

        # Write the .cdashid file to the buildcache as well
        echo "${JOB_CDASH_ID}" >> ${JOB_CDASH_ID_FILE}
    else
        spack -d -k -v "${CUSTOM_CONFIG_SCOPE_ARG}" install --keep-stage -f "${SPEC_YAML_PATH}"
    fi

    # Copy some log files into an artifact location, once we have a way
    # to provide a spec.yaml file to more spack commands (e.g. "location")
    # stage_dir=$(spack location --stage-dir -f "${SPEC_YAML_PATH}")
    # build_log_file=$(find -L "${stage_dir}" | grep "spack-build\\.out")
    # config_log_file=$(find -L "${stage_dir}" | grep "config\\.log")
    # cp "${build_log_file}" "${JOB_LOG_DIR}/"
    # cp "${config_log_file}" "${JOB_LOG_DIR}/"

    # Create buildcache entry for this package, reading the spec from the yaml
    # file.
    spack -d buildcache create --spec-yaml "${SPEC_YAML_PATH}" -a -f -d "${LOCAL_MIRROR}" --no-rebuild-index
    check_error $? "spack buildcache create"

    # TODO: The upload-s3 command should eventually be replaced with something
    # like: "spack buildcache put <mirror> <spec>", when that subcommand is
    # properly implemented.
    if [ ! -z "${SPACK_S3_UPLOAD_MIRROR_URL}" ] ; then
        spack -d upload-s3 spec --base-dir "${LOCAL_MIRROR}" --spec-yaml "${SPEC_YAML_PATH}" --endpoint-url "${SPACK_S3_UPLOAD_MIRROR_URL}"
        check_error $? "spack upload-s3 spec"
    else
        spack -d buildcache copy --base-dir "${LOCAL_MIRROR}" --spec-yaml "${SPEC_YAML_PATH}" --destination-url "${SPACK_MIRROR_URL}"
    fi
else
    echo "spec ${CI_JOB_NAME} is already up to date on remote mirror, downloading it"

    # Configure remote mirror so we can download buildcache entry
    spack mirror add remote_binary_mirror ${SPACK_MIRROR_URL}

    # Now download it
    BUILDCACHE_DL_ARGS=("--spec-yaml" "${SPEC_YAML_PATH}" "--path" "${BUILD_CACHE_DIR}/" )
    if [ "${SPACK_ENABLE_CDASH}" == "True" ] ; then
        BUILDCACHE_DL_ARGS+=( "--require-cdashid" )
    fi
    spack -d buildcache download "${BUILDCACHE_DL_ARGS[@]}"
    check_error $? "spack buildcache download"
fi

# The next step is to relate this job to the jobs it depends on
if [ "${SPACK_ENABLE_CDASH}" == "True" ] ; then
    if [ -f "${JOB_CDASH_ID_FILE}" ]; then
        JOB_CDASH_BUILD_ID=$(<${JOB_CDASH_ID_FILE})

        if [ "${JOB_CDASH_BUILD_ID}" == "NONE" ]; then
            echo "ERROR: unable to read this job's id from ${JOB_CDASH_ID_FILE}"
            exit 1
        fi

        # Now get CDash ids for dependencies and "relate" each dependency build
        # with this job's build
        for DEP_PKG_NAME in "${JOB_DEPS_PKG_NAMES[@]}"; do
            echo "Getting cdash id for dependency --> ${DEP_PKG_NAME} <--"
            DEP_SPEC_YAML_PATH="${SPEC_DIR}/${DEP_PKG_NAME}.yaml"
            DEP_JOB_BUILDCACHE_NAME=`spack -d buildcache get-buildcache-name --spec-yaml "${DEP_SPEC_YAML_PATH}"`

            if [[ $? -eq 0 ]]; then
                DEP_JOB_ID_FILE="${BUILD_CACHE_DIR}/${DEP_JOB_BUILDCACHE_NAME}.cdashid"
                echo "DEP_JOB_ID_FILE path = ${DEP_JOB_ID_FILE}"

                if [ -f "${DEP_JOB_ID_FILE}" ]; then
                    DEP_JOB_CDASH_BUILD_ID=$(<${DEP_JOB_ID_FILE})
                    echo "File ${DEP_JOB_ID_FILE} contained value ${DEP_JOB_CDASH_BUILD_ID}"
                    echo "Relating builds -> ${SPACK_CDASH_BUILD_NAME} (buildid=${JOB_CDASH_BUILD_ID}) depends on ${DEP_PKG_NAME} (buildid=${DEP_JOB_CDASH_BUILD_ID})"
                    relateBuildsPostBody="$(get_relate_builds_post_data "${SPACK_CDASH_PROJECT}" ${JOB_CDASH_BUILD_ID} ${DEP_JOB_CDASH_BUILD_ID})"
                    relateBuildsResult=`curl "${DEP_JOB_RELATEBUILDS_URL}" -H "Content-Type: application/json" -H "Accept: application/json" -d "${relateBuildsPostBody}"`
                    echo "Result of curl request: ${relateBuildsResult}"
                else
                    echo "ERROR: Did not find expected .cdashid file for dependency: ${DEP_JOB_ID_FILE}"
                    exit 1
                fi
            else
                echo "ERROR: Unable to get buildcache entry name for ${DEP_SPEC_NAME}"
                exit 1
            fi
        done
    else
        echo "ERROR: Did not find expected .cdashid file ${JOB_CDASH_ID_FILE}"
        exit 1
    fi
fi

# Show the size of the buildcache and a list of what's in it, directly
# in the gitlab log output
(
    restore_io
    du -sh ${BUILD_CACHE_DIR}
    find ${BUILD_CACHE_DIR} -maxdepth 3 -type d -ls
)

echo "End of rebuild package script"
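The ``extract_build_id`` helper in the script above is the piece that pulls the CDash build id out of the ``spack install`` output. A self-contained sketch of the same bash regex technique (the sample line is made up):

   # BASH_REMATCH[1] holds the first capture group after a successful =~ match.
   line='<a href="https://cdash.example.com/buildSummary.php?buildid=12345">'
   regex="buildSummary\.php\?buildid=([[:digit:]]+)"
   if [[ ${line} =~ ${regex} ]]; then
       echo "CDash build id: ${BASH_REMATCH[1]}"   # prints 12345
   fi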
@@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -103,10 +103,10 @@ interpreter_f="${interpreter_v[0]}"

# Invoke any interpreter found, or raise an error if none was found.
if [[ -n "$interpreter_f" ]]; then
    if [[ "${interpreter_f##*/}" = "perl" ]]; then
        exec $interpreter_v -x "$@"
    if [[ "${interpreter_f##*/}" = "perl"* ]]; then
        exec $interpreter -x "$@"
    else
        exec $interpreter_v "$@"
        exec $interpreter "$@"
    fi
else
    echo "error: sbang found no interpreter in $script"
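The effect of the trailing ``*`` added to the pattern above is that versioned interpreter names also match. A quick sketch of the glob-style comparison bash performs inside ``[[ ]]`` (the paths are illustrative):

   # ${f##*/} strips the directory part; the unquoted * makes a prefix match.
   for f in /usr/bin/perl /usr/bin/perl5.30.1 /usr/bin/python; do
       if [[ "${f##*/}" = "perl"* ]]; then
           echo "$f matches; sbang would re-exec it with perl's -x flag"
       fi
   done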
@@ -1,7 +1,7 @@
#!/bin/sh
# -*- python -*-
#
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,6 +1,6 @@
#!/bin/sh
#
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -80,6 +80,14 @@ config:
  verify_ssl: true


  # Suppress gpg warnings from binary package verification
  # Only suppresses warnings, gpg failure will still fail the install
  # Potential rationale to set True: users have already explicitly trusted the
  # gpg key they are using, and may not want to see repeated warnings that it
  # is self-signed or something of the sort.
  suppress_gpg_warnings: false


  # If set to true, Spack will attempt to build any compiler on the spec
  # that is not already available. If set to False, Spack will only use
  # compilers already configured in compilers.yaml
@@ -129,7 +137,7 @@ config:
  # when Spack needs to manage its own package metadata and all operations are
  # expected to complete within the default time limit. The timeout should
  # therefore generally be left untouched.
  db_lock_timeout: 120
  db_lock_timeout: 3


  # How long to wait when attempting to modify a package (e.g. to install it).
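These defaults can be overridden in a higher-precedence scope. A minimal sketch of a user-level ``~/.spack/config.yaml`` (the values shown are arbitrary, chosen only to illustrate the keys from the hunk above):

   $ cat ~/.spack/config.yaml
   config:
     suppress_gpg_warnings: true
     db_lock_timeout: 30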
etc/spack/defaults/mirrors.yaml (new file)
@@ -0,0 +1,2 @@
mirrors:
  spack-public: https://spack-llnl-mirror.s3-us-west-2.amazonaws.com/
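Once this default file is in place, the public mirror shows up alongside any user-defined mirrors. A sketch (the output layout is illustrative):

   $ spack mirror list
   spack-public    https://spack-llnl-mirror.s3-us-west-2.amazonaws.com/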
@@ -40,9 +40,11 @@ packages:
      pil: [py-pillow]
      pkgconfig: [pkgconf, pkg-config]
      scalapack: [netlib-scalapack]
      sycl: [hipsycl]
      szip: [libszip, libaec]
      tbb: [intel-tbb]
      unwind: [libunwind]
      sycl: [hipsycl]
    permissions:
      read: world
      write: user
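Each entry above maps a virtual package to its default providers, and the ``spack providers`` command queries the same information. A sketch (the output is illustrative):

   $ spack providers szip
   libaec  libszip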
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -232,6 +232,50 @@ remove dependent packages *before* removing their dependencies or use the

.. _nondownloadable:

^^^^^^^^^^^^^^^^^^
Garbage collection
^^^^^^^^^^^^^^^^^^

When Spack builds software from sources, it often installs tools that are needed
just to build or test other software. These are not necessary at runtime.
To support cases where removing these tools can be a benefit, Spack provides
the ``spack gc`` ("garbage collector") command, which will uninstall all unneeded packages:

.. code-block:: console

   $ spack find
   ==> 24 installed packages
   -- linux-ubuntu18.04-broadwell / gcc@9.0.1 ----------------------
   autoconf@2.69    findutils@4.6.0  libiconv@1.16        libszip@2.1.1  m4@1.4.18    openjpeg@2.3.1  pkgconf@1.6.3  util-macros@1.19.1
   automake@1.16.1  gdbm@1.18.1      libpciaccess@0.13.5  libtool@2.4.6  mpich@3.3.2  openssl@1.1.1d  readline@8.0   xz@5.2.4
   cmake@3.16.1     hdf5@1.10.5      libsigsegv@2.12      libxml2@2.9.9  ncurses@6.1  perl@5.30.0     texinfo@6.5    zlib@1.2.11

   $ spack gc
   ==> The following packages will be uninstalled:

   -- linux-ubuntu18.04-broadwell / gcc@9.0.1 ----------------------
   vn47edz autoconf@2.69    6m3f2qn findutils@4.6.0  ubl6bgk libtool@2.4.6  pksawhz openssl@1.1.1d  urdw22a readline@8.0
   ki6nfw5 automake@1.16.1  fklde6b gdbm@1.18.1      b6pswuo m4@1.4.18      k3s2csy perl@5.30.0     lp5ya3t texinfo@6.5
   ylvgsov cmake@3.16.1     5omotir libsigsegv@2.12  leuzbbh ncurses@6.1    5vmfbrq pkgconf@1.6.3   5bmv4tg util-macros@1.19.1

   ==> Do you want to proceed? [y/N] y

   [ ... ]

   $ spack find
   ==> 9 installed packages
   -- linux-ubuntu18.04-broadwell / gcc@9.0.1 ----------------------
   hdf5@1.10.5  libiconv@1.16  libpciaccess@0.13.5  libszip@2.1.1  libxml2@2.9.9  mpich@3.3.2  openjpeg@2.3.1  xz@5.2.4  zlib@1.2.11

In the example above Spack went through all the packages in the DB
and removed everything that is not either:

1. A package installed upon explicit request of the user
2. A ``link`` or ``run`` dependency, even transitive, of one of the packages at point 1.

You can check :ref:`cmd-spack-find-metadata` to see how to query for explicitly installed packages
or :ref:`dependency-types` for a more thorough treatment of dependency types.
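As a quick check of the first criterion, explicitly installed packages can be listed directly. A sketch (the ``--explicit`` flag restricts ``spack find`` to them; the output is illustrative):

   $ spack find --explicit
   ==> 2 installed packages
   -- linux-ubuntu18.04-broadwell / gcc@9.0.1 ----------------------
   hdf5@1.10.5  mpich@3.3.2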
^^^^^^^^^^^^^^^^^^^^^^^^^
Non-Downloadable Tarballs
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -414,6 +458,8 @@ Packages are divided into groups according to their architecture and
compiler. Within each group, Spack tries to keep the view simple, and
only shows the version of installed packages.

.. _cmd-spack-find-metadata:

""""""""""""""""""""""""""""""""
Viewing more metadata
""""""""""""""""""""""""""""""""
@@ -883,11 +929,13 @@ in GNU Autotools. If all flags are set, the order is

Compiler environment variables and additional RPATHs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In the exceptional case a compiler requires setting special environment
variables, like an explicit library load path. These can be set in an
extra section in the compiler configuration (the supported environment
modification commands are: ``set``, ``unset``, ``append-path``, and
``prepend-path``). The user can also specify additional ``RPATHs`` that the
Sometimes compilers require setting special environment variables to
operate correctly. Spack handles these cases by allowing custom environment
modifications in the ``environment`` attribute of the compiler configuration
section. See also the :ref:`configuration_environment_variables` section
of the configuration files docs for more information.

It is also possible to specify additional ``RPATHs`` that the
compiler will add to all executables generated by that compiler. This is
useful for forcing certain compilers to RPATH their own runtime libraries, so
that executables will run without the need to set ``LD_LIBRARY_PATH``.
@@ -904,28 +952,19 @@ that executables will run without the need to set ``LD_LIBRARY_PATH``.

         fc: /opt/gcc/bin/gfortran
       environment:
         unset:
           BAD_VARIABLE: # The colon is required but the value must be empty
         - BAD_VARIABLE
         set:
           GOOD_VARIABLE_NUM: 1
           GOOD_VARIABLE_STR: good
         prepend-path:
         prepend_path:
           PATH: /path/to/binutils
         append-path:
         append_path:
           LD_LIBRARY_PATH: /opt/gcc/lib
       extra_rpaths:
       - /path/to/some/compiler/runtime/directory
       - /path/to/some/other/compiler/runtime/directory


.. note::

   The section `environment` is interpreted as an ordered dictionary, which
   means two things. First, environment modifications are applied in the order
   they are specified in the configuration file. Second, you cannot express
   environment modifications that require mixing different commands, i.e. you
   cannot `set` one variable, then `prepend-path` to another one, and then
   again `set` a third one.

^^^^^^^^^^^^^^^^^^^^^^^
Architecture specifiers
^^^^^^^^^^^^^^^^^^^^^^^
@@ -1271,10 +1310,9 @@ directly when you run ``python``:

Using Extensions
^^^^^^^^^^^^^^^^

There are three ways to get ``numpy`` working in Python. The first is
to use :ref:`shell-support`. You can simply ``load`` the
module for the extension, and it will be added to the ``PYTHONPATH``
in your current shell:
There are four ways to get ``numpy`` working in Python. The first is
to use :ref:`shell-support`. You can simply ``load`` the extension,
and it will be added to the ``PYTHONPATH`` in your current shell:

.. code-block:: console

@@ -1284,11 +1322,29 @@ in your current shell:

Now ``import numpy`` will succeed for as long as you keep your current
session open.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Loading Extensions via Modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Instead of using Spack's environment modification capabilities through
the ``spack load`` command, you can load numpy through your
environment modules (using ``environment-modules`` or ``lmod``). This
will also add the extension to the ``PYTHONPATH`` in your current
shell.

.. code-block:: console

   $ module load <name of numpy module>

If you do not know the name of the specific numpy module you wish to
load, you can use the ``spack module tcl|lmod loads`` command to get
the name of the module from the Spack spec.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Activating Extensions in a View
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The second way to use extensions is to create a view, which merges the
Another way to use extensions is to create a view, which merges the
python installation along with the extensions into a single prefix.
See :ref:`filesystem-views` for a more in-depth description of views and
:ref:`cmd-spack-view` for usage of the ``spack view`` command.
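As a concrete sketch of the module-name lookup mentioned above (the spec details and hash suffix are hypothetical):

   $ spack module tcl loads py-numpy
   # py-numpy@1.17.4%gcc@9.0.1 arch=linux-ubuntu18.04-broadwell
   module load py-numpy-1.17.4-gcc-9.0.1-abcdefg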
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -58,9 +58,9 @@ directory. Here's an example of an external configuration:

   packages:
     openmpi:
       paths:
         openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7: /opt/openmpi-1.4.3
         openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7+debug: /opt/openmpi-1.4.3-debug
         openmpi@1.6.5%intel@10.1 arch=linux-x86_64-debian7: /opt/openmpi-1.6.5-intel
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
         openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel

This example lists three installations of OpenMPI, one built with GCC,
one built with GCC and debug information, and another built with Intel.
@@ -107,9 +107,9 @@ be:

   packages:
     openmpi:
       paths:
         openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7: /opt/openmpi-1.4.3
         openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7+debug: /opt/openmpi-1.4.3-debug
         openmpi@1.6.5%intel@10.1 arch=linux-x86_64-debian7: /opt/openmpi-1.6.5-intel
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
         openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
       buildable: False

The addition of the ``buildable`` flag tells Spack that it should never build
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -56,6 +56,7 @@ on these ideas for each distinct build system that Spack supports:

   :maxdepth: 1
   :caption: Other

   build_systems/bundlepackage
   build_systems/cudapackage
   build_systems/intelpackage
   build_systems/custompackage
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
lib/spack/docs/build_systems/bundlepackage.rst (new file)
@@ -0,0 +1,52 @@
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)

.. _bundlepackage:

-------------
BundlePackage
-------------

``BundlePackage`` represents a set of packages that are expected to work well
together, such as a collection of commonly used software libraries. The
associated software is specified as bundle dependencies.


^^^^^^^^
Creation
^^^^^^^^

Be sure to specify the ``bundle`` template if you are using ``spack create``
to generate a package from the template. For example, use the following
command to create a bundle package whose class name will be ``Mybundle``:

.. code-block:: console

   $ spack create --template bundle --name mybundle


^^^^^^
Phases
^^^^^^

The ``BundlePackage`` base class does not provide any phases by default
since the bundle does not represent a build system.


^^^
URL
^^^

The ``url`` property does not have meaning since there is no package-specific
code to fetch.


^^^^^^^
Version
^^^^^^^

At least one ``version`` must be specified in order for the package to
build.
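For illustration, the resulting ``package.py`` for such a bundle might end up looking like this minimal sketch (the package and dependency names are hypothetical):

   $ cat var/spack/repos/builtin/packages/mybundle/package.py
   from spack import *

   class Mybundle(BundlePackage):
       """A bundle of commonly used libraries (hypothetical example)."""

       homepage = "https://example.com/mybundle"

       # Bundles have no code to fetch, but still need at least one version.
       version('1.0')

       # The bundled software is expressed purely as dependencies.
       depends_on('zlib')
       depends_on('mpich')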
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -176,7 +176,25 @@ def setup(sphinx):
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# We use our own extension of the default style with a few modifications
from pygments.style import Style
from pygments.styles.default import DefaultStyle
from pygments.token import Generic, Comment, Text

class SpackStyle(DefaultStyle):
    styles = DefaultStyle.styles.copy()
    background_color = "#f4f4f8"
    styles[Generic.Output] = "#355"
    styles[Generic.Prompt] = "bold #346ec9"

import pkg_resources
dist = pkg_resources.Distribution(__file__)
sys.path.append('.')  # make 'conf' module findable
ep = pkg_resources.EntryPoint.parse('spack = conf:SpackStyle', dist=dist)
dist._ep_map = {'pygments.styles': {'plugin1': ep}}
pkg_resources.working_set.add(dist)

pygments_style = 'spack'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
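A quick way to sanity-check the custom style class, assuming the documentation dependencies are installed and you run from ``lib/spack/docs`` (a sketch only, since importing ``conf`` executes the whole Sphinx configuration):

   $ cd lib/spack/docs
   $ python -c "import conf; print(conf.SpackStyle.background_color)"
   #f4f4f8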
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -30,11 +30,21 @@ Default is ``$spack/opt/spack``.

``install_hash_length`` and ``install_path_scheme``
---------------------------------------------------

The default Spack installation path can be very long and can create
problems for scripts with hardcoded shebangs. There are two parameters
to help with that. Firstly, the ``install_hash_length`` parameter can
set the length of the hash in the installation path from 1 to 32. The
default path uses the full 32 characters.
The default Spack installation path can be very long and can create problems
for scripts with hardcoded shebangs. Additionally, when using the Intel
compiler, and if there is also a long list of dependencies, the compiler may
segfault. If you see the following:

.. code-block:: console

   : internal error: ** The compiler has encountered an unexpected problem.
   ** Segmentation violation signal raised. **
   Access violation or stack overflow. Please contact Intel Support for assistance.

it may be because variables containing dependency specs are too long. There
are two parameters to help with long path names. Firstly, the
``install_hash_length`` parameter can set the length of the hash in the
installation path from 1 to 32. The default path uses the full 32 characters.
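For illustration, a user scope that shortens installation-path hashes might look like this (a minimal sketch; any value from 1 to 32 is accepted, and 7 is arbitrary):

   $ cat ~/.spack/config.yaml
   config:
     install_hash_length: 7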

Secondly, it is also possible to modify the entire installation
scheme. By default Spack uses
@@ -1,4 +1,4 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -427,6 +427,33 @@ home directory, and ``~user`` will expand to a specified user's home
directory. The ``~`` must appear at the beginning of the path, or Spack
will not expand it.

.. _configuration_environment_variables:

-------------------------
Environment Modifications
-------------------------

Spack allows you to prescribe custom environment modifications in a few places
within its configuration files. Wherever these modifications are allowed,
they are specified as a dictionary, like in the following example:

.. code-block:: yaml

   environment:
     set:
       LICENSE_FILE: '/path/to/license'
     unset:
     - CPATH
     - LIBRARY_PATH
     append_path:
       PATH: '/new/bin/dir'

The permitted actions are ``set``, ``unset``, ``append_path``,
``prepend_path`` and finally ``remove_path``. They all require a dictionary
of variable names mapped to the values used for the modification.
The only exception is ``unset``, which requires just a list of variable names.
No particular order is ensured on the execution of each of these modifications.

----------------------------
Seeing Spack's Configuration
----------------------------
lib/spack/docs/containers.rst (new file)
@@ -0,0 +1,307 @@
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)

.. _containers:

================
Container Images
================

Spack can be an ideal tool to set up images for containers since all the
features discussed in :ref:`environments` can greatly help to manage
the installation of software during the image build process. Nonetheless,
building a production image from scratch still requires a lot of
boilerplate to:

- Get Spack working within the image, possibly running as root
- Minimize the physical size of the software installed
- Properly update the system software in the base image

To help users with these tedious tasks, Spack provides a command
to automatically generate recipes for container images based on
Environments:

.. code-block:: console

   $ ls
   spack.yaml

   $ spack containerize
   # Build stage with Spack pre-installed and ready to be used
   FROM spack/centos7:latest as builder

   # What we want to install and how we want to install it
   # is specified in a manifest file (spack.yaml)
   RUN mkdir /opt/spack-environment \
   &&  (echo "spack:" \
   &&   echo "  specs:" \
   &&   echo "  - gromacs+mpi" \
   &&   echo "  - mpich" \
   &&   echo "  concretization: together" \
   &&   echo "  config:" \
   &&   echo "    install_tree: /opt/software" \
   &&   echo "  view: /opt/view") > /opt/spack-environment/spack.yaml

   # Install the software, remove unnecessary deps
   RUN cd /opt/spack-environment && spack install && spack gc -y

   # Strip all the binaries
   RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
       xargs file -i | \
       grep 'charset=binary' | \
       grep 'x-executable\|x-archive\|x-sharedlib' | \
       awk -F: '{print $1}' | xargs strip -s

   # Modifications to the environment that are necessary to run
   RUN cd /opt/spack-environment && \
       spack env activate --sh -d . >> /etc/profile.d/z10_spack_environment.sh


   # Bare OS image to run the installed executables
   FROM centos:7

   COPY --from=builder /opt/spack-environment /opt/spack-environment
   COPY --from=builder /opt/software /opt/software
   COPY --from=builder /opt/view /opt/view
   COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh

   RUN yum update -y && yum install -y epel-release && yum update -y \
    && yum install -y libgomp \
    && rm -rf /var/cache/yum && yum clean all

   RUN echo 'export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][gromacs]\[$(tput setaf 2)\]\u\[$(tput sgr0)\]:\w $ \[$(tput sgr0)\]"' >> ~/.bashrc


   LABEL "app"="gromacs"
   LABEL "mpi"="mpich"

   ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"]


The bits that make this automation possible are discussed in detail
below. All the images generated in this way will be based on
multi-stage builds with:

- A fat ``build`` stage containing common build tools and Spack itself
- A minimal ``final`` stage containing only the software requested by the user

-----------------
Spack Base Images
-----------------

Docker images with Spack preinstalled and ready to be used are
built on `Docker Hub <https://hub.docker.com/u/spack>`_
at every push to ``develop`` or to a release branch. The operating systems
that are currently supported are summarized in the table below:

.. _containers-supported-os:

.. list-table:: Supported operating systems
   :header-rows: 1

   * - Operating System
     - Base Image
     - Spack Image
   * - Ubuntu 16.04
     - ``ubuntu:16.04``
     - ``spack/ubuntu-xenial``
   * - Ubuntu 18.04
     - ``ubuntu:18.04``
     - ``spack/ubuntu-bionic``
   * - CentOS 6
     - ``centos:6``
     - ``spack/centos6``
   * - CentOS 7
     - ``centos:7``
     - ``spack/centos7``

All the images are tagged with the corresponding release of Spack:

.. image:: dockerhub_spack.png

with the exception of the ``latest`` tag that points to the HEAD
of the ``develop`` branch. These images are available for anyone
to use and take care of all the repetitive tasks that are necessary
to set up Spack within a container. All the container recipes generated
automatically by Spack use them as base images for their ``build`` stage.


-------------------------
Environment Configuration
-------------------------

Any Spack Environment can be used for the automatic generation of container
recipes. Sensible defaults are provided for things like the base image or the
version of Spack used in the image. If finer tuning is needed, it can be
obtained by adding the relevant metadata under the ``container`` attribute
of environments:

.. code-block:: yaml

   spack:
     specs:
     - gromacs+mpi
     - mpich

     container:
       # Select the format of the recipe e.g. docker,
       # singularity or anything else that is currently supported
       format: docker

       # Select from a valid list of images
       base:
         image: "centos:7"
         spack: develop

       # Whether or not to strip binaries
       strip: true

       # Additional system packages that are needed at runtime
       os_packages:
       - libgomp

       # Extra instructions
       extra_instructions:
         final: |
           RUN echo 'export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][gromacs]\[$(tput setaf 2)\]\u\[$(tput sgr0)\]:\w $ \[$(tput sgr0)\]"' >> ~/.bashrc

       # Labels for the image
       labels:
         app: "gromacs"
         mpi: "mpich"

The tables below describe the configuration options that are currently supported:

.. list-table:: General configuration options for the ``container`` section of ``spack.yaml``
   :header-rows: 1

   * - Option Name
     - Description
     - Allowed Values
     - Required
   * - ``format``
     - The format of the recipe
     - ``docker`` or ``singularity``
     - Yes
   * - ``base:image``
     - Base image for ``final`` stage
     - See :ref:`containers-supported-os`
     - Yes
   * - ``base:spack``
     - Version of Spack
     - Valid tags for ``base:image``
     - Yes
   * - ``strip``
     - Whether to strip binaries
     - ``true`` (default) or ``false``
     - No
   * - ``os_packages``
     - System packages to be installed
     - Valid packages for the ``final`` OS
     - No
   * - ``extra_instructions:build``
     - Extra instructions (e.g. ``RUN``, ``COPY``, etc.) at the end of the ``build`` stage
     - Anything understood by the current ``format``
     - No
   * - ``extra_instructions:final``
     - Extra instructions (e.g. ``RUN``, ``COPY``, etc.) at the end of the ``final`` stage
     - Anything understood by the current ``format``
     - No
   * - ``labels``
     - Labels to tag the image
     - Pairs of key-value strings
     - No

.. list-table:: Configuration options specific to Singularity
   :header-rows: 1

   * - Option Name
     - Description
     - Allowed Values
     - Required
   * - ``singularity:runscript``
     - Content of ``%runscript``
     - Any valid script
     - No
   * - ``singularity:startscript``
     - Content of ``%startscript``
     - Any valid script
     - No
   * - ``singularity:test``
     - Content of ``%test``
     - Any valid script
     - No
   * - ``singularity:help``
     - Description of the image
     - Description string
     - No

Once the Environment is properly configured, a recipe for a container
image can be printed to standard output by issuing the following
command from the directory where the ``spack.yaml`` resides:

.. code-block:: console

   $ spack containerize

The example ``spack.yaml`` above would produce, for instance, the
following ``Dockerfile``:

.. code-block:: docker

   # Build stage with Spack pre-installed and ready to be used
   FROM spack/centos7:latest as builder

   # What we want to install and how we want to install it
   # is specified in a manifest file (spack.yaml)
   RUN mkdir /opt/spack-environment \
   &&  (echo "spack:" \
   &&   echo "  specs:" \
   &&   echo "  - gromacs+mpi" \
   &&   echo "  - mpich" \
   &&   echo "  concretization: together" \
   &&   echo "  config:" \
   &&   echo "    install_tree: /opt/software" \
   &&   echo "  view: /opt/view") > /opt/spack-environment/spack.yaml

   # Install the software, remove unnecessary deps
   RUN cd /opt/spack-environment && spack install && spack gc -y

   # Strip all the binaries
   RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
       xargs file -i | \
       grep 'charset=binary' | \
       grep 'x-executable\|x-archive\|x-sharedlib' | \
       awk -F: '{print $1}' | xargs strip -s

   # Modifications to the environment that are necessary to run
   RUN cd /opt/spack-environment && \
       spack env activate --sh -d . >> /etc/profile.d/z10_spack_environment.sh


   # Bare OS image to run the installed executables
   FROM centos:7

   COPY --from=builder /opt/spack-environment /opt/spack-environment
   COPY --from=builder /opt/software /opt/software
   COPY --from=builder /opt/view /opt/view
   COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh

   RUN yum update -y && yum install -y epel-release && yum update -y \
    && yum install -y libgomp \
    && rm -rf /var/cache/yum && yum clean all

   RUN echo 'export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][gromacs]\[$(tput setaf 2)\]\u\[$(tput sgr0)\]:\w $ \[$(tput sgr0)\]"' >> ~/.bashrc


   LABEL "app"="gromacs"
   LABEL "mpi"="mpich"

   ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"]

.. note::
   Spack can also produce Singularity definition files to build the image. The
   minimum version of Singularity required to build a SIF (Singularity Image Format)
   from them is ``3.5.3``.
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -64,6 +64,8 @@ If you take a look in ``$SPACK_ROOT/.travis.yml``, you'll notice that we test
|
||||
against Python 2.6, 2.7, and 3.4-3.7 on both macOS and Linux. We currently
|
||||
perform 3 types of tests:
|
||||
|
||||
.. _cmd-spack-test:
|
||||
|
||||
^^^^^^^^^^
|
||||
Unit Tests
|
||||
^^^^^^^^^^
|
||||
@@ -86,40 +88,83 @@ To run *all* of the unit tests, use:
 
    $ spack test
 
-These tests may take several minutes to complete. If you know you are only
-modifying a single Spack feature, you can run a single unit test at a time:
+These tests may take several minutes to complete. If you know you are
+only modifying a single Spack feature, you can run subsets of tests at a
+time. For example, this would run all the tests in
+``lib/spack/spack/test/architecture.py``:
 
 .. code-block:: console
 
-   $ spack test architecture
+   $ spack test architecture.py
 
-This allows you to develop iteratively: make a change, test that change, make
-another change, test that change, etc. To get a list of all available unit
-tests, run:
+And this would run the ``test_platform`` test from that file:
+
+.. code-block:: console
+
+   $ spack test architecture.py::test_platform
+
+This allows you to develop iteratively: make a change, test that change,
+make another change, test that change, etc. We use `pytest
+<http://pytest.org/>`_ as our test framework, and these types of
+arguments are just passed to the ``pytest`` command underneath. See `the
+pytest docs
+<http://doc.pytest.org/en/latest/usage.html#specifying-tests-selecting-tests>`_
+for more details on test selection syntax.
+
+``spack test`` has a few special options that can help you understand
+what tests are available. To get a list of all available unit test
+files, run:
 
 .. command-output:: spack test --list
    :ellipsis: 5
 
-A more detailed list of available unit tests can be found by running
-``spack test --long-list``.
+To see a more detailed list of available unit tests, use ``spack test
+--list-long``:
 
-By default, ``pytest`` captures the output of all unit tests. If you add print
-statements to a unit test and want to see the output, simply run:
+.. command-output:: spack test --list-long
+   :ellipsis: 10
+
+And to see the fully qualified names of all tests, use ``--list-names``:
+
+.. command-output:: spack test --list-names
+   :ellipsis: 5
+
+You can combine these with ``pytest`` arguments to restrict which tests
+you want to know about. For example, to see just the tests in
+``architecture.py``:
+
+.. command-output:: spack test --list-long architecture.py
+
+You can also combine any of these options with a ``pytest`` keyword
+search. For example, to see the names of all tests that have "spec"
+or "concretize" somewhere in their names:
+
+.. command-output:: spack test --list-names -k "spec and concretize"
+
+By default, ``pytest`` captures the output of all unit tests, and it will
+print any captured output for failed tests. Sometimes it's helpful to see
+your output interactively, while the tests run (e.g., if you add print
+statements to a unit test). To see the output *live*, use the ``-s``
+argument to ``pytest``:
 
 .. code-block:: console
 
-   $ spack test -s -k architecture
+   $ spack test -s architecture.py::test_platform
 
-Unit tests are crucial to making sure bugs aren't introduced into Spack. If you
-are modifying core Spack libraries or adding new functionality, please consider
-adding new unit tests or strengthening existing tests.
+Unit tests are crucial to making sure bugs aren't introduced into
+Spack. If you are modifying core Spack libraries or adding new
+functionality, please add new unit tests for your feature, and consider
+strengthening existing tests. You will likely be asked to do this if you
+submit a pull request to the Spack project on GitHub. Check out the
+`pytest docs <http://pytest.org/>`_ and feel free to ask for guidance on
+how to write tests!
 
 .. note::
 
-   There is also a ``run-unit-tests`` script in ``share/spack/qa`` that
-   runs the unit tests. Afterwards, it reports back to Codecov with the
-   percentage of Spack that is covered by unit tests. This script is
-   designed for Travis CI. If you want to run the unit tests yourself, we
-   suggest you use ``spack test``.
+   You may notice the ``share/spack/qa/run-unit-tests`` script in the
+   repository. This script is designed for Travis CI. It runs the unit
+   tests and reports coverage statistics back to Codecov. If you want to
+   run the unit tests yourself, we suggest you use ``spack test``.
 
 ^^^^^^^^^^^^
 Flake8 Tests
@@ -223,8 +268,7 @@ documentation. In order to prevent things like broken links and missing imports,
 we added documentation tests that build the documentation and fail if there
 are any warning or error messages.
 
-Building the documentation requires several dependencies, all of which can be
-installed with Spack:
+Building the documentation requires several dependencies:
 
 * sphinx
 * sphinxcontrib-programoutput
@@ -234,11 +278,18 @@ installed with Spack:
 * mercurial
 * subversion
 
+All of these can be installed with Spack, e.g.
+
+.. code-block:: console
+
+   $ spack install py-sphinx py-sphinxcontrib-programoutput py-sphinx-rtd-theme graphviz git mercurial subversion
+
 .. warning::
 
-   Sphinx has `several required dependencies <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/py-sphinx/package.py>`_.
-   If you installed ``py-sphinx`` with Spack, make sure to add all of these
-   dependencies to your ``PYTHONPATH``. The easiest way to do this is to run:
+   If you're using a ``python`` from Spack and you installed
+   ``py-sphinx`` and friends, you need to make them available to your
+   ``python``. The easiest way to do this is to run:
 
 .. code-block:: console
@@ -246,8 +297,10 @@ installed with Spack:
    $ spack activate py-sphinx-rtd-theme
    $ spack activate py-sphinxcontrib-programoutput
 
-so that all of the dependencies are symlinked to a central location.
-If you see an error message like:
+so that all of the dependencies are symlinked into that Python's
+tree. Alternatively, you could arrange for their library
+directories to be added to PYTHONPATH. If you see an error message
+like:
 
 .. code-block:: console
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -363,12 +363,12 @@ Developer commands
 ``spack doc``
 ^^^^^^^^^^^^^
 
-.. _cmd-spack-test:
-
 ^^^^^^^^^^^^^^
 ``spack test``
 ^^^^^^^^^^^^^^
 
+See the :ref:`contributor guide section <cmd-spack-test>` on ``spack test``.
+
 .. _cmd-spack-python:
 
 ^^^^^^^^^^^^^^^^
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
BIN lib/spack/docs/dockerhub_spack.png (new binary file, 88 KiB; not shown)
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -49,6 +49,8 @@ Spack uses a "manifest and lock" model similar to `Bundler gemfiles
 managers. The user input file is named ``spack.yaml`` and the lock
 file is named ``spack.lock``
 
+.. _environments-using:
+
 ------------------
 Using Environments
 ------------------
@@ -382,11 +384,12 @@ the Environment.
 Loading
 ^^^^^^^
 
-Once an environment has been installed, the following creates a load script for it:
+Once an environment has been installed, the following creates a load
+script for it:
 
 .. code-block:: console
 
-   $ spack env myenv loads -r
+   $ spack env loads -r
 
 This creates a file called ``loads`` in the environment directory.
 Sourcing that file in Bash will make the environment available to the
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -9,12 +9,6 @@
 Custom Extensions
 =================
 
-.. warning::
-
-   The support for extending Spack with custom commands is still experimental.
-   Users should expect APIs or prescribed directory structures to
-   change at any time.
-
 *Spack extensions* permit you to extend Spack capabilities by deploying your
 own custom commands or logic in an arbitrary location on your filesystem.
 This might be extremely useful e.g. to develop and maintain a command whose purpose is
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -16,7 +16,7 @@ Prerequisites
 Spack has the following minimum requirements, which must be installed
 before Spack is run:
 
-#. Python 2 (2.6 or 2.7) or 3 (3.4 - 3.7) to run Spack
+#. Python 2 (2.6 or 2.7) or 3 (3.5 - 3.8) to run Spack
 #. A C/C++ compiler for building
 #. The ``make`` executable for building
 #. The ``git`` and ``curl`` commands for fetching
@@ -97,7 +97,7 @@ Check Installation
 With Spack installed, you should be able to run some basic Spack
 commands. For example:
 
-.. command-output:: spack spec netcdf
+.. command-output:: spack spec netcdf-c
 
 
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -66,6 +66,7 @@ or refer to the full manual below.
    config_yaml
    build_settings
    environments
+   containers
    mirrors
    module_file_support
    repositories
@@ -74,6 +75,7 @@ or refer to the full manual below.
    package_list
    chain
    extensions
+   pipelines
 
 .. toctree::
    :maxdepth: 2
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -119,7 +119,7 @@ For example this will add the ``mpich`` package built with ``gcc`` to your path:
 
    # ... wait for install ...
 
-   $ spack load mpich %gcc@4.4.7  # modules
+   $ spack load mpich %gcc@4.4.7
    $ which mpicc
    ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4/bin/mpicc
@@ -129,27 +129,29 @@ want to use a package, you can type unload or unuse similarly:
 
 .. code-block:: console
 
-   $ spack unload mpich %gcc@4.4.7  # modules
+   $ spack unload mpich %gcc@4.4.7
 
 .. note::
 
-   The ``load`` and ``unload`` subcommands are
-   only available if you have enabled Spack's shell support *and* you
-   have environment-modules installed on your machine.
+   The ``load`` and ``unload`` subcommands are only available if you
+   have enabled Spack's shell support. These commands DO NOT use the
+   underlying Spack-generated module files.
 
-^^^^^^^^^^^^^^^^^^^^^^
-Ambiguous module names
-^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^
+Ambiguous specs
+^^^^^^^^^^^^^^^
 
-If a spec used with load/unload or use/unuse is ambiguous (i.e. more
-than one installed package matches it), then Spack will warn you:
+If a spec used with load/unload is ambiguous (i.e. more than one
+installed package matches it), then Spack will warn you:
 
 .. code-block:: console
 
    $ spack load libelf
-   ==> Error: Multiple matches for spec libelf. Choose one:
-   libelf@0.8.13%gcc@4.4.7 arch=linux-debian7-x86_64
-   libelf@0.8.13%intel@15.0.0 arch=linux-debian7-x86_64
+   ==> Error: libelf matches multiple packages.
+     Matching packages:
+       libelf@0.8.13%gcc@4.4.7 arch=linux-debian7-x86_64
+       libelf@0.8.13%intel@15.0.0 arch=linux-debian7-x86_64
+     Use a more specific spec
 
 You can either type the ``spack load`` command again with a fully
 qualified argument, or you can add just enough extra constraints to
@@ -171,8 +173,15 @@ To identify just the one built with the Intel compiler.
 ``spack module tcl loads``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-In some cases, it is desirable to load not just a module, but also all
-the modules it depends on. This is not required for most modules
+In some cases, it is desirable to use a Spack-generated module, rather
+than relying on Spack's built-in user-environment modification
+capabilities. To translate a spec into a module name, use ``spack
+module tcl loads`` or ``spack module lmod loads`` depending on the
+module system desired.
+
+
+To load not just a module, but also all the modules it depends on, use
+the ``--dependencies`` option. This is not required for most modules
 because Spack builds binaries with RPATH support. However, not all
 packages use RPATH to find their dependencies: this can be true in
 particular for Python extensions, which are currently *not* built with
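
For example, printing the ``module load`` line for an installed spec looks
roughly like this (the package name is just an example):

.. code-block:: console

   $ spack module tcl loads mpich

The output is a ``module load`` command naming the Spack-generated module
(including its hash suffix), which you can capture in a shell script.
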
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -136,6 +136,10 @@ generates a boilerplate template for your package, and opens up the new
     homepage = "http://www.example.com"
     url = "https://gmplib.org/download/gmp/gmp-6.1.2.tar.bz2"
 
+    # FIXME: Add a list of GitHub accounts to
+    # notify when the package is updated.
+    # maintainers = ['github_user1', 'github_user2']
+
     version('6.1.2', '8ddbb26dc3bd4e2302984debba1406a5')
     version('6.1.1', '4c175f86e11eb32d8bf9872ca3a8e11d')
     version('6.1.0', '86ee6e54ebfc4a90b643a65e402c4048')
@@ -184,6 +188,17 @@ The rest of the tasks you need to do are as follows:
    The ``homepage`` is displayed when users run ``spack info`` so
    that they can learn more about your package.
 
+#. Add a comma-separated list of maintainers.
+
+   The ``maintainers`` field is a list of GitHub accounts of people
+   who want to be notified any time the package is modified. When a
+   pull request is submitted that updates the package, these people
+   will be requested to review the PR. This is useful for developers
+   who maintain a Spack package for their own software, as well as
+   users who rely on a piece of software and want to ensure that the
+   package doesn't break. It also gives users a list of people to
+   contact for help when someone reports a build error with the package.
+
 #. Add ``depends_on()`` calls for the package's dependencies.
 
    ``depends_on`` tells Spack that other packages need to be built
@@ -538,6 +553,34 @@ version. This is useful for packages that have an easy to extrapolate URL, but
 keep changing their URL format every few releases. With this method, you only
 need to specify the ``url`` when the URL changes.
 
+"""""""""""""""""""""""
+Mirrors of the main URL
+"""""""""""""""""""""""
+
+Spack supports listing mirrors of the main URL in a package by defining
+the ``urls`` attribute:
+
+.. code-block:: python
+
+   class Foo(Package):
+
+       urls = [
+           'http://example.com/foo-1.0.tar.gz',
+           'http://mirror.com/foo-1.0.tar.gz'
+       ]
+
+instead of just a single ``url``. This attribute is a list of possible URLs that
+will be tried in order when fetching packages. Notice that either one of ``url``
+or ``urls`` can be present in a package, but not both at the same time.
+
+A well-known case of packages that can be fetched from multiple mirrors is that
+of GNU. For that, Spack goes a step further and defines a mixin class that
+takes care of all of the plumbing and requires packagers to just define a proper
+``gnu_mirror_path`` attribute:
+
+.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/autoconf/package.py
+   :lines: 9-18
+
 ^^^^^^^^^^^^^^^^^^^^^^^^
 Skipping the expand step
 ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -886,6 +929,9 @@ Git fetching supports the following parameters to ``version``:
 * ``tag``: Name of a tag to fetch.
 * ``commit``: SHA hash (or prefix) of a commit to fetch.
 * ``submodules``: Also fetch submodules recursively when checking out this repository.
+* ``submodules_delete``: A list of submodules to forcibly delete from the repository
+  after fetching. Useful if a version in the repository has submodules that
+  have disappeared/are no longer accessible.
 * ``get_full_repo``: Ensure the full git history is checked out with all remote
   branch information. Normally (``get_full_repo=False``, the default), the git
   option ``--depth 1`` will be used if the version of git and the specified
@@ -1464,8 +1510,8 @@ that the same package with different patches applied will have different
 hash identifiers. To ensure that the hashing scheme is consistent, you
 must use a ``sha256`` checksum for the patch. Patches will be fetched
 from their URLs, checked, and applied to your source code. You can use
-the ``spack sha256`` command to generate a checksum for a patch file or
-URL.
+the GNU utils ``sha256sum`` or the macOS ``shasum -a 256`` commands to
+generate a checksum for a patch file.
 
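For example, either of these produces a suitable checksum (the patch file
name here is illustrative):

.. code-block:: console

   $ sha256sum foo.patch          # GNU coreutils
   $ shasum -a 256 foo.patch      # macOS
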
 Spack can also handle compressed patches. If you use these, Spack needs
 a little more help. Specifically, it needs *two* checksums: the
@@ -1907,6 +1953,8 @@ issues with 1.64.0, 1.65.0, and 1.66.0, you can say:
    depends_on('boost@1.59.0:1.63,1.65.1,1.67.0:')
 
 
+.. _dependency-types:
+
 ^^^^^^^^^^^^^^^^
 Dependency types
 ^^^^^^^^^^^^^^^^
@@ -1944,6 +1992,28 @@ inject the dependency's ``prefix/lib`` directory, but the package needs to
 be in ``PATH`` and ``PYTHONPATH`` during the build process and later when
 a user wants to run the package.
 
+^^^^^^^^^^^^^^^^^^^^^^^^
+Conditional dependencies
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+You may have a package that only requires a dependency under certain
+conditions. For example, you may have a package that has optional MPI support,
+- MPI is only a dependency when you want to enable MPI support for the
+package. In that case, you could say something like:
+
+.. code-block:: python
+
+   variant('mpi', default=False)
+   depends_on('mpi', when='+mpi')
+
+``when`` can include constraints on the variant, version, compiler, etc. and
+the :mod:`syntax<spack.spec>` is the same as for Specs written on the command
+line.
+
+If a dependency/feature of a package isn't typically used, you can save time
+by making it conditional (since Spack will not build the dependency unless it
+is required for the Spec).
+
 .. _dependency_dependency_patching:
 
 ^^^^^^^^^^^^^^^^^^^
@@ -4384,7 +4454,7 @@ translate variant flags into CMake definitions. For example:
 
 .. code-block:: python
 
-   def configure_args(self):
+   def cmake_args(self):
        spec = self.spec
        return [
           '-DUSE_EVERYTRACE=%s' % ('YES' if '+everytrace' in spec else 'NO'),
439 lib/spack/docs/pipelines.rst (new file)
@@ -0,0 +1,439 @@
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)

.. _pipelines:

=========
Pipelines
=========

Spack provides commands that support generating and running automated build
pipelines designed for Gitlab CI. At the highest level it works like this:
provide a spack environment describing the set of packages you care about,
and include within that environment file a description of how those packages
should be mapped to Gitlab runners. Spack can then generate a ``.gitlab-ci.yml``
file containing job descriptions for all your packages that can be run by a
properly configured Gitlab CI instance. When run, the generated pipeline will
build and deploy binaries, and it can optionally report to a CDash instance
regarding the health of the builds as they evolve over time.

------------------------------
Getting started with pipelines
------------------------------

It is fairly straightforward to get started with automated build pipelines. At
a minimum, you'll need to set up a Gitlab instance (more about Gitlab CI
`here <https://about.gitlab.com/product/continuous-integration/>`_) and configure
at least one `runner <https://docs.gitlab.com/runner/>`_. Then the basic steps
for setting up a build pipeline are as follows:

#. Create a repository on your gitlab instance
#. Add a ``spack.yaml`` at the root containing your pipeline environment (see
   below for details)
#. Add a ``.gitlab-ci.yml`` at the root containing a single job, similar to
   this one:

   .. code-block:: yaml

      pipeline-job:
        tags:
          - <custom-tag>
        ...
        script:
          - spack ci start

#. Add any secrets required by the CI process to environment variables using the
   CI web ui
#. Push a commit containing the ``spack.yaml`` and ``.gitlab-ci.yml`` mentioned above
   to the gitlab repository

The ``<custom-tag>``, above, is used to pick one of your configured runners,
while the use of the ``spack ci start`` command implies that runner has an
appropriate version of spack installed and configured for use. Of course, there
are myriad ways to customize the process. You can configure CDash reporting
on the progress of your builds, set up S3 buckets to mirror binaries built by
the pipeline, clone a custom spack repository/ref for use by the pipeline, and
more.

While it is possible to set up pipelines on gitlab.com, the builds there are
limited to 60 minutes and generic hardware. It is also possible to
`hook up <https://about.gitlab.com/blog/2018/04/24/getting-started-gitlab-ci-gcp>`_
Gitlab to Google Kubernetes Engine (`GKE <https://cloud.google.com/kubernetes-engine/>`_)
or Amazon Elastic Kubernetes Service (`EKS <https://aws.amazon.com/eks>`_), though those
topics are outside the scope of this document.

-----------------------------------
Spack commands supporting pipelines
-----------------------------------

Spack provides a command ``ci`` with sub-commands for doing various things related
to automated build pipelines. All of the ``spack ci ...`` commands must be run
from within an environment, as each one makes use of the environment for different
purposes. Additionally, some options to the commands (or conditions present in
the spack environment file) may require particular environment variables to be
set in order to function properly. Examples of these are typically secrets
needed for pipeline operation that should not be visible in a spack environment
file. These environment variables are described in more detail in
:ref:`ci_environment_variables`.

.. _cmd_spack_ci:

^^^^^^^^^^^^^^^^^^
``spack ci``
^^^^^^^^^^^^^^^^^^

Super-command for functionality related to generating pipelines and executing
pipeline jobs.

.. _cmd_spack_ci_start:

^^^^^^^^^^^^^^^^^^
``spack ci start``
^^^^^^^^^^^^^^^^^^

Currently this command is a short-cut to first run ``spack ci generate``, followed
by ``spack ci pushyaml``.
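
In other words, a rough sketch of the equivalence just described:

.. code-block:: console

   $ spack ci start
   # ...does approximately the same work as:
   $ spack ci generate
   $ spack ci pushyaml
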
.. _cmd_spack_ci_generate:

^^^^^^^^^^^^^^^^^^^^^
``spack ci generate``
^^^^^^^^^^^^^^^^^^^^^

Concretizes the specs in the active environment, stages them (as described in
:ref:`staging_algorithm`), and writes the resulting ``.gitlab-ci.yml`` to disk.

.. _cmd_spack_ci_pushyaml:

^^^^^^^^^^^^^^^^^^^^^
``spack ci pushyaml``
^^^^^^^^^^^^^^^^^^^^^

Generates a commit containing the generated ``.gitlab-ci.yml`` and pushes it to a
``DOWNSTREAM_CI_REPO``, which is frequently the same repository. The branch
created has the same name as the current branch being tested, but has ``multi-ci-``
prepended to the branch name. Once Gitlab CI has full support for dynamically
defined workloads, this command will be deprecated.

.. _cmd_spack_ci_rebuild:

^^^^^^^^^^^^^^^^^^^^
``spack ci rebuild``
^^^^^^^^^^^^^^^^^^^^

This sub-command is responsible for ensuring a single spec from the release
environment is up to date on the remote mirror configured in the environment,
and as such, corresponds to a single job in the ``.gitlab-ci.yml`` file.

------------------------------------
A pipeline-enabled spack environment
------------------------------------

Here's an example of a spack environment file that has been enhanced with
sections describing a build pipeline:

.. code-block:: yaml

   spack:
     definitions:
       - pkgs:
           - readline@7.0
       - compilers:
           - '%gcc@5.5.0'
       - oses:
           - os=ubuntu18.04
           - os=centos7
     specs:
       - matrix:
           - [$pkgs]
           - [$compilers]
           - [$oses]
     mirrors:
       cloud_gitlab: https://mirror.spack.io
     gitlab-ci:
       mappings:
         - match:
             - os=ubuntu18.04
           runner-attributes:
             tags:
               - spack-k8s
             image: spack/spack_builder_ubuntu_18.04
         - match:
             - os=centos7
           runner-attributes:
             tags:
               - spack-k8s
             image: spack/spack_builder_centos_7
     cdash:
       build-group: Release Testing
       url: https://cdash.spack.io
       project: Spack
       site: Spack AWS Gitlab Instance

Hopefully, the ``definitions``, ``specs``, ``mirrors``, etc. sections are already
familiar, as they are part of spack :ref:`environments`. So let's take a more
in-depth look at some of the pipeline-related sections in that environment file
that might not be as familiar.

The ``gitlab-ci`` section is used to configure how the pipeline workload should be
generated, mainly how the jobs for building specs should be assigned to the
configured runners on your instance. Each entry within the list of ``mappings``
corresponds to a known gitlab runner, where the ``match`` section is used
in assigning a release spec to one of the runners, and the ``runner-attributes``
section is used to configure the spec/job for that particular runner.

There are other pipeline options you can configure within the ``gitlab-ci`` section
as well. The ``bootstrap`` section allows you to specify lists of specs from
your ``definitions`` that should be staged ahead of the environment's ``specs`` (this
section is described in more detail below). The ``enable-artifacts-buildcache`` key
takes a boolean and determines whether the pipeline uses artifacts to store and
pass along the buildcaches from one stage to the next (the default if you don't
provide this option is ``False``). The ``enable-debug-messages`` key takes a boolean
and allows you to choose whether the pipeline build jobs are run as ``spack -d ci rebuild``
or just ``spack ci rebuild`` (the default is not to enable debug messages). The
``final-stage-rebuild-index`` section controls whether an extra job is added to the
end of your pipeline (in a stage by itself) which will regenerate the mirror's
buildcache index. Under normal operation, each pipeline job that rebuilds a package
will re-generate the mirror's buildcache index after the buildcache entry for that
job has been created and pushed to the mirror. Since jobs in the same stage can run in
parallel, there is the possibility that at the end of some stage, the index may not
reflect all the binaries in the buildcache. Adding the ``final-stage-rebuild-index``
section ensures that at the end of the pipeline, the index will be in sync with the
binaries on the mirror. If the mirror lives in an S3 bucket, this job will need to
run on a machine with the Python ``boto3`` module installed, and consequently the
``final-stage-rebuild-index`` needs to specify a list of ``tags`` to pick a runner
satisfying that condition. It can also take an ``image`` key so Docker executor type
runners can pick the right image for the index regeneration job.
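
A minimal sketch of what that might look like (the tag and image values here
are illustrative, reusing those from the example environment above):

.. code-block:: yaml

   gitlab-ci:
     final-stage-rebuild-index:
       tags:
         - spack-k8s
       image: spack/spack_builder_centos_7
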
The optional ``cdash`` section provides information that will be used by the
``spack ci generate`` command (invoked by ``spack ci start``) for reporting
to CDash. All the jobs generated from this environment will belong to a
"build group" within CDash that can be tracked over time. As the release
progresses, this build group may have jobs added or removed. The url, project,
and site are used to specify the CDash instance to which build results should
be reported.

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Assignment of specs to runners
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The ``mappings`` section corresponds to a list of runners, and during assignment
of specs to runners, the list is traversed in order looking for matches; the
first runner that matches a release spec is assigned to build that spec. The
``match`` section within each runner mapping section is a list of specs, and
if any of those specs match the release spec (the ``spec.satisfies()`` method
is used), then that runner is considered a match.
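
Because matching uses ``spec.satisfies()``, a ``match`` list can be as broad
or as narrow as you like. For example, a mapping like this one (illustrative)
would only claim ``readline`` specs built with ``gcc@5.5.0`` on centos7:

.. code-block:: yaml

   - match:
       - 'readline%gcc@5.5.0 os=centos7'
     runner-attributes:
       tags:
         - spack-k8s
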
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configuration of specs/jobs for a runner
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Once a runner has been chosen to build a release spec, the ``runner-attributes``
section provides information determining details of the job in the context of
the runner. The ``runner-attributes`` section must have a ``tags`` key, which
is a list containing at least one tag used to select the runner from among the
runners known to the gitlab instance. For Docker executor type runners, the
``image`` key is used to specify the Docker image used to build the release spec
(and could also appear as a dictionary with a ``name`` specifying the image name,
as well as an ``entrypoint`` to override whatever the default for that image is).
For other types of runners the ``variables`` key will be useful to pass any
information on to the runner that it needs to do its work (e.g. scheduler
parameters, etc.).

.. _staging_algorithm:

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Summary of ``.gitlab-ci.yml`` generation algorithm
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

All specs yielded by the matrix (or all the specs in the environment) have their
dependencies computed, and the entire resulting set of specs are staged together
before being run through the ``gitlab-ci/mappings`` entries, where each staged
spec is assigned a runner. "Staging" is the name we have given to the process
of figuring out in what order the specs should be built, taking into consideration
Gitlab CI rules about jobs/stages. In the staging process the goal is to maximize
the number of jobs in any stage of the pipeline, while ensuring that the jobs in
any stage only depend on jobs in previous stages (since those jobs are guaranteed
to have completed already). As a runner is determined for a job, the information
in the ``runner-attributes`` is used to populate various parts of the job
description that will be used by Gitlab CI. Once all the jobs have been assigned
a runner, the ``.gitlab-ci.yml`` is written to disk.

The short example provided above would result in the ``readline``, ``ncurses``,
and ``pkgconf`` packages getting staged and built on the runner chosen by the
``spack-k8s`` tag. In this example, we assume the runner is a Docker executor
type runner, and thus certain jobs will be run in the ``centos7`` container,
and others in the ``ubuntu-18.04`` container. The resulting ``.gitlab-ci.yml``
will contain 6 jobs in three stages. Once the jobs have been generated, the
presence of a ``SPACK_CDASH_AUTH_TOKEN`` environment variable during the
``spack ci generate`` command would result in all of the jobs being put in a
build group on CDash called "Release Testing" (that group will be created if
it didn't already exist).

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Optional compiler bootstrapping
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Spack pipelines also have support for bootstrapping compilers on systems that
may not already have the desired compilers installed. The idea here is that
you can specify a list of things to bootstrap in your ``definitions``, and
spack will guarantee those will be installed in a phase of the pipeline before
your release specs, so that you can rely on those packages being available in
the binary mirror when you need them later on in the pipeline. At the moment
the only viable use-case for bootstrapping is to install compilers.

Here's an example of what bootstrapping some compilers might look like:

.. code-block:: yaml

   spack:
     definitions:
       - compiler-pkgs:
           - 'llvm+clang@6.0.1 os=centos7'
           - 'gcc@6.5.0 os=centos7'
           - 'llvm+clang@6.0.1 os=ubuntu18.04'
           - 'gcc@6.5.0 os=ubuntu18.04'
       - pkgs:
           - readline@7.0
       - compilers:
           - '%gcc@5.5.0'
           - '%gcc@6.5.0'
           - '%gcc@7.3.0'
           - '%clang@6.0.0'
           - '%clang@6.0.1'
       - oses:
           - os=ubuntu18.04
           - os=centos7
     specs:
       - matrix:
           - [$pkgs]
           - [$compilers]
           - [$oses]
         exclude:
           - '%gcc@7.3.0 os=centos7'
           - '%gcc@5.5.0 os=ubuntu18.04'
     gitlab-ci:
       bootstrap:
         - name: compiler-pkgs
           compiler-agnostic: true
       mappings:
         # mappings similar to the example higher up in this description
         ...

In the example above, we have added a list to the ``definitions`` called
``compiler-pkgs`` (you can add any number of these), which lists compiler packages
we want to be staged ahead of the full matrix of release specs (which consists
only of readline in our example). Then within the ``gitlab-ci`` section, we
have added a ``bootstrap`` section, which can contain a list of items, each
referring to a list in the ``definitions`` section. These items can either
be a dictionary or a string. If you supply a dictionary, it must have a name
key whose value must match one of the lists in definitions and it can have a
``compiler-agnostic`` key whose value is a boolean. If you supply a string,
then it needs to match one of the lists provided in ``definitions``. You can
think of the bootstrap list as an ordered list of pipeline "phases" that will
be staged before your actual release specs. While this introduces another
layer of bottleneck in the pipeline (all jobs in all stages of one phase must
complete before any jobs in the next phase can begin), it also means you are
guaranteed your bootstrapped compilers will be available when you need them.

The ``compiler-agnostic`` key can be provided with each item in the
bootstrap list. It tells the ``spack ci generate`` command that any jobs staged
from that particular list should have the compiler removed from the spec, so
that any compiler available on the runner where the job is run can be used to
build the package.

When including a bootstrapping phase as in the example above, the result is that
the bootstrapped compiler packages will be pushed to the binary mirror (and the
local artifacts mirror) before the actual release specs are built. In this case,
the jobs corresponding to subsequent release specs are configured to
``install_missing_compilers``, so that if spack is asked to install a package
with a compiler it doesn't know about, it can be quickly installed from the
binary mirror first.

Since bootstrapping compilers is optional, those items can be left out of the
environment/stack file, and in that case no bootstrapping will be done (only the
specs will be staged for building) and the runners will be expected to already
have all needed compilers installed and configured for spack to use.

-------------------------------------
Using a custom spack in your pipeline
-------------------------------------

If your runners will not have a version of spack ready to invoke, or if for some
other reason you want to use a custom version of spack to run your pipelines,
this can be accomplished fairly simply. First, create CI environment variables
containing the url and branch/tag you want to clone (calling them, for example,
``SPACK_REPO`` and ``SPACK_REF``), use them to clone spack in your pre-ci
``before_script``, and finally pass those same values along to the workload
generation process via the ``spack-repo`` and ``spack-ref`` cli args. Here's
an example:

.. code-block:: yaml

   pipeline-job:
     tags:
       - <some-other-tag>
     before_script:
       - git clone ${SPACK_REPO} --branch ${SPACK_REF}
       - . ./spack/share/spack/setup-env.sh
     script:
       - spack ci start --spack-repo ${SPACK_REPO} --spack-ref ${SPACK_REF} <...args>
     after_script:
       - rm -rf ./spack

If the ``spack ci start`` command receives those extra command line arguments,
then it adds similar ``before_script`` and ``after_script`` sections for each of
the ``spack ci rebuild`` jobs it generates (cloning and sourcing a custom
spack in the ``before_script`` and removing it again in the ``after_script``).
This gives you control over the version of spack used when the rebuild jobs
are actually run on the gitlab runner.

.. _ci_environment_variables:

--------------------------------------------------
Environment variables affecting pipeline operation
--------------------------------------------------

Certain secrets and some other information should be provided to the pipeline
infrastructure via environment variables, usually for reasons of security, but
in some cases to support other pipeline use cases such as PR testing. The
environment variables used by the pipeline infrastructure are described here.

^^^^^^^^^^^^^^^^^
AWS_ACCESS_KEY_ID
^^^^^^^^^^^^^^^^^

Needed when binary mirror is an S3 bucket.

^^^^^^^^^^^^^^^^^^^^^
AWS_SECRET_ACCESS_KEY
^^^^^^^^^^^^^^^^^^^^^

Needed when binary mirror is an S3 bucket.

^^^^^^^^^^^^^^^
S3_ENDPOINT_URL
^^^^^^^^^^^^^^^

Needed when binary mirror is an S3 bucket that is *not* on AWS.

^^^^^^^^^^^^^^^^^
CDASH_AUTH_TOKEN
^^^^^^^^^^^^^^^^^

Needed in order to report build groups to CDash.

^^^^^^^^^^^^^^^^^
SPACK_SIGNING_KEY
^^^^^^^^^^^^^^^^^

Needed to sign/verify binary packages from the remote binary mirror.

^^^^^^^^^^^^^^^^^^
DOWNSTREAM_CI_REPO
^^^^^^^^^^^^^^^^^^

Needed until Gitlab CI supports dynamic job generation. Can contain connection
credentials, and could be the same repository or a different one.
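
For example, a value with embedded credentials might look something like this
(the host, path, and token variable are purely illustrative):

.. code-block:: console

   $ export DOWNSTREAM_CI_REPO="https://someuser:${ACCESS_TOKEN}@gitlab.example.com/group/repo.git"
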
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,7 +1,7 @@
 # These dependencies should be installed using pip in order
 # to build the documentation.
 
-sphinx==2.0.1
-sphinxcontrib-programoutput==0.14
-sphinx-rtd-theme==0.4.3
+sphinx
+sphinxcontrib-programoutput
+sphinx-rtd-theme
 python-levenshtein
@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
-.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
    Spack Project Developers. See the top-level COPYRIGHT file for details.
 
    SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -253,14 +253,14 @@ However, other more powerful methods are generally preferred for user
 environments.
 
 
-^^^^^^^^^^^^^^^^^^^^^^^
-Spack-Generated Modules
-^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Using ``spack load`` to Manage the User Environment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Suppose that Spack has been used to install a set of command-line
 programs, which users now wish to use. One can in principle put a
 number of ``spack load`` commands into ``.bashrc``, for example, to
-load a set of Spack-generated modules:
+load a set of Spack packages:
 
 .. code-block:: sh
@@ -273,7 +273,7 @@ load a set of Spack-generated modules:
 Although simple load scripts like this are useful in many cases, they
 have some drawbacks:
 
-1. The set of modules loaded by them will in general not be
+1. The set of packages loaded by them will in general not be
    consistent. They are a decent way to load commands to be called
    from command shells. See below for better ways to assemble a
    consistent set of packages for building application programs.
@@ -285,19 +285,24 @@ have some drawbacks:
    other hand, are not very smart: if the user-supplied spec matches
    more than one installed package, then ``spack module tcl loads`` will
    fail. This may change in the future. For now, the workaround is to
-   be more specific on any ``spack module tcl loads`` lines that fail.
+   be more specific on any ``spack load`` commands that fail.
 
 
 """"""""""""""""""""""
 Generated Load Scripts
 """"""""""""""""""""""
 
-Another problem with using ``spack load`` is that it is slow; a typical user
-environment could take several seconds to load, and would not be
-appropriate to put into ``.bashrc`` directly. It is preferable to use
-a series of ``spack module tcl loads`` commands to pre-compute which
-modules to load. These can be put in a script that is run whenever
-installed Spack packages change. For example:
+Another problem with using ``spack load`` is that it can be slow; a typical
+user environment could take several seconds to load, and would not be
+appropriate to put into ``.bashrc`` directly. This is because it
+requires the full start-up overhead of python/Spack for each command.
+In some circumstances it is preferable to use a series of ``spack
+module tcl loads`` (or ``spack module lmod loads``) commands to
+pre-compute which modules to load. This will generate the module names
+to load the packages using environment modules, rather than Spack's
+built-in support for environment modifications. These can be put in a
+script that is run whenever installed Spack packages change. For
+example:
 
 .. code-block:: sh
@@ -634,7 +639,7 @@ Global Activations
    Python (and similar systems) packages directly or creating a view.
    If extensions are globally activated, then ``spack load python`` will
    also load all the extensions activated for the given ``python``.
-   This reduces the need for users to load a large number of modules.
+   This reduces the need for users to load a large number of packages.
 
 However, Spack global activations have two potential drawbacks:
@@ -1090,6 +1095,248 @@ or filesystem views. However, it has some drawbacks:
    integrate Spack explicitly in their workflow. Not all users are
    willing to do this.
 
+-------------------------------------
+Using Spack to Replace Homebrew/Conda
+-------------------------------------
+
+Spack is an incredibly powerful package manager, designed for supercomputers
+where users have diverse installation needs. But Spack can also be used to
+handle simple single-user installations on your laptop. Most macOS users are
+already familiar with package managers like Homebrew and Conda, where all
+installed packages are symlinked to a single central location like ``/usr/local``.
+In this section, we will show you how to emulate the behavior of Homebrew/Conda
+using :ref:`environments`!
+
+^^^^^
+Setup
+^^^^^
+
+First, let's create a new environment. We'll assume that Spack is already set up
+correctly, and that you've already sourced the setup script for your shell.
+To create a new environment, simply run:
+
+.. code-block:: console
+
+   $ spack env create myenv
+   ==> Updating view at /Users/me/spack/var/spack/environments/myenv/.spack-env/view
+   ==> Created environment 'myenv' in /Users/me/spack/var/spack/environments/myenv
+   $ spack env activate myenv
+
+Here, *myenv* can be anything you want to name your environment. Next, we can add
+a list of packages we would like to install into our environment. Let's say we
+want a newer version of Bash than the one that comes with macOS, and we want a
+few Python libraries. We can run:
+
+.. code-block:: console
+
+   $ spack add bash
+   ==> Adding bash to environment myenv
+   ==> Updating view at /Users/me/spack/var/spack/environments/myenv/.spack-env/view
+   $ spack add python@3:
+   ==> Adding python@3: to environment myenv
+   ==> Updating view at /Users/me/spack/var/spack/environments/myenv/.spack-env/view
+   $ spack add py-numpy py-scipy py-matplotlib
+   ==> Adding py-numpy to environment myenv
+   ==> Adding py-scipy to environment myenv
+   ==> Adding py-matplotlib to environment myenv
+   ==> Updating view at /Users/me/spack/var/spack/environments/myenv/.spack-env/view
+
+Each package can be listed on a separate line, or combined into a single line.
+Notice that we're explicitly asking for Python 3 here. You can use any spec
+you would normally use on the command line with other Spack commands.
+
+Next, we want to manually configure a couple of things. In the ``myenv``
+directory, we can find the ``spack.yaml`` that actually defines our environment.
+
+.. code-block:: console
+
+   $ vim ~/spack/var/spack/environments/myenv/spack.yaml
+
+.. code-block:: yaml
+
+   # This is a Spack Environment file.
+   #
+   # It describes a set of packages to be installed, along with
+   # configuration settings.
+   spack:
+     # add package specs to the `specs` list
+     specs: [bash, 'python@3:', py-numpy, py-scipy, py-matplotlib]
+     view:
+       default:
+         root: /Users/me/spack/var/spack/environments/myenv/.spack-env/view
+         projections: {}
+     config: {}
+     mirrors: {}
+     modules:
+       enable: []
+     packages: {}
+     repos: []
+     upstreams: {}
+     definitions: []
+     concretization: separately
+
+You can see the packages we added earlier in the ``specs:`` section. If you
+ever want to add more packages, you can either use ``spack add`` or manually
+edit this file.
+We also need to change the ``concretization:`` option. By default, Spack
+concretizes each spec *separately*, allowing multiple versions of the same
+package to coexist. Since we want a single consistent environment, we want to
+concretize all of the specs *together*.
+
+Here is what your ``spack.yaml`` looks like with these new settings, and with
+some of the sections we don't plan on using removed:
+
+.. code-block:: diff
+
+    spack:
+   -  specs: [bash, 'python@3:', py-numpy, py-scipy, py-matplotlib]
+   +  specs:
+   +  - bash
+   +  - 'python@3:'
+   +  - py-numpy
+   +  - py-scipy
+   +  - py-matplotlib
+   -  view:
+   -    default:
+   -      root: /Users/me/spack/var/spack/environments/myenv/.spack-env/view
+   -      projections: {}
+   +  view: /Users/me/spack/var/spack/environments/myenv/.spack-env/view
+   -  config: {}
+   -  mirrors: {}
+   -  modules:
+   -    enable: []
+   -  packages: {}
+   -  repos: []
+   -  upstreams: {}
+   -  definitions: []
+   +  concretization: together
+   -  concretization: separately
+
+""""""""""""""""
+Symlink location
+""""""""""""""""
+
+In the ``spack.yaml`` file above, you'll notice that by default, Spack symlinks
+all installations to ``/Users/me/spack/var/spack/environments/myenv/.spack-env/view``.
+You can actually change this to any directory you want. For example, Homebrew
+uses ``/usr/local``, while Conda uses ``/Users/me/anaconda``. In order to access
+files in these locations, you need to update ``PATH`` and other environment variables
+to point to them. Activating the Spack environment does this automatically, but
+you can also manually set them in your ``.bashrc``.
+
+.. warning::
+
+   There are several reasons why you shouldn't use ``/usr/local``:
+
+   1. If you are on macOS 10.11+ (El Capitan and newer), Apple makes it hard
+      for you. You may notice permissions issues on ``/usr/local`` due to their
+      `System Integrity Protection <https://support.apple.com/en-us/HT204899>`_.
+      By default, users don't have permissions to install anything in ``/usr/local``,
+      and you can't even change this using ``sudo chown`` or ``sudo chmod``.
+   2. Other package managers like Homebrew will try to install things to the
+      same directory. If you plan on using Homebrew in conjunction with Spack,
+      don't symlink things to ``/usr/local``.
+   3. If you are on a shared workstation, or don't have sudo privileges, you
+      can't do this.
+
+   If you still want to do this anyway, there are several ways around SIP.
+   You could disable SIP by booting into recovery mode and running
+   ``csrutil disable``, but this is not recommended, as it can open up your OS
+   to security vulnerabilities. Another technique is to run ``spack concretize``
+   and ``spack install`` using ``sudo``. This is also not recommended.
+
+   The safest way I've found is to create your installation directories using
+   sudo, then change ownership back to the user like so:
+
+   .. code-block:: bash
+
+      for directory in .spack bin contrib include lib man share
+      do
+        sudo mkdir -p /usr/local/$directory
+        sudo chown $(id -un):$(id -gn) /usr/local/$directory
+      done
+
+   Depending on the packages you install in your environment, the exact list of
+   directories you need to create may vary. You may also find some packages
+   like Java libraries that install a single file to the installation prefix
+   instead of in a subdirectory. In this case, the action is the same, just replace
+   ``mkdir -p`` with ``touch`` in the for-loop above.
+
+   But again, it's safer just to use the default symlink location.
+
+^^^^^^^^^^^^
+Installation
+^^^^^^^^^^^^
+
+To actually concretize the environment, run:
+
+.. code-block:: console
+
+   $ spack concretize
+
+This will tell you which, if any, packages are already installed, and alert you
+to any conflicting specs.
+
+To actually install these packages and symlink them to your ``view:``
+directory, simply run:
+
+.. code-block:: console
+
+   $ spack install
+
+Now, when you type ``which python3``, it should find the one you just installed.
+
+In order to change the default shell to our newer Bash installation, we first
+need to add it to this list of acceptable shells. Run:
+
+.. code-block:: console
+
+   $ sudo vim /etc/shells
+
+and add the absolute path to your bash executable. Then run:
+
+.. code-block:: console
+
+   $ chsh -s /path/to/bash
+
+Now, when you log out and log back in, ``echo $SHELL`` should point to the
+newer version of Bash.
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Updating Installed Packages
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Let's say you upgraded to a new version of macOS, or a new version of Python
+was released, and you want to rebuild your entire software stack. To do this,
+simply run the following commands:
+
+.. code-block:: console
+
+   $ spack env activate myenv
+   $ spack concretize --force
+   $ spack install
+
+The ``--force`` flag tells Spack to overwrite its previous concretization
+decisions, allowing you to choose a new version of Python. If any of the new
+packages like Bash are already installed, ``spack install`` won't re-install
+them; it will keep the symlinks in place.
+
+^^^^^^^^^^^^^^
+Uninstallation
+^^^^^^^^^^^^^^
+
+If you decide that Spack isn't right for you, uninstallation is simple.
+Just run:
+
+.. code-block:: console
+
+   $ spack env activate myenv
+   $ spack uninstall --all
+
+This will uninstall all packages in your environment and remove the symlinks.
+
 ------------------------
 Using Spack on Travis-CI
 ------------------------
@@ -1254,7 +1501,7 @@ In order to build and run the image, execute:
|
||||
RUN spack install tar \
|
||||
&& spack clean -a
|
||||
|
||||
# need the modules already during image build?
|
||||
# need the executables from a package already during image build?
|
||||
#RUN /bin/bash -l -c ' \
|
||||
# spack load tar \
|
||||
# && which tar'
|
||||
|
2  lib/spack/env/cc  vendored

@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
10  lib/spack/external/__init__.py  vendored

@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -82,14 +82,6 @@
     ini-parsing, io, code, and log facilities.
 * Version: 1.4.34 (last version supporting Python 2.6)
 
-pyqver
-------
-
-* Homepage: https://github.com/ghewgill/pyqver
-* Usage: External script to query required python version of
-  python source code. Used for ensuring 2.6 compatibility.
-* Version: Unversioned
-
 pytest
 ------
 
2  lib/spack/external/_pytest/pytester.py  vendored

@@ -569,7 +569,7 @@ def syspathinsert(self, path=None):
     def _possibly_invalidate_import_caches(self):
         # invalidate caches if we can (py33 and above)
         try:
-            import importlib  # nopyqver
+            import importlib
         except ImportError:
             pass
         else:
2  lib/spack/external/ordereddict_backport.py  vendored

@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
344  lib/spack/external/pyqver2.py  vendored

@@ -1,344 +0,0 @@
-#!/usr/bin/env python
-#
-# pyqver2.py
-# by Greg Hewgill
-# https://github.com/ghewgill/pyqver
-#
-# This software is provided 'as-is', without any express or implied
-# warranty. In no event will the author be held liable for any damages
-# arising from the use of this software.
-#
-# Permission is granted to anyone to use this software for any purpose,
-# including commercial applications, and to alter it and redistribute it
-# freely, subject to the following restrictions:
-#
-# 1. The origin of this software must not be misrepresented; you must not
-#    claim that you wrote the original software. If you use this software
-#    in a product, an acknowledgment in the product documentation would be
-#    appreciated but is not required.
-# 2. Altered source versions must be plainly marked as such, and must not be
-#    misrepresented as being the original software.
-# 3. This notice may not be removed or altered from any source distribution.
-#
-# Copyright (c) 2009-2013 Greg Hewgill http://hewgill.com
-#
-
-import compiler
-import platform
-import sys
-
-StandardModules = {
-    "__future__": (2, 1),
-    "abc": (2, 6),
-    # skip argparse now that it's in lib/spack/external
-    # "argparse": (2, 7),
-    "ast": (2, 6),
-    "atexit": (2, 0),
-    "bz2": (2, 3),
-    "cgitb": (2, 2),
-    "collections": (2, 4),
-    "contextlib": (2, 5),
-    "cookielib": (2, 4),
-    "cProfile": (2, 5),
-    "csv": (2, 3),
-    "ctypes": (2, 5),
-    "datetime": (2, 3),
-    "decimal": (2, 4),
-    "difflib": (2, 1),
-    "DocXMLRPCServer": (2, 3),
-    "dummy_thread": (2, 3),
-    "dummy_threading": (2, 3),
-    "email": (2, 2),
-    "fractions": (2, 6),
-    "functools": (2, 5),
-    "future_builtins": (2, 6),
-    "hashlib": (2, 5),
-    "heapq": (2, 3),
-    "hmac": (2, 2),
-    "hotshot": (2, 2),
-    "HTMLParser": (2, 2),
-    "importlib": (2, 7),
-    "inspect": (2, 1),
-    "io": (2, 6),
-    "itertools": (2, 3),
-    "json": (2, 6),
-    "logging": (2, 3),
-    "modulefinder": (2, 3),
-    "msilib": (2, 5),
-    "multiprocessing": (2, 6),
-    "netrc": (1, 5, 2),
-    "numbers": (2, 6),
-    "optparse": (2, 3),
-    "ossaudiodev": (2, 3),
-    "pickletools": (2, 3),
-    "pkgutil": (2, 3),
-    "platform": (2, 3),
-    "pydoc": (2, 1),
-    "runpy": (2, 5),
-    "sets": (2, 3),
-    "shlex": (1, 5, 2),
-    "SimpleXMLRPCServer": (2, 2),
-    "spwd": (2, 5),
-    "sqlite3": (2, 5),
-    "ssl": (2, 6),
-    "stringprep": (2, 3),
-    "subprocess": (2, 4),
-    "sysconfig": (2, 7),
-    "tarfile": (2, 3),
-    "textwrap": (2, 3),
-    "timeit": (2, 3),
-    "unittest": (2, 1),
-    "uuid": (2, 5),
-    "warnings": (2, 1),
-    "weakref": (2, 1),
-    "winsound": (1, 5, 2),
-    "wsgiref": (2, 5),
-    "xml.dom": (2, 0),
-    "xml.dom.minidom": (2, 0),
-    "xml.dom.pulldom": (2, 0),
-    "xml.etree.ElementTree": (2, 5),
-    "xml.parsers.expat":(2, 0),
-    "xml.sax": (2, 0),
-    "xml.sax.handler": (2, 0),
-    "xml.sax.saxutils": (2, 0),
-    "xml.sax.xmlreader":(2, 0),
-    "xmlrpclib": (2, 2),
-    "zipfile": (1, 6),
-    "zipimport": (2, 3),
-    "_ast": (2, 5),
-    "_winreg": (2, 0),
-}
-
-Functions = {
-    "all": (2, 5),
-    "any": (2, 5),
-    "collections.Counter": (2, 7),
-    "collections.defaultdict": (2, 5),
-    "collections.OrderedDict": (2, 7),
-    "functools.total_ordering": (2, 7),
-    "enumerate": (2, 3),
-    "frozenset": (2, 4),
-    "itertools.compress": (2, 7),
-    "math.erf": (2, 7),
-    "math.erfc": (2, 7),
-    "math.expm1": (2, 7),
-    "math.gamma": (2, 7),
-    "math.lgamma": (2, 7),
-    "memoryview": (2, 7),
-    "next": (2, 6),
-    "os.getresgid": (2, 7),
-    "os.getresuid": (2, 7),
-    "os.initgroups": (2, 7),
-    "os.setresgid": (2, 7),
-    "os.setresuid": (2, 7),
-    "reversed": (2, 4),
-    "set": (2, 4),
-    "subprocess.check_call": (2, 5),
-    "subprocess.check_output": (2, 7),
-    "sum": (2, 3),
-    "symtable.is_declared_global": (2, 7),
-    "weakref.WeakSet": (2, 7),
-}
-
-Identifiers = {
-    "False": (2, 2),
-    "True": (2, 2),
-}
-
-def uniq(a):
-    if len(a) == 0:
-        return []
-    else:
-        return [a[0]] + uniq([x for x in a if x != a[0]])
-
-class NodeChecker(object):
-    def __init__(self):
-        self.vers = dict()
-        self.vers[(2,0)] = []
-    def add(self, node, ver, msg):
-        if ver not in self.vers:
-            self.vers[ver] = []
-        self.vers[ver].append((node.lineno, msg))
-    def default(self, node):
-        for child in node.getChildNodes():
-            self.visit(child)
-    def visitCallFunc(self, node):
-        def rollup(n):
-            if isinstance(n, compiler.ast.Name):
-                return n.name
-            elif isinstance(n, compiler.ast.Const):
-                return type(n.value).__name__
-            elif isinstance(n, compiler.ast.Getattr):
-                r = rollup(n.expr)
-                if r:
-                    return r + "." + n.attrname
-        name = rollup(node.node)
-        if name:
-            # Special handling for empty format strings, which aren't
-            # allowed in Python 2.6
-            if name in ('unicode.format', 'str.format'):
-                n = node.node
-                if isinstance(n, compiler.ast.Getattr):
-                    n = n.expr
-                if isinstance(n, compiler.ast.Const):
-                    if '{}' in n.value:
-                        self.add(node, (2,7), name + ' with {} format string')
-
-            v = Functions.get(name)
-            if v is not None:
-                self.add(node, v, name)
-        self.default(node)
-    def visitClass(self, node):
-        if node.bases:
-            self.add(node, (2,2), "new-style class")
-        if node.decorators:
-            self.add(node, (2,6), "class decorator")
-        self.default(node)
-    def visitDictComp(self, node):
-        self.add(node, (2,7), "dictionary comprehension")
-        self.default(node)
-    def visitFloorDiv(self, node):
-        self.add(node, (2,2), "// operator")
-        self.default(node)
-    def visitFrom(self, node):
-        v = StandardModules.get(node.modname)
-        if v is not None:
-            self.add(node, v, node.modname)
-        for n in node.names:
-            name = node.modname + "." + n[0]
-            v = Functions.get(name)
-            if v is not None:
-                self.add(node, v, name)
-    def visitFunction(self, node):
-        if node.decorators:
-            self.add(node, (2,4), "function decorator")
-        self.default(node)
-    def visitGenExpr(self, node):
-        self.add(node, (2,4), "generator expression")
-        self.default(node)
-    def visitGetattr(self, node):
-        if (isinstance(node.expr, compiler.ast.Const)
-                and isinstance(node.expr.value, str)
-                and node.attrname == "format"):
-            self.add(node, (2,6), "string literal .format()")
-        self.default(node)
-    def visitIfExp(self, node):
-        self.add(node, (2,5), "inline if expression")
-        self.default(node)
-    def visitImport(self, node):
-        for n in node.names:
-            v = StandardModules.get(n[0])
-            if v is not None:
-                self.add(node, v, n[0])
-        self.default(node)
-    def visitName(self, node):
-        v = Identifiers.get(node.name)
-        if v is not None:
-            self.add(node, v, node.name)
-        self.default(node)
-    def visitSet(self, node):
-        self.add(node, (2,7), "set literal")
-        self.default(node)
-    def visitSetComp(self, node):
-        self.add(node, (2,7), "set comprehension")
-        self.default(node)
-    def visitTryFinally(self, node):
-        # try/finally with a suite generates a Stmt node as the body,
-        # but try/except/finally generates a TryExcept as the body
-        if isinstance(node.body, compiler.ast.TryExcept):
-            self.add(node, (2,5), "try/except/finally")
-        self.default(node)
-    def visitWith(self, node):
-        if isinstance(node.body, compiler.ast.With):
-            self.add(node, (2,7), "with statement with multiple contexts")
-        else:
-            self.add(node, (2,5), "with statement")
-        self.default(node)
-    def visitYield(self, node):
-        self.add(node, (2,2), "yield expression")
-        self.default(node)
-
-def get_versions(source, filename=None):
-    """Return information about the Python versions required for specific features.
-
-    The return value is a dictionary with keys as a version number as a tuple
-    (for example Python 2.6 is (2,6)) and the value are a list of features that
-    require the indicated Python version.
-    """
-    tree = compiler.parse(source)
-    checker = compiler.walk(tree, NodeChecker())
-    return checker.vers
-
-def v27(source):
-    if sys.version_info >= (2, 7):
-        return qver(source)
-    else:
-        print >>sys.stderr, "Not all features tested, run --test with Python 2.7"
-        return (2, 7)
-
-def qver(source):
-    """Return the minimum Python version required to run a particular bit of code.
-
-    >>> qver('print "hello world"')
-    (2, 0)
-    >>> qver('class test(object): pass')
-    (2, 2)
-    >>> qver('yield 1')
-    (2, 2)
-    >>> qver('a // b')
-    (2, 2)
-    >>> qver('True')
-    (2, 2)
-    >>> qver('enumerate(a)')
-    (2, 3)
-    >>> qver('total = sum')
-    (2, 0)
-    >>> qver('sum(a)')
-    (2, 3)
-    >>> qver('(x*x for x in range(5))')
-    (2, 4)
-    >>> qver('class C:\\n @classmethod\\n def m(): pass')
-    (2, 4)
-    >>> qver('y if x else z')
-    (2, 5)
-    >>> qver('import hashlib')
-    (2, 5)
-    >>> qver('from hashlib import md5')
-    (2, 5)
-    >>> qver('import xml.etree.ElementTree')
-    (2, 5)
-    >>> qver('try:\\n try: pass;\\n except: pass;\\nfinally: pass')
-    (2, 0)
-    >>> qver('try: pass;\\nexcept: pass;\\nfinally: pass')
-    (2, 5)
-    >>> qver('from __future__ import with_statement\\nwith x: pass')
-    (2, 5)
-    >>> qver('collections.defaultdict(list)')
-    (2, 5)
-    >>> qver('from collections import defaultdict')
-    (2, 5)
-    >>> qver('"{0}".format(0)')
-    (2, 6)
-    >>> qver('memoryview(x)')
-    (2, 7)
-    >>> v27('{1, 2, 3}')
-    (2, 7)
-    >>> v27('{x for x in s}')
-    (2, 7)
-    >>> v27('{x: y for x in s}')
-    (2, 7)
-    >>> qver('from __future__ import with_statement\\nwith x:\\n with y: pass')
-    (2, 5)
-    >>> v27('from __future__ import with_statement\\nwith x, y: pass')
-    (2, 7)
-    >>> qver('@decorator\\ndef f(): pass')
-    (2, 4)
-    >>> qver('@decorator\\nclass test:\\n pass')
-    (2, 6)
-
-    #>>> qver('0o0')
-    #(2, 6)
-    #>>> qver('@foo\\nclass C: pass')
-    #(2, 6)
-    """
-    return max(get_versions(source).keys())
248  lib/spack/external/pyqver3.py  vendored

@@ -1,248 +0,0 @@
-#!/usr/bin/env python3
-#
-# pyqver3.py
-# by Greg Hewgill
-# https://github.com/ghewgill/pyqver
-#
-# This software is provided 'as-is', without any express or implied
-# warranty. In no event will the author be held liable for any damages
-# arising from the use of this software.
-#
-# Permission is granted to anyone to use this software for any purpose,
-# including commercial applications, and to alter it and redistribute it
-# freely, subject to the following restrictions:
-#
-# 1. The origin of this software must not be misrepresented; you must not
-#    claim that you wrote the original software. If you use this software
-#    in a product, an acknowledgment in the product documentation would be
-#    appreciated but is not required.
-# 2. Altered source versions must be plainly marked as such, and must not be
-#    misrepresented as being the original software.
-# 3. This notice may not be removed or altered from any source distribution.
-#
-# Copyright (c) 2009-2013 Greg Hewgill http://hewgill.com
-#
-import ast
-import platform
-import sys
-
-StandardModules = {
-    # skip argparse now that it's in lib/spack/external
-    # "argparse": (3, 2),
-    "faulthandler": (3, 3),
-    "importlib": (3, 1),
-    "ipaddress": (3, 3),
-    "lzma": (3, 3),
-    "tkinter.ttk": (3, 1),
-    "unittest.mock": (3, 3),
-    "venv": (3, 3),
-}
-
-Functions = {
-    "bytearray.maketrans": (3, 1),
-    "bytes.maketrans": (3, 1),
-    "bz2.open": (3, 3),
-    "collections.Counter": (3, 1),
-    "collections.OrderedDict": (3, 1),
-    "crypt.mksalt": (3, 3),
-    "email.generator.BytesGenerator": (3, 2),
-    "email.message_from_binary_file": (3, 2),
-    "email.message_from_bytes": (3, 2),
-    "functools.lru_cache": (3, 2),
-    "gzip.compress": (3, 2),
-    "gzip.decompress": (3, 2),
-    "inspect.getclosurevars": (3, 3),
-    "inspect.getgeneratorlocals": (3, 3),
-    "inspect.getgeneratorstate": (3, 2),
-    "itertools.combinations_with_replacement": (3, 1),
-    "itertools.compress": (3, 1),
-    "logging.config.dictConfig": (3, 2),
-    "logging.NullHandler": (3, 1),
-    "math.erf": (3, 2),
-    "math.erfc": (3, 2),
-    "math.expm1": (3, 2),
-    "math.gamma": (3, 2),
-    "math.isfinite": (3, 2),
-    "math.lgamma": (3, 2),
-    "math.log2": (3, 3),
-    "os.environb": (3, 2),
-    "os.fsdecode": (3, 2),
-    "os.fsencode": (3, 2),
-    "os.fwalk": (3, 3),
-    "os.getenvb": (3, 2),
-    "os.get_exec_path": (3, 2),
-    "os.getgrouplist": (3, 3),
-    "os.getpriority": (3, 3),
-    "os.getresgid": (3, 2),
-    "os.getresuid": (3, 2),
-    "os.get_terminal_size": (3, 3),
-    "os.getxattr": (3, 3),
-    "os.initgroups": (3, 2),
-    "os.listxattr": (3, 3),
-    "os.lockf": (3, 3),
-    "os.pipe2": (3, 3),
-    "os.posix_fadvise": (3, 3),
-    "os.posix_fallocate": (3, 3),
-    "os.pread": (3, 3),
-    "os.pwrite": (3, 3),
-    "os.readv": (3, 3),
-    "os.removexattr": (3, 3),
-    "os.replace": (3, 3),
-    "os.sched_get_priority_max": (3, 3),
-    "os.sched_get_priority_min": (3, 3),
-    "os.sched_getaffinity": (3, 3),
-    "os.sched_getparam": (3, 3),
-    "os.sched_getscheduler": (3, 3),
-    "os.sched_rr_get_interval": (3, 3),
-    "os.sched_setaffinity": (3, 3),
-    "os.sched_setparam": (3, 3),
-    "os.sched_setscheduler": (3, 3),
-    "os.sched_yield": (3, 3),
-    "os.sendfile": (3, 3),
-    "os.setpriority": (3, 3),
-    "os.setresgid": (3, 2),
-    "os.setresuid": (3, 2),
-    "os.setxattr": (3, 3),
-    "os.sync": (3, 3),
-    "os.truncate": (3, 3),
-    "os.waitid": (3, 3),
-    "os.writev": (3, 3),
-    "shutil.chown": (3, 3),
-    "shutil.disk_usage": (3, 3),
-    "shutil.get_archive_formats": (3, 3),
-    "shutil.get_terminal_size": (3, 3),
-    "shutil.get_unpack_formats": (3, 3),
-    "shutil.make_archive": (3, 3),
-    "shutil.register_archive_format": (3, 3),
-    "shutil.register_unpack_format": (3, 3),
-    "shutil.unpack_archive": (3, 3),
-    "shutil.unregister_archive_format": (3, 3),
-    "shutil.unregister_unpack_format": (3, 3),
-    "shutil.which": (3, 3),
-    "signal.pthread_kill": (3, 3),
-    "signal.pthread_sigmask": (3, 3),
-    "signal.sigpending": (3, 3),
-    "signal.sigtimedwait": (3, 3),
-    "signal.sigwait": (3, 3),
-    "signal.sigwaitinfo": (3, 3),
-    "socket.CMSG_LEN": (3, 3),
-    "socket.CMSG_SPACE": (3, 3),
-    "socket.fromshare": (3, 3),
-    "socket.if_indextoname": (3, 3),
-    "socket.if_nameindex": (3, 3),
-    "socket.if_nametoindex": (3, 3),
-    "socket.sethostname": (3, 3),
-    "ssl.match_hostname": (3, 2),
-    "ssl.RAND_bytes": (3, 3),
-    "ssl.RAND_pseudo_bytes": (3, 3),
-    "ssl.SSLContext": (3, 2),
-    "ssl.SSLEOFError": (3, 3),
-    "ssl.SSLSyscallError": (3, 3),
-    "ssl.SSLWantReadError": (3, 3),
-    "ssl.SSLWantWriteError": (3, 3),
-    "ssl.SSLZeroReturnError": (3, 3),
-    "stat.filemode": (3, 3),
-    "textwrap.indent": (3, 3),
-    "threading.get_ident": (3, 3),
-    "time.clock_getres": (3, 3),
-    "time.clock_gettime": (3, 3),
-    "time.clock_settime": (3, 3),
-    "time.get_clock_info": (3, 3),
-    "time.monotonic": (3, 3),
-    "time.perf_counter": (3, 3),
-    "time.process_time": (3, 3),
-    "types.new_class": (3, 3),
-    "types.prepare_class": (3, 3),
-}
-
-def uniq(a):
-    if len(a) == 0:
-        return []
-    else:
-        return [a[0]] + uniq([x for x in a if x != a[0]])
-
-class NodeChecker(ast.NodeVisitor):
-    def __init__(self):
-        self.vers = dict()
-        self.vers[(3,0)] = []
-    def add(self, node, ver, msg):
-        if ver not in self.vers:
-            self.vers[ver] = []
-        self.vers[ver].append((node.lineno, msg))
-    def visit_Call(self, node):
-        def rollup(n):
-            if isinstance(n, ast.Name):
-                return n.id
-            elif isinstance(n, ast.Attribute):
-                r = rollup(n.value)
-                if r:
-                    return r + "." + n.attr
-        name = rollup(node.func)
-        if name:
-            v = Functions.get(name)
-            if v is not None:
-                self.add(node, v, name)
-        self.generic_visit(node)
-    def visit_Import(self, node):
-        for n in node.names:
-            v = StandardModules.get(n.name)
-            if v is not None:
-                self.add(node, v, n.name)
-        self.generic_visit(node)
-    def visit_ImportFrom(self, node):
-        v = StandardModules.get(node.module)
-        if v is not None:
-            self.add(node, v, node.module)
-        for n in node.names:
-            name = node.module + "." + n.name
-            v = Functions.get(name)
-            if v is not None:
-                self.add(node, v, name)
-    def visit_Raise(self, node):
-        if isinstance(node.cause, ast.Name) and node.cause.id == "None":
-            self.add(node, (3,3), "raise ... from None")
-    def visit_YieldFrom(self, node):
-        self.add(node, (3,3), "yield from")
-
-def get_versions(source, filename=None):
-    """Return information about the Python versions required for specific features.
-
-    The return value is a dictionary with keys as a version number as a tuple
-    (for example Python 3.1 is (3,1)) and the value are a list of features that
-    require the indicated Python version.
-    """
-    tree = ast.parse(source, filename=filename)
-    checker = NodeChecker()
-    checker.visit(tree)
-    return checker.vers
-
-def v33(source):
-    if sys.version_info >= (3, 3):
-        return qver(source)
-    else:
-        print("Not all features tested, run --test with Python 3.3", file=sys.stderr)
-        return (3, 3)
-
-def qver(source):
-    """Return the minimum Python version required to run a particular bit of code.
-
-    >>> qver('print("hello world")')
-    (3, 0)
-    >>> qver("import importlib")
-    (3, 1)
-    >>> qver("from importlib import x")
-    (3, 1)
-    >>> qver("import tkinter.ttk")
-    (3, 1)
-    >>> qver("from collections import Counter")
-    (3, 1)
-    >>> qver("collections.OrderedDict()")
-    (3, 1)
-    >>> qver("import functools\\n@functools.lru_cache()\\ndef f(x): x*x")
-    (3, 2)
-    >>> v33("yield from x")
-    (3, 3)
-    >>> v33("raise x from None")
-    (3, 3)
-    """
-    return max(get_versions(source).keys())
2  lib/spack/external/ruamel/yaml/compat.py  vendored

@@ -12,7 +12,7 @@
         from ruamel.ordereddict import ordereddict
     except:
         try:
-            from collections import OrderedDict  # nopyqver
+            from collections import OrderedDict
         except ImportError:
            from ordereddict import OrderedDict
        # to get the right name import ... as ordereddict doesn't do that
@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)

@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,136 +1,165 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
 
 from __future__ import print_function
 
 import re
 import argparse
 import errno
 import sys
 
-class ArgparseWriter(object):
+from six import StringIO
+
+
+class Command(object):
+    """Parsed representation of a command from argparse.
+
+    This is a single command from an argparse parser. ``ArgparseWriter``
+    creates these and returns them from ``parse()``, and it passes one of
+    these to each call to ``format()`` so that we can take an action for
+    a single command.
+
+    Parts of a Command:
+      - prog: command name (str)
+      - description: command description (str)
+      - usage: command usage (str)
+      - positionals: list of positional arguments (list)
+      - optionals: list of optional arguments (list)
+      - subcommands: list of subcommand parsers (list)
+    """
+    def __init__(self, prog, description, usage,
+                 positionals, optionals, subcommands):
+        self.prog = prog
+        self.description = description
+        self.usage = usage
+        self.positionals = positionals
+        self.optionals = optionals
+        self.subcommands = subcommands
+
+
+# NOTE: The only reason we subclass argparse.HelpFormatter is to get access
+# to self._expand_help(), ArgparseWriter is not intended to be used as a
+# formatter_class.
+class ArgparseWriter(argparse.HelpFormatter):
     """Analyzes an argparse ArgumentParser for easy generation of help."""
-    def __init__(self, out=sys.stdout):
+
+    def __init__(self, prog, out=sys.stdout, aliases=False):
+        """Initializes a new ArgparseWriter instance.
+
+        Parameters:
+            prog (str): the program name
+            out (file object): the file to write to
+            aliases (bool): whether or not to include subparsers for aliases
+        """
+        super(ArgparseWriter, self).__init__(prog)
         self.level = 0
+        self.prog = prog
         self.out = out
+        self.aliases = aliases
 
-    def _write(self, parser, root=True, level=0):
+    def parse(self, parser, prog):
+        """Parses the parser object and returns the relavent components.
+
+        Parameters:
+            parser (argparse.ArgumentParser): the parser
+            prog (str): the command name
+
+        Returns:
+            (Command) information about the command from the parser
+        """
         self.parser = parser
-        self.level = level
+
+        split_prog = parser.prog.split(' ')
+        split_prog[-1] = prog
+        prog = ' '.join(split_prog)
+        description = parser.description
+
+        fmt = parser._get_formatter()
         actions = parser._actions
+        groups = parser._mutually_exclusive_groups
+        usage = fmt._format_usage(None, actions, groups, '').strip()
 
-        # allow root level to be flattened with rest of commands
-        if type(root) == int:
-            self.level = root
-            root = True
-
-        # go through actions and split them into optionals, positionals,
+        # Go through actions and split them into optionals, positionals,
         # and subcommands
         optionals = []
         positionals = []
         subcommands = []
         for action in actions:
             if action.option_strings:
-                optionals.append(action)
+                flags = action.option_strings
+                dest_flags = fmt._format_action_invocation(action)
+                help = self._expand_help(action) if action.help else ''
+                help = help.replace('\n', ' ')
+                optionals.append((flags, dest_flags, help))
             elif isinstance(action, argparse._SubParsersAction):
                 for subaction in action._choices_actions:
                     subparser = action._name_parser_map[subaction.dest]
-                    subcommands.append(subparser)
+                    subcommands.append((subparser, subaction.dest))
+
+                    # Look for aliases of the form 'name (alias, ...)'
+                    if self.aliases:
+                        match = re.match(r'(.*) \((.*)\)', subaction.metavar)
+                        if match:
+                            aliases = match.group(2).split(', ')
+                            for alias in aliases:
+                                subparser = action._name_parser_map[alias]
+                                subcommands.append((subparser, alias))
             else:
-                positionals.append(action)
+                args = fmt._format_action_invocation(action)
+                help = self._expand_help(action) if action.help else ''
+                help = help.replace('\n', ' ')
+                positionals.append((args, help))
 
-        groups = parser._mutually_exclusive_groups
-        fmt = parser._get_formatter()
-        description = parser.description
+        return Command(
+            prog, description, usage, positionals, optionals, subcommands)
 
-        def action_group(function, actions):
-            for action in actions:
-                arg = fmt._format_action_invocation(action)
-                help = action.help if action.help else ''
-                function(arg, re.sub('\n', ' ', help))
+    def format(self, cmd):
+        """Returns the string representation of a single node in the
+        parser tree.
 
-        if root:
-            self.begin_command(parser.prog)
+        Override this in subclasses to define how each subcommand
+        should be displayed.
 
-        if description:
-            self.description(parser.description)
+        Parameters:
+            (Command): parsed information about a command or subcommand
 
-        usage = fmt._format_usage(None, actions, groups, '').strip()
-        self.usage(usage)
+        Returns:
+            str: the string representation of this subcommand
+        """
+        raise NotImplementedError
 
-        if positionals:
-            self.begin_positionals()
-            action_group(self.positional, positionals)
-            self.end_positionals()
+    def _write(self, parser, prog, level=0):
+        """Recursively writes a parser.
 
-        if optionals:
-            self.begin_optionals()
-            action_group(self.optional, optionals)
-            self.end_optionals()
+        Parameters:
+            parser (argparse.ArgumentParser): the parser
+            prog (str): the command name
+            level (int): the current level
+        """
+        self.level = level
 
-        if subcommands:
-            self.begin_subcommands(subcommands)
-            for subparser in subcommands:
-                self._write(subparser, root=True, level=level + 1)
-            self.end_subcommands(subcommands)
+        cmd = self.parse(parser, prog)
+        self.out.write(self.format(cmd))
 
-        if root:
-            self.end_command(parser.prog)
+        for subparser, prog in cmd.subcommands:
+            self._write(subparser, prog, level=level + 1)
 
-    def write(self, parser, root=True):
+    def write(self, parser):
         """Write out details about an ArgumentParser.
 
         Args:
-            parser (ArgumentParser): an ``argparse`` parser
-            root (bool or int): if bool, whether to include the root parser;
-                or ``1`` to flatten the root parser with first-level
-                subcommands
+            parser (argparse.ArgumentParser): the parser
         """
         try:
-            self._write(parser, root, level=0)
+            self._write(parser, self.prog)
         except IOError as e:
-            # swallow pipe errors
+            # Swallow pipe errors
+            # Raises IOError in Python 2 and BrokenPipeError in Python 3
             if e.errno != errno.EPIPE:
                 raise
 
-    def begin_command(self, prog):
-        pass
-
-    def end_command(self, prog):
-        pass
-
-    def description(self, description):
-        pass
-
-    def usage(self, usage):
-        pass
-
-    def begin_positionals(self):
-        pass
-
-    def positional(self, name, help):
-        pass
-
-    def end_positionals(self):
-        pass
-
-    def begin_optionals(self):
-        pass
-
-    def optional(self, option, help):
-        pass
-
-    def end_optionals(self):
-        pass
-
-    def begin_subcommands(self, subcommands):
-        pass
-
-    def end_subcommands(self, subcommands):
-        pass
-
 
 _rst_levels = ['=', '-', '^', '~', ':', '`']
 
@@ -138,66 +167,213 @@ def end_subcommands(self, subcommands):
 class ArgparseRstWriter(ArgparseWriter):
     """Write argparse output as rst sections."""
 
-    def __init__(self, out=sys.stdout, rst_levels=_rst_levels,
-                 strip_root_prog=True):
+    def __init__(self, prog, out=sys.stdout, aliases=False,
+                 rst_levels=_rst_levels):
         """Create a new ArgparseRstWriter.
 
-        Args:
+        Parameters:
+            prog (str): program name
             out (file object): file to write to
+            aliases (bool): whether or not to include subparsers for aliases
             rst_levels (list of str): list of characters
                 for rst section headings
-            strip_root_prog (bool): if ``True``, strip the base command name
-                from subcommands in output
         """
-        super(ArgparseRstWriter, self).__init__(out)
+        super(ArgparseRstWriter, self).__init__(prog, out, aliases)
         self.rst_levels = rst_levels
-        self.strip_root_prog = strip_root_prog
 
-    def line(self, string=''):
-        self.out.write('%s\n' % string)
+    def format(self, cmd):
+        string = StringIO()
+        string.write(self.begin_command(cmd.prog))
+
+        if cmd.description:
+            string.write(self.description(cmd.description))
+
+        string.write(self.usage(cmd.usage))
+
+        if cmd.positionals:
+            string.write(self.begin_positionals())
+            for args, help in cmd.positionals:
+                string.write(self.positional(args, help))
+            string.write(self.end_positionals())
+
+        if cmd.optionals:
+            string.write(self.begin_optionals())
+            for flags, dest_flags, help in cmd.optionals:
+                string.write(self.optional(dest_flags, help))
+            string.write(self.end_optionals())
+
+        if cmd.subcommands:
+            string.write(self.begin_subcommands(cmd.subcommands))
+
+        return string.getvalue()
 
     def begin_command(self, prog):
-        self.line()
-        self.line('----')
-        self.line()
-        self.line('.. _%s:\n' % prog.replace(' ', '-'))
-        self.line('%s' % prog)
-        self.line(self.rst_levels[self.level] * len(prog) + '\n')
+        return """
+----
+
+.. _{0}:
+
+{1}
+{2}
+
+""".format(prog.replace(' ', '-'), prog,
+           self.rst_levels[self.level] * len(prog))
 
     def description(self, description):
-        self.line('%s\n' % description)
+        return description + '\n\n'
 
     def usage(self, usage):
-        self.line('.. code-block:: console\n')
-        self.line('    %s\n' % usage)
+        return """\
+.. code-block:: console
+
+    {0}
+
+""".format(usage)
 
     def begin_positionals(self):
-        self.line()
-        self.line('**Positional arguments**\n')
+        return '\n**Positional arguments**\n\n'
 
     def positional(self, name, help):
-        self.line(name)
-        self.line('  %s\n' % help)
+        return """\
+{0}
+  {1}
+
+""".format(name, help)
 
+    def end_positionals(self):
+        return ''
+
     def begin_optionals(self):
-        self.line()
-        self.line('**Optional arguments**\n')
+        return '\n**Optional arguments**\n\n'
 
     def optional(self, opts, help):
-        self.line('``%s``' % opts)
-        self.line('  %s\n' % help)
+        return """\
+``{0}``
+  {1}
+
+""".format(opts, help)
 
+    def end_optionals(self):
+        return ''
+
     def begin_subcommands(self, subcommands):
-        self.line()
-        self.line('**Subcommands**\n')
-        self.line('.. hlist::')
-        self.line('   :columns: 4\n')
+        string = """
+**Subcommands**
 
-        for cmd in subcommands:
-            prog = cmd.prog
-            if self.strip_root_prog:
-                prog = re.sub(r'^[^ ]* ', '', prog)
+.. hlist::
+   :columns: 4
 
-            self.line('   * :ref:`%s <%s>`'
-                      % (prog, cmd.prog.replace(' ', '-')))
-        self.line()
+"""
+
+        for cmd, _ in subcommands:
+            prog = re.sub(r'^[^ ]* ', '', cmd.prog)
+            string += '   * :ref:`{0} <{1}>`\n'.format(
+                prog, cmd.prog.replace(' ', '-'))
+
+        return string + '\n'
+
+
+class ArgparseCompletionWriter(ArgparseWriter):
+    """Write argparse output as shell programmable tab completion functions."""
+
+    def format(self, cmd):
+        """Returns the string representation of a single node in the
+        parser tree.
+
+        Override this in subclasses to define how each subcommand
+        should be displayed.
+
+        Parameters:
+            (Command): parsed information about a command or subcommand
+
+        Returns:
+            str: the string representation of this subcommand
+        """
+
+        assert cmd.optionals  # we should always at least have -h, --help
+        assert not (cmd.positionals and cmd.subcommands)  # one or the other
+
+        # We only care about the arguments/flags, not the help messages
+        positionals = []
+        if cmd.positionals:
+            positionals, _ = zip(*cmd.positionals)
+        optionals, _, _ = zip(*cmd.optionals)
+        subcommands = []
+        if cmd.subcommands:
+            _, subcommands = zip(*cmd.subcommands)
+
+        # Flatten lists of lists
+        optionals = [x for xx in optionals for x in xx]
+
+        return (self.start_function(cmd.prog) +
+                self.body(positionals, optionals, subcommands) +
+                self.end_function(cmd.prog))
+
+    def start_function(self, prog):
+        """Returns the syntax needed to begin a function definition.
+
+        Parameters:
+            prog (str): the command name
+
+        Returns:
+            str: the function definition beginning
+        """
+        name = prog.replace('-', '_').replace(' ', '_')
+        return '\n_{0}() {{'.format(name)
+
+    def end_function(self, prog=None):
+        """Returns the syntax needed to end a function definition.
+
+        Parameters:
+            prog (str, optional): the command name
+
+        Returns:
+            str: the function definition ending
+        """
+        return '}\n'
+
+    def body(self, positionals, optionals, subcommands):
+        """Returns the body of the function.
+
+        Parameters:
+            positionals (list): list of positional arguments
+            optionals (list): list of optional arguments
+            subcommands (list): list of subcommand parsers
+
+        Returns:
+            str: the function body
+        """
+        return ''
+
+    def positionals(self, positionals):
+        """Returns the syntax for reporting positional arguments.
+
+        Parameters:
+            positionals (list): list of positional arguments
+
+        Returns:
+            str: the syntax for positional arguments
+        """
+        return ''
+
+    def optionals(self, optionals):
+        """Returns the syntax for reporting optional flags.
+
+        Parameters:
+            optionals (list): list of optional arguments
+
+        Returns:
+            str: the syntax for optional flags
+        """
+        return ''
+
+    def subcommands(self, subcommands):
+        """Returns the syntax for reporting subcommands.
+
+        Parameters:
+            subcommands (list): list of subcommand parsers
+
+        Returns:
+            str: the syntax for subcommand parsers
+        """
+        return ''
@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -13,6 +13,7 @@
 import six
 
 from .microarchitecture import generic_microarchitecture, targets
+from .schema import targets_json
 
 #: Mapping from operating systems to chain of commands
 #: to obtain a dictionary of raw info on the current cpu
@@ -108,23 +109,39 @@ def sysctl(*args):
         'model': sysctl('-n', 'machdep.cpu.model'),
         'model name': sysctl('-n', 'machdep.cpu.brand_string')
     }
 
-    # Super hacky way to deal with slight representation differences
-    # Would be better to somehow consider these "identical"
-    if 'sse4.1' in info['flags']:
-        info['flags'] += ' sse4_1'
-    if 'sse4.2' in info['flags']:
-        info['flags'] += ' sse4_2'
-    if 'avx1.0' in info['flags']:
-        info['flags'] += ' avx'
-    if 'clfsopt' in info['flags']:
-        info['flags'] += ' clflushopt'
-    if 'xsave' in info['flags']:
-        info['flags'] += ' xsavec xsaveopt'
-
     return info
 
 
+def adjust_raw_flags(info):
+    """Adjust the flags detected on the system to homogenize
+    slightly different representations.
+    """
+    # Flags detected on Darwin turned to their linux counterpart
+    flags = info.get('flags', [])
+    d2l = targets_json['conversions']['darwin_flags']
+    for darwin_flag, linux_flag in d2l.items():
+        if darwin_flag in flags:
+            info['flags'] += ' ' + linux_flag
+
+
+def adjust_raw_vendor(info):
+    """Adjust the vendor field to make it human readable"""
+    if 'CPU implementer' not in info:
+        return
+
+    # Mapping numeric codes to vendor (ARM). This list is a merge from
+    # different sources:
+    #
+    # https://github.com/karelzak/util-linux/blob/master/sys-utils/lscpu-arm.c
+    # https://developer.arm.com/docs/ddi0487/latest/arm-architecture-reference-manual-armv8-for-armv8-a-architecture-profile
+    # https://github.com/gcc-mirror/gcc/blob/master/gcc/config/aarch64/aarch64-cores.def
+    # https://patchwork.kernel.org/patch/10524949/
+    arm_vendors = targets_json['conversions']['arm_vendors']
+    arm_code = info['CPU implementer']
+    if arm_code in arm_vendors:
+        info['CPU implementer'] = arm_vendors[arm_code]
+
+
 def raw_info_dictionary():
     """Returns a dictionary with information on the cpu of the current host.
 
@@ -139,6 +156,8 @@ def raw_info_dictionary():
             warnings.warn(str(e))
 
         if info:
+            adjust_raw_flags(info)
+            adjust_raw_vendor(info)
             break
 
     return info
@@ -223,3 +242,15 @@ def compatibility_check_for_x86_64(info, target):
     return (target == arch_root or arch_root in target.ancestors) \
         and (target.vendor == vendor or target.vendor == 'generic') \
         and target.features.issubset(features)
+
+
+@compatibility_check(architecture_family='aarch64')
+def compatibility_check_for_aarch64(info, target):
+    basename = 'aarch64'
+    features = set(info.get('Features', '').split())
+    vendor = info.get('CPU implementer', 'generic')
+
+    arch_root = targets[basename]
+    return (target == arch_root or arch_root in target.ancestors) \
+        and (target.vendor == vendor or target.vendor == 'generic') \
+        and target.features.issubset(features)
@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -8,7 +8,7 @@
 import warnings
 
 try:
-    from collections.abc import Sequence
+    from collections.abc import Sequence  # novm
 except ImportError:
     from collections import Sequence
 
@@ -61,11 +61,18 @@
           "flags": "-march={name} -mtune={name}"
         }
       ],
-      "clang": {
-        "versions": ":",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu=generic"
-      },
+      "clang": [
+        {
+          "versions": "0.0.0-apple:",
+          "name": "x86-64",
+          "flags": "-march={name}"
+        },
+        {
+          "versions": ":",
+          "name": "x86-64",
+          "flags": "-march={name} -mtune=generic"
+        }
+      ],
       "intel": {
         "versions": ":",
         "name": "pentium4",
@@ -89,8 +96,7 @@
       },
      "clang": {
         "versions": "3.9:",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "16.0:",
@@ -115,8 +121,7 @@
       },
       "clang": {
         "versions": "3.9:",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "16.0:",
@@ -150,8 +155,7 @@
       ],
       "clang": {
         "versions": "3.9:",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "16.0:",
@@ -181,8 +185,7 @@
       },
       "clang": {
         "versions": "3.9:",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "16.0:",
@@ -220,8 +223,7 @@
       ],
       "clang": {
         "versions": "3.9:",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": [
         {
@@ -267,8 +269,7 @@
       ],
       "clang": {
         "versions": "3.9:",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": [
         {
@@ -319,8 +320,7 @@
       ],
       "clang": {
         "versions": "3.9:",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": [
         {
@@ -366,8 +366,7 @@
       },
       "clang": {
         "versions": "3.9:",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "18.0:",
@@ -409,8 +408,7 @@
       },
       "clang": {
         "versions": "3.9:",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "18.0:",
@@ -456,8 +454,7 @@
       "clang": {
         "versions": "3.9:",
         "name": "knl",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "18.0:",
@@ -508,8 +505,7 @@
       "clang": {
         "versions": "3.9:",
         "name": "skylake-avx512",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "18.0:",
@@ -561,8 +557,7 @@
       },
       "clang": {
         "versions": "3.9:",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "18.0:",
@@ -602,7 +597,7 @@
       "avx512bw",
       "avx512dq",
       "avx512cd",
-      "avx512vnni"
+      "avx512_vnni"
     ],
     "compilers": {
       "gcc": {
@@ -611,8 +606,7 @@
       },
       "clang": {
         "versions": "8.0:",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "19.0:",
@@ -678,13 +672,11 @@
         {
           "versions": "7.0:",
           "name": "icelake-client",
-          "family": "x86-64",
-          "flags": "-march={family} -mcpu={name}"
+          "flags": "-march={name} -mtune={name}"
         },
         {
           "versions": "6.0:6.9",
-          "family": "x86-64",
-          "flags": "-march={family} -mcpu={name}"
+          "flags": "-march={name} -mtune={name}"
         }
       ],
       "intel": {
@@ -716,8 +708,7 @@
       "clang": {
         "versions": "3.9:",
         "name": "amdfam10",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "16.0:",
@@ -754,8 +745,7 @@
       "clang": {
         "versions": "3.9:",
         "name": "bdver1",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "16.0:",
@@ -796,8 +786,7 @@
       "clang": {
         "versions": "3.9:",
         "name": "bdver2",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "16.0:",
@@ -839,8 +828,7 @@
       "clang": {
         "versions": "3.9:",
         "name": "bdver3",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "16.0:",
@@ -885,8 +873,7 @@
       "clang": {
         "versions": "3.9:",
         "name": "bdver4",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "16.0:",
@@ -935,8 +922,7 @@
       "clang": {
         "versions": "4.0:",
         "name": "znver1",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "16.0:",
@@ -986,8 +972,7 @@
       "clang": {
         "versions": "9.0:",
         "name": "znver2",
-        "family": "x86-64",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-march={name} -mtune={name}"
       },
       "intel": {
         "versions": "16.0:",
@@ -1009,8 +994,7 @@
       },
       "clang": {
         "versions": ":",
-        "family": "ppc64",
-        "flags": "-march={family} -mcpu=generic"
+        "flags": "-mcpu={name} -mtune={name}"
       }
     }
   },
@@ -1026,9 +1010,7 @@
       },
       "clang": {
         "versions": "3.9:",
-        "family": "ppc64",
-        "name": "pwr7",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-mcpu={name} -mtune={name}"
       }
     }
   },
@@ -1051,9 +1033,7 @@
       ],
       "clang": {
         "versions": "3.9:",
-        "family": "ppc64",
-        "name": "pwr8",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-mcpu={name} -mtune={name}"
       }
     }
   },
@@ -1069,9 +1049,7 @@
       },
       "clang": {
         "versions": "3.9:",
-        "family": "ppc64",
-        "name": "pwr9",
-        "flags": "-march={family} -mcpu={name}"
+        "flags": "-mcpu={name} -mtune={name}"
       }
     }
   },
@@ -1087,8 +1065,7 @@
       },
       "clang": {
         "versions": ":",
-        "family": "ppc64le",
-        "flags": "-march={family} -mcpu=generic"
+        "flags": "-mcpu={name} -mtune={name}"
      }
     }
   },
@@ -1114,8 +1091,8 @@
       "clang": {
         "versions": "3.9:",
         "family": "ppc64le",
-        "name": "pwr8",
-        "flags": "-march={family} -mcpu={name}"
+        "name": "power8",
+        "flags": "-mcpu={name} -mtune={name}"
       }
     }
   },
@@ -1133,8 +1110,8 @@
       "clang": {
         "versions": "3.9:",
         "family": "ppc64le",
-        "name": "pwr9",
-        "flags": "-march={family} -mcpu={name}"
+        "name": "power9",
+        "flags": "-mcpu={name} -mtune={name}"
       }
     }
   },
@@ -1149,11 +1126,113 @@
       },
       "clang": {
         "versions": ":",
-        "family": "aarch64",
-        "flags": "-march={family} -mcpu=generic"
+        "flags": "-march=armv8-a -mtune=generic"
       }
     }
   },
+  "thunderx2": {
+    "from": "aarch64",
+    "vendor": "Cavium",
+    "features": [
+      "fp",
+      "asimd",
+      "evtstrm",
+      "aes",
+      "pmull",
+      "sha1",
+      "sha2",
+      "crc32",
+      "atomics",
+      "cpuid",
+      "asimdrdm"
+    ],
+    "compilers": {
+      "gcc": [
+        {
+          "versions": "4.8:4.8.9",
+          "flags": "-march=armv8-a"
+        },
+        {
+          "versions": "4.9:5.9",
+          "flags": "-march=armv8-a+crc+crypto"
+        },
+        {
+          "versions": "6:6.9",
+          "flags": "-march=armv8.1-a+crc+crypto"
+        },
+        {
+          "versions": "7:",
+          "flags": "-mcpu=thunderx2t99"
+        }
+      ],
+      "clang": [
+        {
+          "versions": "3.9:4.9",
+          "flags": "-march=armv8.1-a+crc+crypto"
+        },
+        {
+          "versions": "5:",
+          "flags": "-mcpu=thunderx2t99"
+        }
+      ]
+    }
+  },
+  "a64fx": {
+    "from": "aarch64",
+    "vendor": "Fujitsu",
+    "features": [
+      "fp",
+      "asimd",
+      "evtstrm",
+      "aes",
+      "pmull",
+      "sha1",
+      "sha2",
+      "crc32",
+      "atomics",
+      "cpuid",
+      "asimdrdm",
+      "fphp",
+      "asimdhp",
+      "fcma",
+      "dcpop",
+      "sve"
+    ],
+    "compilers": {
+      "gcc": [
+        {
+          "versions": "4.8:4.8.9",
+          "flags": "-march=armv8-a"
+        },
+        {
+          "versions": "4.9:5.9",
+          "flags": "-march=armv8-a+crc+crypto"
+        },
+        {
+          "versions": "6:6.9",
+          "flags": "-march=armv8.1-a+crc+crypto"
+        },
+        {
+          "versions": "7:7.9",
+          "flags": "-march=armv8.2-a+crc+crypto+fp16"
+        },
+        {
+          "versions": "8:",
+          "flags": "-march=armv8.2-a+crc+aes+sha2+fp16+sve -msve-vector-bits=512"
+        }
+      ],
+      "clang": [
+        {
+          "versions": "3.9:4.9",
+          "flags": "-march=armv8.2-a+crc+crypto+fp16"
+        },
+        {
+          "versions": "5:",
+          "flags": "-march=armv8.2-a+crc+crypto+fp16+sve"
+        }
+      ]
+    }
+  },
   "arm": {
     "from": null,
     "vendor": "generic",
@@ -1219,6 +1298,20 @@
         "ppc64"
       ]
     },
+    "vsx": {
+      "reason": "VSX alitvec extensions are supported by PowerISA from v2.06 (Power7+), but might not be listed in features",
+      "families": [
+        "ppc64le",
+        "ppc64"
+      ]
+    },
+    "fma": {
+      "reason": "FMA has been supported by PowerISA since Power1, but might not be listed in features",
+      "families": [
+        "ppc64le",
+        "ppc64"
+      ]
+    },
     "sse4.1": {
       "reason": "permits to refer to sse4_1 also as sse4.1",
       "any_of": [
@@ -1237,5 +1330,34 @@
         "aarch64"
       ]
     }
-  }
+  },
+  "conversions": {
+    "description": "Conversions that map some platform specific values to canonical values",
+    "arm_vendors": {
+      "0x41": "ARM",
+      "0x42": "Broadcom",
+      "0x43": "Cavium",
+      "0x44": "DEC",
+      "0x46": "Fujitsu",
+      "0x48": "HiSilicon",
+      "0x49": "Infineon Technologies AG",
+      "0x4d": "Motorola",
+      "0x4e": "Nvidia",
+      "0x50": "APM",
+      "0x51": "Qualcomm",
+      "0x53": "Samsung",
+      "0x56": "Marvell",
+      "0x61": "Apple",
+      "0x66": "Faraday",
+      "0x68": "HXT",
+      "0x69": "Intel"
+    },
+    "darwin_flags": {
+      "sse4.1": "sse4_1",
+      "sse4.2": "sse4_2",
+      "avx1.0": "avx",
+      "clfsopt": "clflushopt",
+      "xsave": "xsavec xsaveopt"
+    }
+  }
 }
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -6,7 +6,7 @@
|
||||
import os.path
|
||||
|
||||
try:
|
||||
from collections.abc import MutableMapping
|
||||
from collections.abc import MutableMapping # novm
|
||||
except ImportError:
|
||||
from collections import MutableMapping
|
||||
|
||||
@@ -72,7 +72,21 @@
|
||||
'additionalProperties': False
|
||||
}
|
||||
},
|
||||
|
||||
},
|
||||
'conversions': {
|
||||
'type': 'object',
|
||||
'properties': {
|
||||
'description': {
|
||||
'type': 'string'
|
||||
},
|
||||
'arm_vendors': {
|
||||
'type': 'object',
|
||||
},
|
||||
'darwin_flags': {
|
||||
'type': 'object'
|
||||
}
|
||||
},
|
||||
'additionalProperties': False
|
||||
}
|
||||
}
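The schema above can be exercised directly. A hedged sketch using the third-party ``jsonschema`` package (assumed available for illustration; the file name is hypothetical and this is not necessarily how Spack runs validation):

    import json
    import jsonschema  # third-party package; an assumption for this sketch

    with open('microarchitectures.json') as f:  # hypothetical data file
        targets = json.load(f)

    # 'schema' is the dict sketched above; raises ValidationError on mismatch
    jsonschema.validate(targets, schema)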
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -201,7 +201,6 @@ def groupid_to_group(x):
|
||||
output_file.writelines(input_file.readlines())
|
||||
|
||||
except BaseException:
|
||||
os.remove(tmp_filename)
|
||||
# clean up the original file on failure.
|
||||
shutil.move(backup_filename, filename)
|
||||
raise
|
||||
@@ -457,7 +456,7 @@ def copy_tree(src, dest, symlinks=True, ignore=None, _permissions=False):
|
||||
if os.path.isdir(s):
|
||||
mkdirp(d)
|
||||
else:
|
||||
shutil.copyfile(s, d)
|
||||
shutil.copy2(s, d)
|
||||
|
||||
if _permissions:
|
||||
set_install_permissions(d)
|
||||
@@ -625,9 +624,9 @@ def replace_directory_transaction(directory_name, tmp_root=None):
|
||||
# Check the input is indeed a directory with absolute path.
|
||||
# Raise before anything is done to avoid moving the wrong directory
|
||||
assert os.path.isdir(directory_name), \
|
||||
'"directory_name" must be a valid directory'
|
||||
'Invalid directory: ' + directory_name
|
||||
assert os.path.isabs(directory_name), \
|
||||
'"directory_name" must contain an absolute path'
|
||||
'"directory_name" must contain an absolute path: ' + directory_name
|
||||
|
||||
directory_basename = os.path.basename(directory_name)
|
||||
|
||||
@@ -917,10 +916,8 @@ def remove_if_dead_link(path):
|
||||
Parameters:
|
||||
path (str): The potential dead link
|
||||
"""
|
||||
if os.path.islink(path):
|
||||
real_path = os.path.realpath(path)
|
||||
if not os.path.exists(real_path):
|
||||
os.unlink(path)
|
||||
if os.path.islink(path) and not os.path.exists(path):
|
||||
os.unlink(path)
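The condensed check works because ``os.path.exists()`` follows symlinks, so a link is dead exactly when it is a link whose resolved target is missing. A small sketch with hypothetical paths:

    import os
    os.symlink('/no/such/target', '/tmp/dangling')  # create a dead link
    remove_if_dead_link('/tmp/dangling')            # unlinks it
    remove_if_dead_link('/tmp')                     # not a symlink: left alone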
|
||||
|
||||
|
||||
def remove_linked_tree(path):
|
||||
@@ -1156,7 +1153,9 @@ class HeaderList(FileList):
|
||||
|
||||
# Make sure to only match complete words, otherwise path components such
|
||||
# as "xinclude" will cause false matches.
|
||||
include_regex = re.compile(r'(.*)(\binclude\b)(.*)')
|
||||
# Avoid matching paths such as <prefix>/include/something/detail/include,
|
||||
# e.g. in the CUDA Toolkit which ships internal libc++ headers.
|
||||
include_regex = re.compile(r'(.*?)(\binclude\b)(.*)')
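The only change is the lazy quantifier ``(.*?)``, which makes the first ``include`` component win instead of the last. A quick sketch with a hypothetical path:

    import re
    path = '/prefix/include/cuda/detail/include'
    greedy = re.compile(r'(.*)(\binclude\b)(.*)')
    lazy = re.compile(r'(.*?)(\binclude\b)(.*)')
    greedy.match(path).group(1)  # '/prefix/include/cuda/detail/' -- wrong prefix
    lazy.match(path).group(1)    # '/prefix/' -- stops at the first "include"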
|
||||
|
||||
def __init__(self, files):
|
||||
super(HeaderList, self).__init__(files)
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -19,12 +19,6 @@
|
||||
ignore_modules = [r'^\.#', '~$']
|
||||
|
||||
|
||||
class classproperty(property):
|
||||
"""classproperty decorator: like property but for classmethods."""
|
||||
def __get__(self, cls, owner):
|
||||
return self.fget.__get__(None, owner)()
|
||||
|
||||
|
||||
def index_by(objects, *funcs):
|
||||
"""Create a hierarchy of dictionaries by splitting the supplied
|
||||
set of objects on unique values of the supplied functions.
|
||||
@@ -612,14 +606,41 @@ def load_module_from_file(module_name, module_path):
|
||||
"""
|
||||
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
|
||||
import importlib.util
|
||||
spec = importlib.util.spec_from_file_location(module_name, module_path)
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
spec = importlib.util.spec_from_file_location( # novm
|
||||
module_name, module_path)
|
||||
module = importlib.util.module_from_spec(spec) # novm
|
||||
spec.loader.exec_module(module)
|
||||
elif sys.version_info[0] == 3 and sys.version_info[1] < 5:
|
||||
import importlib.machinery
|
||||
loader = importlib.machinery.SourceFileLoader(module_name, module_path)
|
||||
loader = importlib.machinery.SourceFileLoader( # novm
|
||||
module_name, module_path)
|
||||
module = loader.load_module()
|
||||
elif sys.version_info[0] == 2:
|
||||
import imp
|
||||
module = imp.load_source(module_name, module_path)
|
||||
return module
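Whichever branch runs, callers get back a plain module object. A usage sketch (module name, path, and attribute are hypothetical):

    mod = load_module_from_file('my_module', '/path/to/my_module.py')
    mod.some_function()  # attributes behave as after a normal import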
|
||||
|
||||
|
||||
def uniq(sequence):
|
||||
"""Remove strings of duplicate elements from a list.
|
||||
|
||||
This works like the command-line ``uniq`` tool. It filters strings
|
||||
of duplicate elements in a list. Adjacent matching elements are
|
||||
merged into the first occurrence.
|
||||
|
||||
For example::
|
||||
|
||||
uniq([1, 1, 1, 1, 2, 2, 2, 3, 3]) == [1, 2, 3]
|
||||
uniq([1, 1, 1, 1, 2, 2, 2, 1, 1]) == [1, 2, 1]
|
||||
|
||||
"""
|
||||
if not sequence:
|
||||
return []
|
||||
|
||||
uniq_list = [sequence[0]]
|
||||
last = sequence[0]
|
||||
for element in sequence[1:]:
|
||||
if element != last:
|
||||
uniq_list.append(element)
|
||||
last = element
|
||||
return uniq_list
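For reference, the standard library offers the same adjacent-duplicate collapsing; a sketch:

    import itertools

    def uniq_groupby(sequence):
        # groupby() groups *adjacent* equal elements, just like uniq()
        return [key for key, _group in itertools.groupby(sequence)]

    # uniq_groupby([1, 1, 2, 2, 1]) == [1, 2, 1]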
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -8,14 +8,32 @@
|
||||
import errno
|
||||
import time
|
||||
import socket
|
||||
from datetime import datetime
|
||||
|
||||
import llnl.util.tty as tty
|
||||
import spack.util.string
|
||||
|
||||
|
||||
__all__ = ['Lock', 'LockTransaction', 'WriteTransaction', 'ReadTransaction',
|
||||
'LockError', 'LockTimeoutError',
|
||||
'LockPermissionError', 'LockROFileError', 'CantCreateLockError']
|
||||
|
||||
#: Mapping of supported locks to description
|
||||
lock_type = {fcntl.LOCK_SH: 'read', fcntl.LOCK_EX: 'write'}
|
||||
|
||||
#: A useful replacement for functions that should return True when not provided
|
||||
#: for example.
|
||||
true_fn = lambda: True
|
||||
|
||||
|
||||
def _attempts_str(wait_time, nattempts):
|
||||
# Don't print anything if we succeeded on the first try
|
||||
if nattempts <= 1:
|
||||
return ''
|
||||
|
||||
attempts = spack.util.string.plural(nattempts, 'attempt')
|
||||
return ' after {0:0.2f}s and {1}'.format(wait_time, attempts)
|
||||
|
||||
|
||||
class Lock(object):
|
||||
"""This is an implementation of a filesystem lock using Python's lockf.
|
||||
@@ -31,8 +49,8 @@ class Lock(object):
|
||||
maintain multiple locks on the same file.
|
||||
"""
|
||||
|
||||
def __init__(self, path, start=0, length=0, debug=False,
|
||||
default_timeout=None):
|
||||
def __init__(self, path, start=0, length=0, default_timeout=None,
|
||||
debug=False, desc=''):
|
||||
"""Construct a new lock on the file at ``path``.
|
||||
|
||||
By default, the lock applies to the whole file. Optionally,
|
||||
@@ -43,6 +61,16 @@ def __init__(self, path, start=0, length=0, debug=False,
|
||||
not currently expose the ``whence`` parameter -- ``whence`` is
|
||||
always ``os.SEEK_SET`` and ``start`` is always evaluated from the
|
||||
beginning of the file.
|
||||
|
||||
Args:
|
||||
path (str): path to the lock
|
||||
start (int): optional byte offset at which the lock starts
|
||||
length (int): optional number of bytes to lock
|
||||
default_timeout (int): number of seconds to wait for lock attempts,
|
||||
where None means to wait indefinitely
|
||||
debug (bool): debug mode specific to locking
|
||||
desc (str): optional debug message lock description, which is
|
||||
helpful for distinguishing between different Spack locks.
|
||||
"""
|
||||
self.path = path
|
||||
self._file = None
|
||||
@@ -56,6 +84,9 @@ def __init__(self, path, start=0, length=0, debug=False,
|
||||
# enable debug mode
|
||||
self.debug = debug
|
||||
|
||||
# optional debug description
|
||||
self.desc = ' ({0})'.format(desc) if desc else ''
|
||||
|
||||
# If the user doesn't set a default timeout, or if they choose
|
||||
# None, 0, etc. then lock attempts will not time out (unless the
|
||||
# user sets a timeout for each attempt)
|
||||
@@ -89,22 +120,33 @@ def _poll_interval_generator(_wait_times=None):
|
||||
num_requests += 1
|
||||
yield wait_time
|
||||
|
||||
def __repr__(self):
|
||||
"""Formal representation of the lock."""
|
||||
rep = '{0}('.format(self.__class__.__name__)
|
||||
for attr, value in self.__dict__.items():
|
||||
rep += '{0}={1}, '.format(attr, value.__repr__())
|
||||
return '{0})'.format(rep.strip(', '))
|
||||
|
||||
def __str__(self):
|
||||
"""Readable string (with key fields) of the lock."""
|
||||
location = '{0}[{1}:{2}]'.format(self.path, self._start, self._length)
|
||||
timeout = 'timeout={0}'.format(self.default_timeout)
|
||||
activity = '#reads={0}, #writes={1}'.format(self._reads, self._writes)
|
||||
return '({0}, {1}, {2})'.format(location, timeout, activity)
|
||||
|
||||
def _lock(self, op, timeout=None):
|
||||
"""This takes a lock using POSIX locks (``fcntl.lockf``).
|
||||
|
||||
The lock is implemented as a spin lock using a nonblocking call
|
||||
to ``lockf()``.
|
||||
|
||||
On acquiring an exclusive lock, the lock writes this process's
|
||||
pid and host to the lock file, in case the holding process needs
|
||||
to be killed later.
|
||||
|
||||
If the lock times out, it raises a ``LockError``. If the lock is
|
||||
successfully acquired, the total wait time and the number of attempts
|
||||
is returned.
|
||||
"""
|
||||
assert op in (fcntl.LOCK_SH, fcntl.LOCK_EX)
|
||||
assert op in lock_type
|
||||
|
||||
self._log_acquiring('{0} LOCK'.format(lock_type[op].upper()))
|
||||
timeout = timeout or self.default_timeout
|
||||
|
||||
# Create file and parent directories if they don't exist.
|
||||
@@ -132,6 +174,9 @@ def _lock(self, op, timeout=None):
|
||||
# If the file were writable, we'd have opened it 'r+'
|
||||
raise LockROFileError(self.path)
|
||||
|
||||
tty.debug("{0} locking [{1}:{2}]: timeout {3} sec"
|
||||
.format(lock_type[op], self._start, self._length, timeout))
|
||||
|
||||
poll_intervals = iter(Lock._poll_interval_generator())
|
||||
start_time = time.time()
|
||||
num_attempts = 0
|
||||
@@ -143,17 +188,21 @@ def _lock(self, op, timeout=None):
|
||||
|
||||
time.sleep(next(poll_intervals))
|
||||
|
||||
# TBD: Is an extra attempt after timeout needed/appropriate?
|
||||
num_attempts += 1
|
||||
if self._poll_lock(op):
|
||||
total_wait_time = time.time() - start_time
|
||||
return total_wait_time, num_attempts
|
||||
|
||||
raise LockTimeoutError("Timed out waiting for lock.")
|
||||
raise LockTimeoutError("Timed out waiting for a {0} lock."
|
||||
.format(lock_type[op]))
|
||||
|
||||
def _poll_lock(self, op):
|
||||
"""Attempt to acquire the lock in a non-blocking manner. Return whether
|
||||
the locking attempt succeeds
|
||||
"""
|
||||
assert op in lock_type
|
||||
|
||||
try:
|
||||
# Try to get the lock (will raise if not available.)
|
||||
fcntl.lockf(self._file, op | fcntl.LOCK_NB,
|
||||
@@ -163,6 +212,9 @@ def _poll_lock(self, op):
|
||||
if self.debug:
|
||||
# All locks read the owner PID and host
|
||||
self._read_debug_data()
|
||||
tty.debug('{0} locked {1} [{2}:{3}] (owner={4})'
|
||||
.format(lock_type[op], self.path,
|
||||
self._start, self._length, self.pid))
|
||||
|
||||
# Exclusive locks write their PID/host
|
||||
if op == fcntl.LOCK_EX:
|
||||
@@ -171,12 +223,12 @@ def _poll_lock(self, op):
|
||||
return True
|
||||
|
||||
except IOError as e:
|
||||
if e.errno in (errno.EAGAIN, errno.EACCES):
|
||||
# EAGAIN and EACCES == locked by another process
|
||||
pass
|
||||
else:
|
||||
# EAGAIN and EACCES == locked by another process (so try again)
|
||||
if e.errno not in (errno.EAGAIN, errno.EACCES):
|
||||
raise
|
||||
|
||||
return False
|
||||
|
||||
def _ensure_parent_directory(self):
|
||||
parent = os.path.dirname(self.path)
|
||||
|
||||
@@ -231,6 +283,8 @@ def _unlock(self):
|
||||
self._length, self._start, os.SEEK_SET)
|
||||
self._file.close()
|
||||
self._file = None
|
||||
self._reads = 0
|
||||
self._writes = 0
|
||||
|
||||
def acquire_read(self, timeout=None):
|
||||
"""Acquires a recursive, shared lock for reading.
|
||||
@@ -246,15 +300,14 @@ def acquire_read(self, timeout=None):
|
||||
timeout = timeout or self.default_timeout
|
||||
|
||||
if self._reads == 0 and self._writes == 0:
|
||||
self._debug(
|
||||
'READ LOCK: {0.path}[{0._start}:{0._length}] [Acquiring]'
|
||||
.format(self))
|
||||
# can raise LockError.
|
||||
wait_time, nattempts = self._lock(fcntl.LOCK_SH, timeout=timeout)
|
||||
self._acquired_debug('READ LOCK', wait_time, nattempts)
|
||||
self._reads += 1
|
||||
# Log if acquired, which includes counts when verbose
|
||||
self._log_acquired('READ LOCK', wait_time, nattempts)
|
||||
return True
|
||||
else:
|
||||
# Increment the read count for nested lock tracking
|
||||
self._reads += 1
|
||||
return False
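A sketch of the recursion semantics described above (the lock path is hypothetical):

    lock = Lock('/tmp/example.lock')
    lock.acquire_read()  # takes the file lock         -> returns True
    lock.acquire_read()  # nested: just bumps a count  -> returns False
    lock.release_read()  # still nested                -> returns False
    lock.release_read()  # last one unlocks the file   -> returns True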
|
||||
|
||||
@@ -272,23 +325,90 @@ def acquire_write(self, timeout=None):
|
||||
timeout = timeout or self.default_timeout
|
||||
|
||||
if self._writes == 0:
|
||||
self._debug(
|
||||
'WRITE LOCK: {0.path}[{0._start}:{0._length}] [Acquiring]'
|
||||
.format(self))
|
||||
# can raise LockError.
|
||||
wait_time, nattempts = self._lock(fcntl.LOCK_EX, timeout=timeout)
|
||||
self._acquired_debug('WRITE LOCK', wait_time, nattempts)
|
||||
self._writes += 1
|
||||
return True
|
||||
# Log if acquired, which includes counts when verbose
|
||||
self._log_acquired('WRITE LOCK', wait_time, nattempts)
|
||||
|
||||
# return True only if we weren't nested in a read lock.
|
||||
# TODO: we may need to return two values: whether we got
|
||||
# the write lock, and whether this is acquiring a read OR
|
||||
# write lock for the first time. Now it returns the latter.
|
||||
return self._reads == 0
|
||||
else:
|
||||
# Increment the write count for nested lock tracking
|
||||
self._writes += 1
|
||||
return False
|
||||
|
||||
def release_read(self):
|
||||
def is_write_locked(self):
|
||||
"""Check if the file is write locked
|
||||
|
||||
Return:
|
||||
(bool): ``True`` if the path is write locked, otherwise ``False``
|
||||
"""
|
||||
try:
|
||||
self.acquire_read()
|
||||
|
||||
# If we have a read lock then no other process has a write lock.
|
||||
self.release_read()
|
||||
except LockTimeoutError:
|
||||
# Another process is holding a write lock on the file
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def downgrade_write_to_read(self, timeout=None):
|
||||
"""
|
||||
Downgrade from an exclusive write lock to a shared read.
|
||||
|
||||
Raises:
|
||||
LockDowngradeError: if this is an attempt at a nested transaction
|
||||
"""
|
||||
timeout = timeout or self.default_timeout
|
||||
|
||||
if self._writes == 1 and self._reads == 0:
|
||||
self._log_downgrading()
|
||||
# can raise LockError.
|
||||
wait_time, nattempts = self._lock(fcntl.LOCK_SH, timeout=timeout)
|
||||
self._reads = 1
|
||||
self._writes = 0
|
||||
self._log_downgraded(wait_time, nattempts)
|
||||
else:
|
||||
raise LockDowngradeError(self.path)
|
||||
|
||||
def upgrade_read_to_write(self, timeout=None):
|
||||
"""
|
||||
Attempts to upgrade from a shared read lock to an exclusive write.
|
||||
|
||||
Raises:
|
||||
LockUpgradeError: if this is an attempt at a nested transaction
|
||||
"""
|
||||
timeout = timeout or self.default_timeout
|
||||
|
||||
if self._reads == 1 and self._writes == 0:
|
||||
self._log_upgrading()
|
||||
# can raise LockError.
|
||||
wait_time, nattempts = self._lock(fcntl.LOCK_EX, timeout=timeout)
|
||||
self._reads = 0
|
||||
self._writes = 1
|
||||
self._log_upgraded(wait_time, nattempts)
|
||||
else:
|
||||
raise LockUpgradeError(self.path)
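Both transitions are only legal on a non-nested lock, which keeps the read/write counts unambiguous. A sketch of the full cycle (hypothetical path):

    lock = Lock('/tmp/example.lock')
    lock.acquire_read()             # shared lock
    lock.upgrade_read_to_write()    # now exclusive
    lock.downgrade_write_to_read()  # back to shared
    lock.release_read()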
|
||||
|
||||
def release_read(self, release_fn=None):
|
||||
"""Releases a read lock.
|
||||
|
||||
Returns True if the last recursive lock was released, False if
|
||||
there are still outstanding locks.
|
||||
Arguments:
|
||||
release_fn (callable): function to call *before* the last recursive
|
||||
lock (read or write) is released.
|
||||
|
||||
If the last recursive lock will be released, then this will call
|
||||
release_fn and return its result (if provided), or return True
|
||||
(if release_fn was not provided).
|
||||
|
||||
Otherwise, we are still nested inside some other lock, so do not
|
||||
call the release_fn and return False.
|
||||
|
||||
Does limited correctness checking: if a read lock is released
|
||||
when none are held, this will raise an assertion error.
|
||||
@@ -296,81 +416,160 @@ def release_read(self):
|
||||
"""
|
||||
assert self._reads > 0
|
||||
|
||||
locktype = 'READ LOCK'
|
||||
if self._reads == 1 and self._writes == 0:
|
||||
self._debug(
|
||||
'READ LOCK: {0.path}[{0._start}:{0._length}] [Released]'
|
||||
.format(self))
|
||||
self._log_releasing(locktype)
|
||||
|
||||
# we need to call release_fn before releasing the lock
|
||||
release_fn = release_fn or true_fn
|
||||
result = release_fn()
|
||||
|
||||
self._unlock() # can raise LockError.
|
||||
self._reads -= 1
|
||||
return True
|
||||
self._reads = 0
|
||||
self._log_released(locktype)
|
||||
return result
|
||||
else:
|
||||
self._reads -= 1
|
||||
return False
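A sketch of the ``release_fn`` semantics (hypothetical path; ``cleanup`` is a stand-in for real work):

    lock = Lock('/tmp/example.lock')
    lock.acquire_read()
    lock.acquire_read()
    cleanup = lambda: True                 # stand-in callback
    lock.release_read(release_fn=cleanup)  # nested: cleanup NOT run -> False
    lock.release_read(release_fn=cleanup)  # outermost: cleanup runs -> True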
|
||||
|
||||
def release_write(self):
|
||||
def release_write(self, release_fn=None):
|
||||
"""Releases a write lock.
|
||||
|
||||
Returns True if the last recursive lock was released, False if
|
||||
there are still outstanding locks.
|
||||
Arguments:
|
||||
release_fn (callable): function to call before the last recursive
|
||||
write is released.
|
||||
|
||||
If the last recursive *write* lock will be released, then this
|
||||
will call release_fn and return its result (if provided), or
|
||||
return True (if release_fn was not provided). Otherwise, we are
|
||||
still nested inside some other write lock, so do not call the
|
||||
release_fn, and return False.
|
||||
|
||||
Does limited correctness checking: if a write lock is released
|
||||
when none are held, this will raise an assertion error.
|
||||
|
||||
"""
|
||||
assert self._writes > 0
|
||||
release_fn = release_fn or true_fn
|
||||
|
||||
locktype = 'WRITE LOCK'
|
||||
if self._writes == 1 and self._reads == 0:
|
||||
self._debug(
|
||||
'WRITE LOCK: {0.path}[{0._start}:{0._length}] [Released]'
|
||||
.format(self))
|
||||
self._log_releasing(locktype)
|
||||
|
||||
# we need to call release_fn before releasing the lock
|
||||
result = release_fn()
|
||||
|
||||
self._unlock() # can raise LockError.
|
||||
self._writes -= 1
|
||||
return True
|
||||
self._writes = 0
|
||||
self._log_released(locktype)
|
||||
return result
|
||||
else:
|
||||
self._writes -= 1
|
||||
return False
|
||||
|
||||
# when the last *write* is released, we call release_fn here
|
||||
# instead of immediately before releasing the lock.
|
||||
if self._writes == 0:
|
||||
return release_fn()
|
||||
else:
|
||||
return False
|
||||
|
||||
def _debug(self, *args):
|
||||
tty.debug(*args)
|
||||
|
||||
def _acquired_debug(self, lock_type, wait_time, nattempts):
|
||||
attempts_format = 'attempt' if nattempts == 1 else 'attempt'
|
||||
if nattempts > 1:
|
||||
acquired_attempts_format = ' after {0:0.2f}s and {1:d} {2}'.format(
|
||||
wait_time, nattempts, attempts_format)
|
||||
else:
|
||||
# Don't print anything if we succeeded immediately
|
||||
acquired_attempts_format = ''
|
||||
self._debug(
|
||||
'{0}: {1.path}[{1._start}:{1._length}] [Acquired{2}]'
|
||||
.format(lock_type, self, acquired_attempts_format))
|
||||
def _get_counts_desc(self):
|
||||
return '(reads {0}, writes {1})'.format(self._reads, self._writes) \
|
||||
if tty.is_verbose() else ''
|
||||
|
||||
def _log_acquired(self, locktype, wait_time, nattempts):
|
||||
attempts_part = _attempts_str(wait_time, nattempts)
|
||||
now = datetime.now()
|
||||
desc = 'Acquired at %s' % now.strftime("%H:%M:%S.%f")
|
||||
self._debug(self._status_msg(locktype, '{0}{1}'.
|
||||
format(desc, attempts_part)))
|
||||
|
||||
def _log_acquiring(self, locktype):
|
||||
self._debug2(self._status_msg(locktype, 'Acquiring'))
|
||||
|
||||
def _log_downgraded(self, wait_time, nattempts):
|
||||
attempts_part = _attempts_str(wait_time, nattempts)
|
||||
now = datetime.now()
|
||||
desc = 'Downgraded at %s' % now.strftime("%H:%M:%S.%f")
|
||||
self._debug(self._status_msg('READ LOCK', '{0}{1}'
|
||||
.format(desc, attempts_part)))
|
||||
|
||||
def _log_downgrading(self):
|
||||
self._debug2(self._status_msg('WRITE LOCK', 'Downgrading'))
|
||||
|
||||
def _log_released(self, locktype):
|
||||
now = datetime.now()
|
||||
desc = 'Released at %s' % now.strftime("%H:%M:%S.%f")
|
||||
self._debug(self._status_msg(locktype, desc))
|
||||
|
||||
def _log_releasing(self, locktype):
|
||||
self._debug2(self._status_msg(locktype, 'Releasing'))
|
||||
|
||||
def _log_upgraded(self, wait_time, nattempts):
|
||||
attempts_part = _attempts_str(wait_time, nattempts)
|
||||
now = datetime.now()
|
||||
desc = 'Upgraded at %s' % now.strftime("%H:%M:%S.%f")
|
||||
self._debug(self._status_msg('WRITE LOCK', '{0}{1}'.
|
||||
format(desc, attempts_part)))
|
||||
|
||||
def _log_upgrading(self):
|
||||
self._debug2(self._status_msg('READ LOCK', 'Upgrading'))
|
||||
|
||||
def _status_msg(self, locktype, status):
|
||||
status_desc = '[{0}] {1}'.format(status, self._get_counts_desc())
|
||||
return '{0}{1.desc}: {1.path}[{1._start}:{1._length}] {2}'.format(
|
||||
locktype, self, status_desc)
|
||||
|
||||
def _debug2(self, *args):
|
||||
# TODO: Easy place to make a single, temporary change to the
|
||||
# TODO: debug level associated with the more detailed messages.
|
||||
# TODO:
|
||||
# TODO: Someday it would be great if we could switch this to
|
||||
# TODO: another level, perhaps _between_ debug and verbose, or
|
||||
# TODO: some other form of filtering so the first level of
|
||||
# TODO: debugging doesn't have to generate these messages. Using
|
||||
# TODO: verbose here did not work as expected because tests like
|
||||
# TODO: test_spec_json will write the verbose messages to the
|
||||
# TODO: output that is used to check test correctness.
|
||||
tty.debug(*args)
|
||||
|
||||
|
||||
class LockTransaction(object):
|
||||
"""Simple nested transaction context manager that uses a file lock.
|
||||
|
||||
This class can trigger actions when the lock is acquired for the
|
||||
first time and released for the last.
|
||||
Arguments:
|
||||
lock (Lock): underlying lock for this transaction to be acquired on
|
||||
enter and released on exit
|
||||
acquire (callable or contextmanager): function to be called after lock
|
||||
is acquired, or contextmanager to enter after acquire and leave
|
||||
before release.
|
||||
release (callable): function to be called before release. If
|
||||
``acquire`` is a contextmanager, this will be called *after*
|
||||
exiting the nested context and before the lock is released.
|
||||
timeout (float): number of seconds to set for the timeout when
|
||||
acquiring the lock (default no timeout)
|
||||
|
||||
If the ``acquire_fn`` returns a value, it is used as the return value for
|
||||
``__enter__``, allowing it to be passed as the ``as`` argument of a
|
||||
``with`` statement.
|
||||
|
||||
If ``acquire_fn`` returns a context manager, *its* ``__enter__`` function
|
||||
will be called in ``__enter__`` after ``acquire_fn``, and its ``__exit__``
|
||||
function will be called before ``release_fn`` in ``__exit__``, allowing you
|
||||
to nest a context manager to be used along with the lock.
|
||||
will be called after the lock is acquired, and its ``__exit__`` function
|
||||
will be called before ``release_fn`` in ``__exit__``, allowing you to
|
||||
nest a context manager inside this one.
|
||||
|
||||
Timeout for lock is customizable.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, lock, acquire_fn=None, release_fn=None,
|
||||
timeout=None):
|
||||
def __init__(self, lock, acquire=None, release=None, timeout=None):
|
||||
self._lock = lock
|
||||
self._timeout = timeout
|
||||
self._acquire_fn = acquire_fn
|
||||
self._release_fn = release_fn
|
||||
self._acquire_fn = acquire
|
||||
self._release_fn = release
|
||||
self._as = None
|
||||
|
||||
def __enter__(self):
|
||||
@@ -383,13 +582,18 @@ def __enter__(self):
|
||||
|
||||
def __exit__(self, type, value, traceback):
|
||||
suppress = False
|
||||
if self._exit():
|
||||
if self._as and hasattr(self._as, '__exit__'):
|
||||
if self._as.__exit__(type, value, traceback):
|
||||
suppress = True
|
||||
if self._release_fn:
|
||||
if self._release_fn(type, value, traceback):
|
||||
suppress = True
|
||||
|
||||
def release_fn():
|
||||
if self._release_fn is not None:
|
||||
return self._release_fn(type, value, traceback)
|
||||
|
||||
if self._as and hasattr(self._as, '__exit__'):
|
||||
if self._as.__exit__(type, value, traceback):
|
||||
suppress = True
|
||||
|
||||
if self._exit(release_fn):
|
||||
suppress = True
|
||||
|
||||
return suppress
|
||||
|
||||
|
||||
@@ -398,8 +602,8 @@ class ReadTransaction(LockTransaction):
|
||||
def _enter(self):
|
||||
return self._lock.acquire_read(self._timeout)
|
||||
|
||||
def _exit(self):
|
||||
return self._lock.release_read()
|
||||
def _exit(self, release_fn):
|
||||
return self._lock.release_read(release_fn)
|
||||
|
||||
|
||||
class WriteTransaction(LockTransaction):
|
||||
@@ -407,18 +611,36 @@ class WriteTransaction(LockTransaction):
|
||||
def _enter(self):
|
||||
return self._lock.acquire_write(self._timeout)
|
||||
|
||||
def _exit(self):
|
||||
return self._lock.release_write()
|
||||
def _exit(self, release_fn):
|
||||
return self._lock.release_write(release_fn)
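Putting the pieces together, a hedged sketch of how a caller might use the new ``release`` hook (names other than ``Lock`` and ``WriteTransaction`` are hypothetical):

    lock = Lock('/tmp/cache.lock')

    def save_cache(type, value, traceback):  # gets exception info from __exit__
        flush_cache_to_disk()                # hypothetical helper
        return False                         # do not suppress exceptions

    with WriteTransaction(lock, release=save_cache):
        mutate_cache()                       # hypothetical helper; save_cache
                                             # runs just before the outermost
                                             # write lock is released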
|
||||
|
||||
|
||||
class LockError(Exception):
|
||||
"""Raised for any errors related to locks."""
|
||||
|
||||
|
||||
class LockDowngradeError(LockError):
|
||||
"""Raised when unable to downgrade from a write to a read lock."""
|
||||
def __init__(self, path):
|
||||
msg = "Cannot downgrade lock from write to read on file: %s" % path
|
||||
super(LockDowngradeError, self).__init__(msg)
|
||||
|
||||
|
||||
class LockLimitError(LockError):
|
||||
"""Raised when exceed maximum attempts to acquire a lock."""
|
||||
|
||||
|
||||
class LockTimeoutError(LockError):
|
||||
"""Raised when an attempt to acquire a lock times out."""
|
||||
|
||||
|
||||
class LockUpgradeError(LockError):
|
||||
"""Raised when unable to upgrade from a read to a write lock."""
|
||||
def __init__(self, path):
|
||||
msg = "Cannot upgrade lock from read to write on file: %s" % path
|
||||
super(LockUpgradeError, self).__init__(msg)
|
||||
|
||||
|
||||
class LockPermissionError(LockError):
|
||||
"""Raised when there are permission issues with a lock."""
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -135,7 +135,9 @@ def process_stacktrace(countback):
|
||||
def get_timestamp(force=False):
|
||||
"""Get a string timestamp"""
|
||||
if _debug or _timestamp or force:
|
||||
return datetime.now().strftime("[%Y-%m-%d-%H:%M:%S.%f] ")
|
||||
# Note inclusion of the PID is useful for parallel builds.
|
||||
return '[{0}, {1}] '.format(
|
||||
datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), os.getpid())
|
||||
else:
|
||||
return ''
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -199,10 +199,16 @@ def colify(elts, **options):
|
||||
def colify_table(table, **options):
|
||||
"""Version of ``colify()`` for data expressed in rows, (list of lists).
|
||||
|
||||
Same as regular colify but takes a list of lists, where each
|
||||
sub-list must be the same length, and each is interpreted as a
|
||||
row in a table. Regular colify displays a sequential list of
|
||||
values in columns.
|
||||
Same as regular colify but:
|
||||
|
||||
1. This takes a list of lists, where each sub-list must be the
|
||||
same length, and each is interpreted as a row in a table.
|
||||
Regular colify displays a sequential list of values in columns.
|
||||
|
||||
2. Regular colify will always print with 1 column when the output
|
||||
is not a tty. This will always print with the same dimensions as
|
||||
the table argument.
|
||||
|
||||
"""
|
||||
if table is None:
|
||||
raise TypeError("Can't call colify_table on NoneType")
|
||||
@@ -220,6 +226,9 @@ def transpose():
|
||||
raise ValueError("Cannot override columsn in colify_table.")
|
||||
options['cols'] = columns
|
||||
|
||||
# don't reduce to 1 column for non-tty
|
||||
options['tty'] = True
|
||||
|
||||
colify(transpose(), **options)
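A small usage sketch; each sub-list is one row, and the table's shape is preserved even when output is piped:

    rows = [['pkg',   'version'],
            ['zlib',  '1.2.11'],
            ['cmake', '3.16.1']]
    colify_table(rows)  # prints 3 rows x 2 columns regardless of tty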
|
||||
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -7,18 +7,27 @@
|
||||
"""
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import atexit
|
||||
import errno
|
||||
import multiprocessing
|
||||
import os
|
||||
import re
|
||||
import select
|
||||
import sys
|
||||
import traceback
|
||||
import signal
|
||||
from contextlib import contextmanager
|
||||
from six import string_types
|
||||
from six import StringIO
|
||||
|
||||
import llnl.util.tty as tty
|
||||
|
||||
try:
|
||||
import termios
|
||||
except ImportError:
|
||||
termios = None
|
||||
|
||||
|
||||
# Use this to strip escape sequences
|
||||
_escape = re.compile(r'\x1b[^m]*m|\x1b\[?1034h')
|
||||
|
||||
@@ -31,6 +40,25 @@
|
||||
control = re.compile('(\x11\n|\x13\n)')
|
||||
|
||||
|
||||
@contextmanager
|
||||
def ignore_signal(signum):
|
||||
"""Context manager to temporarily ignore a signal."""
|
||||
old_handler = signal.signal(signum, signal.SIG_IGN)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
signal.signal(signum, old_handler)
|
||||
|
||||
|
||||
def _is_background_tty(stream):
|
||||
"""True if the stream is a tty and calling process is in the background.
|
||||
"""
|
||||
return (
|
||||
stream.isatty() and
|
||||
os.getpgrp() != os.tcgetpgrp(stream.fileno())
|
||||
)
|
||||
|
||||
|
||||
def _strip(line):
|
||||
"""Strip color and control characters from a line."""
|
||||
return _escape.sub('', line)
|
||||
@@ -41,22 +69,75 @@ class keyboard_input(object):
|
||||
|
||||
Use this with ``sys.stdin`` for keyboard input, e.g.::
|
||||
|
||||
with keyboard_input(sys.stdin):
|
||||
r, w, x = select.select([sys.stdin], [], [])
|
||||
# ... do something with keypresses ...
|
||||
with keyboard_input(sys.stdin) as kb:
|
||||
while True:
|
||||
kb.check_fg_bg()
|
||||
r, w, x = select.select([sys.stdin], [], [])
|
||||
# ... do something with keypresses ...
|
||||
|
||||
This disables canonical input so that keypresses are available on the
|
||||
stream immediately. Typically standard input allows line editing,
|
||||
which means keypresses won't be sent until the user hits return.
|
||||
The ``keyboard_input`` context manager disables canonical
|
||||
(line-based) input and echoing, so that keypresses are available on
|
||||
the stream immediately, and they are not printed to the
|
||||
terminal. Typically, standard input is line-buffered, which means
|
||||
keypresses won't be sent until the user hits return. In this mode, a
|
||||
user can hit, e.g., 'v', and it will be read on the other end of the
|
||||
pipe immediately but not printed.
|
||||
|
||||
It also disables echoing, so that keys pressed aren't printed to the
|
||||
terminal. So, the user can hit, e.g., 'v', and it's read on the
|
||||
other end of the pipe immediately but not printed.
|
||||
The handler takes care to ensure that terminal changes only take
|
||||
effect when the calling process is in the foreground. If the process
|
||||
is backgrounded, canonical mode and echo are re-enabled. They are
|
||||
disabled again when the calling process comes back to the foreground.
|
||||
|
||||
When the with block completes, prior TTY settings are restored.
|
||||
This context manager works through a single signal handler for
|
||||
``SIGTSTP``, along with a polling routine called ``check_fg_bg()``.
|
||||
Here are the relevant states, transitions, and POSIX signals::
|
||||
|
||||
[Running] -------- Ctrl-Z sends SIGTSTP ------------.
|
||||
[ in FG ] <------- fg sends SIGCONT --------------. |
|
||||
^ | |
|
||||
| fg (no signal) | |
|
||||
| | v
|
||||
[Running] <------- bg sends SIGCONT ---------- [Stopped]
|
||||
[ in BG ] [ in BG ]
|
||||
|
||||
We handle all transitions except for ``SIGTSTP`` generated by Ctrl-Z
|
||||
by periodically calling ``check_fg_bg()``. This routine notices if
|
||||
we are in the background with canonical mode or echo disabled, or if
|
||||
we are in the foreground with canonical mode or echo still enabled,
|
||||
and it fixes the terminal settings in response.
|
||||
|
||||
``check_fg_bg()`` works *except* for when the process is stopped with
|
||||
``SIGTSTP``. We cannot rely on a periodic timer in this case, as it
|
||||
may not run before the process stops. We therefore restore terminal
|
||||
settings in the ``SIGTSTP`` handler.
|
||||
|
||||
Additional notes:
|
||||
|
||||
* We mostly use polling here instead of a SIGALRM timer or a
|
||||
thread. This is to avoid the complexities of many interrupts, which
|
||||
seem to make system calls (like I/O) unreliable in older Python
|
||||
versions (2.6 and 2.7). See these issues for details:
|
||||
|
||||
1. https://www.python.org/dev/peps/pep-0475/
|
||||
2. https://bugs.python.org/issue8354
|
||||
|
||||
There are essentially too many ways for asynchronous signals to go
|
||||
wrong if we also have to support older Python versions, so we opt
|
||||
not to use them.
|
||||
|
||||
* ``SIGSTOP`` can stop a process (in the foreground or background),
|
||||
but it can't be caught. Because of this, we can't fix any terminal
|
||||
settings on ``SIGSTOP``, and the terminal will be left with
|
||||
``ICANON`` and ``ECHO`` disabled until it resumes execution.
|
||||
|
||||
* Technically, a process *could* be sent ``SIGTSTP`` while running in
|
||||
the foreground, without the shell backgrounding that process. This
|
||||
doesn't happen in practice, and we assume that ``SIGTSTP`` always
|
||||
means that defaults should be restored.
|
||||
|
||||
* We rely on ``termios`` support. Without it, or if the stream isn't
|
||||
a TTY, ``keyboard_input`` has no effect.
|
||||
|
||||
Note: this depends on termios support. If termios isn't available,
|
||||
or if the stream isn't a TTY, this context manager has no effect.
|
||||
"""
|
||||
def __init__(self, stream):
|
||||
"""Create a context manager that will enable keyboard input on stream.
|
||||
@@ -69,44 +150,97 @@ def __init__(self, stream):
|
||||
"""
|
||||
self.stream = stream
|
||||
|
||||
def _is_background(self):
|
||||
"""True iff calling process is in the background."""
|
||||
return _is_background_tty(self.stream)
|
||||
|
||||
def _get_canon_echo_flags(self):
|
||||
"""Get current termios canonical and echo settings."""
|
||||
cfg = termios.tcgetattr(self.stream)
|
||||
return (
|
||||
bool(cfg[3] & termios.ICANON),
|
||||
bool(cfg[3] & termios.ECHO),
|
||||
)
|
||||
|
||||
def _enable_keyboard_input(self):
|
||||
"""Disable canonical input and echoing on ``self.stream``."""
|
||||
# "enable" input by disabling canonical mode and echo
|
||||
new_cfg = termios.tcgetattr(self.stream)
|
||||
new_cfg[3] &= ~termios.ICANON
|
||||
new_cfg[3] &= ~termios.ECHO
|
||||
|
||||
# Apply new settings for terminal
|
||||
with ignore_signal(signal.SIGTTOU):
|
||||
termios.tcsetattr(self.stream, termios.TCSANOW, new_cfg)
|
||||
|
||||
def _restore_default_terminal_settings(self):
|
||||
"""Restore the original input configuration on ``self.stream``."""
|
||||
# _restore_default_terminal_settings can be called in the foreground
|
||||
# or background. When called in the background, tcsetattr triggers
|
||||
# SIGTTOU, which we must ignore, or the process will be stopped.
|
||||
with ignore_signal(signal.SIGTTOU):
|
||||
termios.tcsetattr(self.stream, termios.TCSANOW, self.old_cfg)
|
||||
|
||||
def _tstp_handler(self, signum, frame):
|
||||
self._restore_default_terminal_settings()
|
||||
os.kill(os.getpid(), signal.SIGSTOP)
|
||||
|
||||
def check_fg_bg(self):
|
||||
# old_cfg is set up in __enter__ and indicates that we have
|
||||
# termios and a valid stream.
|
||||
if not self.old_cfg:
|
||||
return
|
||||
|
||||
# query terminal flags and fg/bg status
|
||||
flags = self._get_canon_echo_flags()
|
||||
bg = self._is_background()
|
||||
|
||||
# restore sanity if flags are amiss -- see diagram in class docs
|
||||
if not bg and any(flags): # fg, but input not enabled
|
||||
self._enable_keyboard_input()
|
||||
elif bg and not all(flags): # bg, but input enabled
|
||||
self._restore_default_terminal_settings()
|
||||
|
||||
def __enter__(self):
|
||||
"""Enable immediate keypress input on stream.
|
||||
"""Enable immediate keypress input, while this process is foreground.
|
||||
|
||||
If the stream is not a TTY or the system doesn't support termios,
|
||||
do nothing.
|
||||
"""
|
||||
self.old_cfg = None
|
||||
self.old_handlers = {}
|
||||
|
||||
# Ignore all this if the input stream is not a tty.
|
||||
if not self.stream or not self.stream.isatty():
|
||||
return
|
||||
return self
|
||||
|
||||
try:
|
||||
# If this fails, self.old_cfg will remain None
|
||||
import termios
|
||||
if termios:
|
||||
# save old termios settings to restore later
|
||||
self.old_cfg = termios.tcgetattr(self.stream)
|
||||
|
||||
# save old termios settings
|
||||
fd = self.stream.fileno()
|
||||
self.old_cfg = termios.tcgetattr(fd)
|
||||
# Install a signal handler to disable/enable keyboard input
|
||||
# when the process moves between foreground and background.
|
||||
self.old_handlers[signal.SIGTSTP] = signal.signal(
|
||||
signal.SIGTSTP, self._tstp_handler)
|
||||
|
||||
# create new settings with canonical input and echo
|
||||
# disabled, so keypresses are immediate & don't echo.
|
||||
self.new_cfg = termios.tcgetattr(fd)
|
||||
self.new_cfg[3] &= ~termios.ICANON
|
||||
self.new_cfg[3] &= ~termios.ECHO
|
||||
# add an atexit handler to ensure the terminal is restored
|
||||
atexit.register(self._restore_default_terminal_settings)
|
||||
|
||||
# Apply new settings for terminal
|
||||
termios.tcsetattr(fd, termios.TCSADRAIN, self.new_cfg)
|
||||
# enable keyboard input initially (if foreground)
|
||||
if not self._is_background():
|
||||
self._enable_keyboard_input()
|
||||
|
||||
except Exception:
|
||||
pass # some OS's do not support termios, so ignore
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exception, traceback):
|
||||
"""If termios was avaialble, restore old settings."""
|
||||
"""If termios was available, restore old settings."""
|
||||
if self.old_cfg:
|
||||
import termios
|
||||
termios.tcsetattr(
|
||||
self.stream.fileno(), termios.TCSADRAIN, self.old_cfg)
|
||||
self._restore_default_terminal_settings()
|
||||
|
||||
# restore SIGSTP and SIGCONT handlers
|
||||
if self.old_handlers:
|
||||
for signum, old_handler in self.old_handlers.items():
|
||||
signal.signal(signum, old_handler)
|
||||
|
||||
|
||||
class Unbuffered(object):
|
||||
@@ -282,11 +416,11 @@ def __enter__(self):
|
||||
self._saved_debug = tty._debug
|
||||
|
||||
# OS-level pipe for redirecting output to logger
|
||||
self.read_fd, self.write_fd = os.pipe()
|
||||
read_fd, write_fd = os.pipe()
|
||||
|
||||
# Multiprocessing pipe for communication back from the daemon
|
||||
# Currently only used to save echo value between uses
|
||||
self.parent, self.child = multiprocessing.Pipe()
|
||||
self.parent_pipe, child_pipe = multiprocessing.Pipe()
|
||||
|
||||
# Sets a daemon that writes to file what it reads from a pipe
|
||||
try:
|
||||
@@ -297,10 +431,15 @@ def __enter__(self):
|
||||
input_stream = None # just don't forward input if this fails
|
||||
|
||||
self.process = multiprocessing.Process(
|
||||
target=self._writer_daemon, args=(input_stream,))
|
||||
target=_writer_daemon,
|
||||
args=(
|
||||
input_stream, read_fd, write_fd, self.echo, self.log_file,
|
||||
child_pipe
|
||||
)
|
||||
)
|
||||
self.process.daemon = True # must set before start()
|
||||
self.process.start()
|
||||
os.close(self.read_fd) # close in the parent process
|
||||
os.close(read_fd) # close in the parent process
|
||||
|
||||
finally:
|
||||
if input_stream:
|
||||
@@ -322,9 +461,9 @@ def __enter__(self):
|
||||
self._saved_stderr = os.dup(sys.stderr.fileno())
|
||||
|
||||
# redirect to the pipe we created above
|
||||
os.dup2(self.write_fd, sys.stdout.fileno())
|
||||
os.dup2(self.write_fd, sys.stderr.fileno())
|
||||
os.close(self.write_fd)
|
||||
os.dup2(write_fd, sys.stdout.fileno())
|
||||
os.dup2(write_fd, sys.stderr.fileno())
|
||||
os.close(write_fd)
|
||||
|
||||
else:
|
||||
# Handle I/O the Python way. This won't redirect lower-level
|
||||
@@ -337,7 +476,7 @@ def __enter__(self):
|
||||
self._saved_stderr = sys.stderr
|
||||
|
||||
# create a file object for the pipe; redirect to it.
|
||||
pipe_fd_out = os.fdopen(self.write_fd, 'w')
|
||||
pipe_fd_out = os.fdopen(write_fd, 'w')
|
||||
sys.stdout = pipe_fd_out
|
||||
sys.stderr = pipe_fd_out
|
||||
|
||||
@@ -376,14 +515,14 @@ def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
|
||||
# print log contents in parent if needed.
|
||||
if self.write_log_in_parent:
|
||||
string = self.parent.recv()
|
||||
string = self.parent_pipe.recv()
|
||||
self.file_like.write(string)
|
||||
|
||||
if self.close_log_in_parent:
|
||||
self.log_file.close()
|
||||
|
||||
# recover and store echo settings from the child before it dies
|
||||
self.echo = self.parent.recv()
|
||||
self.echo = self.parent_pipe.recv()
|
||||
|
||||
# join the daemon process. The daemon will quit automatically
|
||||
# when the write pipe is closed; we just wait for it here.
|
||||
@@ -408,72 +547,166 @@ def force_echo(self):
|
||||
# exactly before and after the text we want to echo.
|
||||
sys.stdout.write(xon)
|
||||
sys.stdout.flush()
|
||||
yield
|
||||
sys.stdout.write(xoff)
|
||||
sys.stdout.flush()
|
||||
|
||||
def _writer_daemon(self, stdin):
|
||||
"""Daemon that writes output to the log file and stdout."""
|
||||
# Use line buffering (3rd param = 1) since Python 3 has a bug
|
||||
# that prevents unbuffered text I/O.
|
||||
in_pipe = os.fdopen(self.read_fd, 'r', 1)
|
||||
os.close(self.write_fd)
|
||||
|
||||
echo = self.echo # initial echo setting, user-controllable
|
||||
force_echo = False # parent can force echo for certain output
|
||||
|
||||
# list of streams to select from
|
||||
istreams = [in_pipe, stdin] if stdin else [in_pipe]
|
||||
|
||||
log_file = self.log_file
|
||||
try:
|
||||
with keyboard_input(stdin):
|
||||
while True:
|
||||
# No need to set any timeout for select.select
|
||||
# Wait until a key press or an event on in_pipe.
|
||||
rlist, _, _ = select.select(istreams, [], [])
|
||||
|
||||
# Allow user to toggle echo with 'v' key.
|
||||
# Currently ignores other chars.
|
||||
if stdin in rlist:
|
||||
if stdin.read(1) == 'v':
|
||||
echo = not echo
|
||||
|
||||
# Handle output from the with block process.
|
||||
if in_pipe in rlist:
|
||||
# If we arrive here it means that in_pipe was
|
||||
# ready for reading : it should never happen that
|
||||
# line is false-ish
|
||||
line = in_pipe.readline()
|
||||
if not line:
|
||||
break # EOF
|
||||
|
||||
# find control characters and strip them.
|
||||
controls = control.findall(line)
|
||||
line = re.sub(control, '', line)
|
||||
|
||||
# Echo to stdout if requested or forced
|
||||
if echo or force_echo:
|
||||
sys.stdout.write(line)
|
||||
sys.stdout.flush()
|
||||
|
||||
# Stripped output to log file.
|
||||
log_file.write(_strip(line))
|
||||
log_file.flush()
|
||||
|
||||
if xon in controls:
|
||||
force_echo = True
|
||||
if xoff in controls:
|
||||
force_echo = False
|
||||
except BaseException:
|
||||
tty.error("Exception occurred in writer daemon!")
|
||||
traceback.print_exc()
|
||||
|
||||
yield
|
||||
finally:
|
||||
# send written data back to parent if we used a StringIO
|
||||
if self.write_log_in_parent:
|
||||
self.child.send(log_file.getvalue())
|
||||
log_file.close()
|
||||
sys.stdout.write(xoff)
|
||||
sys.stdout.flush()
|
||||
|
||||
# send echo value back to the parent so it can be preserved.
|
||||
self.child.send(echo)
|
||||
|
||||
def _writer_daemon(stdin, read_fd, write_fd, echo, log_file, control_pipe):
|
||||
"""Daemon used by ``log_output`` to write to a log file and to ``stdout``.
|
||||
|
||||
The daemon receives output from the parent process and writes it both
|
||||
to a log and, optionally, to ``stdout``. The relationship looks like
|
||||
this::
|
||||
|
||||
Terminal
|
||||
|
|
||||
| +-------------------------+
|
||||
| | Parent Process |
|
||||
+--------> | with log_output(): |
|
||||
| stdin | ... |
|
||||
| +-------------------------+
|
||||
| ^ | write_fd (parent's redirected stdout)
|
||||
| | control |
|
||||
| | pipe |
|
||||
| | v read_fd
|
||||
| +-------------------------+ stdout
|
||||
| | Writer daemon |------------>
|
||||
+--------> | read from read_fd | log_file
|
||||
stdin | write to out and log |------------>
|
||||
+-------------------------+
|
||||
|
||||
Within the ``log_output`` handler, the parent's output is redirected
|
||||
to a pipe from which the daemon reads. The daemon writes each line
|
||||
from the pipe to a log file and (optionally) to ``stdout``. The user
|
||||
can hit ``v`` to toggle output on ``stdout``.
|
||||
|
||||
In addition to the input and output file descriptors, the daemon
|
||||
interacts with the parent via ``control_pipe``. It reports whether
|
||||
``stdout`` was enabled or disabled when it finished and, if the
|
||||
``log_file`` is a ``StringIO`` object, then the daemon also sends the
|
||||
logged output back to the parent as a string, to be written to the
|
||||
``StringIO`` in the parent. This is mainly for testing.
|
||||
|
||||
Arguments:
|
||||
stdin (stream): input from the terminal
|
||||
read_fd (int): pipe for reading from parent's redirected stdout
|
||||
write_fd (int): parent's end of the pipe will write to (will be
|
||||
immediately closed by the writer daemon)
|
||||
echo (bool): initial echo setting -- controlled by user and
|
||||
preserved across multiple writer daemons
|
||||
log_file (file-like): file to log all output
|
||||
control_pipe (Pipe): multiprocessing pipe on which to send control
|
||||
information to the parent
|
||||
|
||||
"""
|
||||
# Use line buffering (3rd param = 1) since Python 3 has a bug
|
||||
# that prevents unbuffered text I/O.
|
||||
in_pipe = os.fdopen(read_fd, 'r', 1)
|
||||
os.close(write_fd)
|
||||
|
||||
# list of streams to select from
|
||||
istreams = [in_pipe, stdin] if stdin else [in_pipe]
|
||||
force_echo = False # parent can force echo for certain output
|
||||
|
||||
try:
|
||||
with keyboard_input(stdin) as kb:
|
||||
while True:
|
||||
# fix the terminal settings if we recently came to
|
||||
# the foreground
|
||||
kb.check_fg_bg()
|
||||
|
||||
# wait for input from any stream. use a coarse timeout to
|
||||
# allow other checks while we wait for input
|
||||
rlist, _, _ = _retry(select.select)(istreams, [], [], 1e-1)
|
||||
|
||||
# Allow user to toggle echo with 'v' key.
|
||||
# Currently ignores other chars.
|
||||
# only read stdin if we're in the foreground
|
||||
if stdin in rlist and not _is_background_tty(stdin):
|
||||
# it's possible to be backgrounded between the above
|
||||
# check and the read, so we ignore SIGTTIN here.
|
||||
with ignore_signal(signal.SIGTTIN):
|
||||
try:
|
||||
if stdin.read(1) == 'v':
|
||||
echo = not echo
|
||||
except IOError as e:
|
||||
# If SIGTTIN is ignored, the system gives EIO
|
||||
# to let the caller know the read failed b/c it
|
||||
# was in the bg. Ignore that too.
|
||||
if e.errno != errno.EIO:
|
||||
raise
|
||||
|
||||
if in_pipe in rlist:
|
||||
# Handle output from the calling process.
|
||||
line = _retry(in_pipe.readline)()
|
||||
if not line:
|
||||
break
|
||||
|
||||
# find control characters and strip them.
|
||||
controls = control.findall(line)
|
||||
line = control.sub('', line)
|
||||
|
||||
# Echo to stdout if requested or forced.
|
||||
if echo or force_echo:
|
||||
sys.stdout.write(line)
|
||||
sys.stdout.flush()
|
||||
|
||||
# Stripped output to log file.
|
||||
log_file.write(_strip(line))
|
||||
log_file.flush()
|
||||
|
||||
if xon in controls:
|
||||
force_echo = True
|
||||
if xoff in controls:
|
||||
force_echo = False
|
||||
|
||||
except BaseException:
|
||||
tty.error("Exception occurred in writer daemon!")
|
||||
traceback.print_exc()
|
||||
|
||||
finally:
|
||||
# send written data back to parent if we used a StringIO
|
||||
if isinstance(log_file, StringIO):
|
||||
control_pipe.send(log_file.getvalue())
|
||||
log_file.close()
|
||||
|
||||
# send echo value back to the parent so it can be preserved.
|
||||
control_pipe.send(echo)
|
||||
|
||||
|
||||
def _retry(function):
|
||||
"""Retry a call if errors indicating an interrupted system call occur.
|
||||
|
||||
Interrupted system calls return -1 and set ``errno`` to ``EINTR`` if
|
||||
certain flags are not set. Newer Pythons automatically retry them,
|
||||
but older Pythons do not, so we need to retry the calls.
|
||||
|
||||
This function converts a call like this:
|
||||
|
||||
syscall(args)
|
||||
|
||||
and makes it retry by wrapping the function like this:
|
||||
|
||||
_retry(syscall)(args)
|
||||
|
||||
This is a private function because EINTR is unfortunately raised in
|
||||
different ways from different functions, and we only handle the ones
|
||||
relevant for this file.
|
||||
|
||||
"""
|
||||
def wrapped(*args, **kwargs):
|
||||
while True:
|
||||
try:
|
||||
return function(*args, **kwargs)
|
||||
except IOError as e:
|
||||
if e.errno == errno.EINTR:
|
||||
continue
|
||||
raise
|
||||
except select.error as e:
|
||||
if e.args[0] == errno.EINTR:
|
||||
continue
|
||||
raise
|
||||
return wrapped
|
||||
|
344
lib/spack/llnl/util/tty/pty.py
Normal file
@@ -0,0 +1,344 @@
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""The pty module handles pseudo-terminals.
|
||||
|
||||
Currently, the infrastructure here is only used to test llnl.util.tty.log.
|
||||
|
||||
If this is used outside a testing environment, we will want to reconsider
|
||||
things like timeouts in ``ProcessController.wait()``, which are set to
|
||||
get tests done quickly, not to avoid high CPU usage.
|
||||
|
||||
"""
|
||||
from __future__ import print_function
|
||||
|
||||
import os
|
||||
import signal
|
||||
import multiprocessing
|
||||
import re
|
||||
import sys
|
||||
import termios
|
||||
import time
|
||||
import traceback
|
||||
|
||||
import llnl.util.tty.log as log
|
||||
|
||||
from spack.util.executable import which
|
||||
|
||||
|
||||
class ProcessController(object):
|
||||
"""Wrapper around some fundamental process control operations.
|
||||
|
||||
This allows one process to drive another, much as a shell would, by
sending signals and I/O.
|
||||
|
||||
"""
|
||||
def __init__(self, pid, master_fd,
|
||||
timeout=1, sleep_time=1e-1, debug=False):
|
||||
"""Create a controller to manipulate the process with id ``pid``
|
||||
|
||||
Args:
|
||||
pid (int): id of process to control
|
||||
master_fd (int): master file descriptor attached to pid's stdin
|
||||
timeout (int): time in seconds for wait operations to time out
|
||||
(default 1 second)
|
||||
sleep_time (int): time to sleep after signals, to control the
|
||||
signal rate of the controller (default 1e-1)
|
||||
debug (bool): whether ``horizontal_line()`` and ``status()`` should
|
||||
produce output when called (default False)
|
||||
|
||||
``sleep_time`` allows the caller to insert delays after calls
|
||||
that signal or modify the controlled process. Python behaves very
|
||||
poorly if signals arrive too fast, and drowning a Python process
that has a Python signal handler in signals can kill the process and
hang our tests, so we throttle this to a closer-to-interactive rate.
|
||||
|
||||
"""
|
||||
self.pid = pid
|
||||
self.pgid = os.getpgid(pid)
|
||||
self.master_fd = master_fd
|
||||
self.timeout = timeout
|
||||
self.sleep_time = sleep_time
|
||||
self.debug = debug
|
||||
|
||||
# we need the ps command to wait for process statuses
|
||||
self.ps = which("ps", required=True)
|
||||
|
||||
def get_canon_echo_attrs(self):
|
||||
"""Get echo and canon attributes of the terminal of master_fd."""
|
||||
cfg = termios.tcgetattr(self.master_fd)
|
||||
return (
|
||||
bool(cfg[3] & termios.ICANON),
|
||||
bool(cfg[3] & termios.ECHO),
|
||||
)
|
||||
|
||||
def horizontal_line(self, name):
|
||||
"""Labled horizontal line for debugging."""
|
||||
if self.debug:
|
||||
sys.stderr.write(
|
||||
"------------------------------------------- %s\n" % name
|
||||
)
|
||||
|
||||
def status(self):
|
||||
"""Print debug message with status info for the child."""
|
||||
if self.debug:
|
||||
canon, echo = self.get_canon_echo_attrs()
|
||||
sys.stderr.write("canon: %s, echo: %s\n" % (
|
||||
"on" if canon else "off",
|
||||
"on" if echo else "off",
|
||||
))
|
||||
sys.stderr.write("input: %s\n" % self.input_on())
|
||||
sys.stderr.write("bg: %s\n" % self.background())
|
||||
sys.stderr.write("\n")
|
||||
|
||||
def input_on(self):
|
||||
"""True if keyboard input is enabled on the master_fd pty."""
|
||||
return self.get_canon_echo_attrs() == (False, False)
|
||||
|
||||
def background(self):
|
||||
"""True if pgid is in a background pgroup of master_fd's terminal."""
|
||||
return self.pgid != os.tcgetpgrp(self.master_fd)
|
||||
|
||||
def tstp(self):
|
||||
"""Send SIGTSTP to the controlled process."""
|
||||
self.horizontal_line("tstp")
|
||||
os.killpg(self.pgid, signal.SIGTSTP)
|
||||
time.sleep(self.sleep_time)
|
||||
|
||||
def cont(self):
|
||||
self.horizontal_line("cont")
|
||||
os.killpg(self.pgid, signal.SIGCONT)
|
||||
time.sleep(self.sleep_time)
|
||||
|
||||
def fg(self):
|
||||
self.horizontal_line("fg")
|
||||
with log.ignore_signal(signal.SIGTTOU):
|
||||
os.tcsetpgrp(self.master_fd, os.getpgid(self.pid))
|
||||
time.sleep(self.sleep_time)
|
||||
|
||||
def bg(self):
|
||||
self.horizontal_line("bg")
|
||||
with log.ignore_signal(signal.SIGTTOU):
|
||||
os.tcsetpgrp(self.master_fd, os.getpgrp())
|
||||
time.sleep(self.sleep_time)
|
||||
|
||||
def write(self, byte_string):
|
||||
self.horizontal_line("write '%s'" % byte_string.decode("utf-8"))
|
||||
os.write(self.master_fd, byte_string)
|
||||
|
||||
def wait(self, condition):
|
||||
start = time.time()
|
||||
while (time.time() - start) < self.timeout and not condition():
|
||||
time.sleep(1e-2)
|
||||
assert condition()
|
||||
|
||||
def wait_enabled(self):
|
||||
self.wait(lambda: self.input_on() and not self.background())
|
||||
|
||||
def wait_disabled(self):
|
||||
self.wait(lambda: not self.input_on() and self.background())
|
||||
|
||||
def wait_disabled_fg(self):
|
||||
self.wait(lambda: not self.input_on() and not self.background())
|
||||
|
||||
def proc_status(self):
|
||||
status = self.ps("-p", str(self.pid), "-o", "stat", output=str)
|
||||
status = re.split(r"\s+", status.strip(), re.M)
|
||||
return status[1]
|
||||
|
||||
def wait_stopped(self):
|
||||
self.wait(lambda: "T" in self.proc_status())
|
||||
|
||||
def wait_running(self):
|
||||
self.wait(lambda: "T" not in self.proc_status())
|
||||
|
||||
|
||||
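# A usage sketch for the controller above (not part of the original file):
# inside a test's master function, one might stop and resume the child and
# use the wait_* helpers to synchronize on its state, e.g.:
#
#     ctl.write(b"some input\n")   # type at the child's terminal
#     ctl.tstp()                   # Ctrl-Z-like stop...
#     ctl.wait_stopped()           # ...until ps reports 'T'
#     ctl.cont()                   # resume...
#     ctl.wait_running()           # ...until it is running again
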
class PseudoShell(object):
    """Sets up master and child processes with a PTY.

    You can create a ``PseudoShell`` if you want to test how some
    function responds to terminal input. This is a pseudo-shell from a
    job control perspective; ``master_function`` and ``child_function``
    are set up with a pseudoterminal (pty) so that the master can drive
    the child through process control signals and I/O.

    The two functions should have signatures like this::

        def master_function(proc, ctl, **kwargs)
        def child_function(**kwargs)

    ``master_function`` is spawned in its own process and passed three
    arguments:

    proc
      the ``multiprocessing.Process`` object representing the child
    ctl
      a ``ProcessController`` object tied to the child
    kwargs
      keyword arguments passed from ``PseudoShell.start()``.

    ``child_function`` is only passed ``kwargs`` delegated from
    ``PseudoShell.start()``.

    The child process's ``sys.stdin`` is connected to the child end of
    the pty whose master end is ``ctl.master_fd``. Both processes will
    share the same ``sys.stdout`` and ``sys.stderr`` as the process
    instantiating ``PseudoShell``.

    Here are the relationships between processes created::

        ._________________________________________________________.
        | Child Process                                           | pid     2
        | - runs child_function                                   | pgroup  2
        |_________________________________________________________| session 1
                          ^
                          | create process with master_fd connected to stdin
                          | stdout, stderr are the same as caller
        ._________________________________________________________.
        | Master Process                                          | pid     1
        | - runs master_function                                  | pgroup  1
        | - uses ProcessController and master_fd to control child | session 1
        |_________________________________________________________|
                          ^
                          | create process
                          | stdin, stdout, stderr are the same as caller
        ._________________________________________________________.
        | Caller                                                  | pid     0
        | - Constructs, starts, joins PseudoShell                 | pgroup  0
        | - provides master_function, child_function              | session 0
        |_________________________________________________________|

    """
    def __init__(self, master_function, child_function):
        self.proc = None
        self.master_function = master_function
        self.child_function = child_function

        # these can be optionally set to change defaults
        self.controller_timeout = 1
        self.sleep_time = 0

    def start(self, **kwargs):
        """Start the master and child processes.

        Arguments:
            kwargs (dict): arbitrary keyword arguments that will be
                passed to master and child functions

        The master process will create the child, then call
        ``master_function``. The child process will call
        ``child_function``.

        """
        self.proc = multiprocessing.Process(
            target=PseudoShell._set_up_and_run_master_function,
            args=(self.master_function, self.child_function,
                  self.controller_timeout, self.sleep_time),
            kwargs=kwargs,
        )
        self.proc.start()

    def join(self):
        """Wait for the master process to finish, and return its exit code."""
        self.proc.join()
        return self.proc.exitcode

    @staticmethod
    def _set_up_and_run_child_function(
            tty_name, stdout_fd, stderr_fd, ready, child_function, **kwargs):
        """Child process wrapper for PseudoShell.

        Handles the mechanics of setting up a PTY, then calls
        ``child_function``.

        """
        # new process group, like a command or pipeline launched by a shell
        os.setpgrp()

        # take controlling terminal and set up pty IO
        stdin_fd = os.open(tty_name, os.O_RDWR)
        os.dup2(stdin_fd, sys.stdin.fileno())
        os.dup2(stdout_fd, sys.stdout.fileno())
        os.dup2(stderr_fd, sys.stderr.fileno())
        os.close(stdin_fd)

        if kwargs.get("debug"):
            sys.stderr.write(
                "child: stdin.isatty(): %s\n" % sys.stdin.isatty())

        # tell the parent that we're really running
        if kwargs.get("debug"):
            sys.stderr.write("child: ready!\n")
        ready.value = True

        try:
            child_function(**kwargs)
        except BaseException:
            traceback.print_exc()

    @staticmethod
    def _set_up_and_run_master_function(
            master_function, child_function, controller_timeout, sleep_time,
            **kwargs):
        """Set up a pty, spawn a child process, and execute master_function.

        Handles the mechanics of setting up a PTY, then calls
        ``master_function``.

        """
        os.setsid()  # new session; this process is the controller

        master_fd, child_fd = os.openpty()
        pty_name = os.ttyname(child_fd)

        # take controlling terminal
        pty_fd = os.open(pty_name, os.O_RDWR)
        os.close(pty_fd)

        ready = multiprocessing.Value('i', False)
        child_process = multiprocessing.Process(
            target=PseudoShell._set_up_and_run_child_function,
            args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(),
                  ready, child_function),
            kwargs=kwargs,
        )
        child_process.start()

        # wait for subprocess to be running and connected.
        while not ready.value:
            time.sleep(1e-5)

        if kwargs.get("debug"):
            sys.stderr.write("pid: %d\n" % os.getpid())
            sys.stderr.write("pgid: %d\n" % os.getpgrp())
            sys.stderr.write("sid: %d\n" % os.getsid(0))
            sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(master_fd))
            sys.stderr.write("\n")

            child_pgid = os.getpgid(child_process.pid)
            sys.stderr.write("child pid: %d\n" % child_process.pid)
            sys.stderr.write("child pgid: %d\n" % child_pgid)
            sys.stderr.write("child sid: %d\n" % os.getsid(child_process.pid))
            sys.stderr.write("\n")
            sys.stderr.flush()

        # set up master to ignore SIGTSTP, like a shell
        signal.signal(signal.SIGTSTP, signal.SIG_IGN)

        # call the master function once the child is ready
        try:
            controller = ProcessController(
                child_process.pid, master_fd, debug=kwargs.get("debug"))
            controller.timeout = controller_timeout
            controller.sleep_time = sleep_time
            error = master_function(child_process, controller, **kwargs)
        except BaseException:
            error = 1
            traceback.print_exc()

        child_process.join()

        # return whether either the parent or child failed
        return error or child_process.exitcode
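For orientation, a minimal usage sketch of the classes above (not from the
file itself; the child simply echoes one line typed by the master, and
``input()`` assumes Python 3):

    def child_function(**kwargs):
        print("child saw: %s" % input())    # reads from the pty stdin

    def master_function(proc, ctl, **kwargs):
        ctl.write(b"hello\n")               # type into the child's terminal
        return 0                            # nonzero marks failure

    shell = PseudoShell(master_function, child_function)
    shell.start()
    exit_code = shell.join()
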
@@ -1,11 +1,11 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)


 #: major, minor, patch version for Spack, in a tuple
-spack_version_info = (0, 13, 0)
+spack_version_info = (0, 14, 3)

 #: String containing Spack version joined with .'s
 spack_version = '.'.join(str(v) for v in spack_version_info)
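For concreteness, the join above turns the version tuple into the release
string:

    >>> '.'.join(str(v) for v in (0, 14, 3))
    '0.14.3'
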
@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -69,6 +69,7 @@
 import spack.compiler
 import spack.paths
 import spack.error as serr
+import spack.util.executable
 import spack.version
 from spack.util.naming import mod_to_class
 from spack.util.spack_yaml import syaml_dict
@@ -214,7 +215,11 @@ def optimization_flags(self, compiler):
         import spack.spec
         if isinstance(compiler, spack.spec.CompilerSpec):
             compiler = spack.compilers.compilers_for_spec(compiler).pop()
-        compiler_version = compiler.cc_version(compiler.cc)
+        try:
+            compiler_version = compiler.cc_version(compiler.cc)
+        except spack.util.executable.ProcessError as e:
+            # log this and just return compiler.version instead
+            tty.debug(str(e))

         return self.microarchitecture.optimization_flags(
             compiler.name, str(compiler_version)
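The intent of the guard above, per its comment, is "probe the real compiler
binary, and fall back to the declared version if the probe fails". Distilled
into a standalone sketch (the helper name is hypothetical; ``compiler.version``
and ``cc_version`` follow the usage in the hunk):

    def version_with_fallback(compiler):
        version = compiler.version                 # declared fallback
        try:
            version = compiler.cc_version(compiler.cc)   # probe the binary
        except spack.util.executable.ProcessError as e:
            tty.debug(str(e))                      # log and keep the fallback
        return version
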
@@ -351,10 +356,10 @@ def _cmp_key(self):
         return (self.name, self.version)

     def to_dict(self):
-        return {
-            'name': self.name,
-            'version': self.version
-        }
+        return syaml_dict([
+            ('name', self.name),
+            ('version', self.version)
+        ])


 @key_ordering
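Switching from a plain dict to ``syaml_dict`` (Spack's ordered mapping for
YAML) keeps ``name`` before ``version`` when the object is written out; on
the older interpreters Spack still supported, plain dicts do not preserve
insertion order. The standard-library analogue of the behavior (values are
illustrative):

    >>> from collections import OrderedDict
    >>> d = OrderedDict([('name', 'linux'), ('version', '18.04')])
    >>> list(d)
    ['name', 'version']
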
@@ -436,6 +441,7 @@ def from_dict(d):
     return arch_for_spec(spec)


+@memoized
 def get_platform(platform_name):
     """Returns a platform object that corresponds to the given name."""
     platform_list = all_platforms()
@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -18,11 +18,12 @@
 from six.moves.urllib.error import URLError

 import llnl.util.tty as tty
-from llnl.util.filesystem import mkdirp, install_tree
+from llnl.util.filesystem import mkdirp

 import spack.cmd
 import spack.config as config
 import spack.fetch_strategy as fs
-import spack.util.gpg as gpg_util
+import spack.util.gpg
 import spack.relocate as relocate
 import spack.util.spack_yaml as syaml
 import spack.mirror
@@ -32,7 +33,7 @@
 from spack.spec import Spec
 from spack.stage import Stage
 from spack.util.gpg import Gpg
-from spack.util.executable import ProcessError
+import spack.architecture as architecture

 _build_cache_relative_path = 'build_cache'
@@ -109,14 +110,6 @@ class NewLayoutException(spack.error.SpackError):
     pass


-def has_gnupg2():
-    try:
-        gpg_util.Gpg.gpg()('--version', output=os.devnull)
-        return True
-    except ProcessError:
-        return False
-
-
 def build_cache_relative_path():
     return _build_cache_relative_path
@@ -242,27 +235,31 @@ def checksum_tarball(file):

 def sign_tarball(key, force, specfile_path):
     # Sign the packages if keys available
-    if not has_gnupg2():
+    if spack.util.gpg.Gpg.gpg() is None:
         raise NoGpgException(
             "gpg2 is not available in $PATH .\n"
             "Use spack install gnupg and spack load gnupg.")
-    else:
-        if key is None:
-            keys = Gpg.signing_keys()
-            if len(keys) == 1:
-                key = keys[0]
-            if len(keys) > 1:
-                raise PickKeyException(str(keys))
-            if len(keys) == 0:
-                msg = "No default key available for signing.\n"
-                msg += "Use spack gpg init and spack gpg create"
-                msg += " to create a default key."
-                raise NoKeyException(msg)
+    if key is None:
+        keys = Gpg.signing_keys()
+        if len(keys) == 1:
+            key = keys[0]
+
+        if len(keys) > 1:
+            raise PickKeyException(str(keys))
+
+        if len(keys) == 0:
+            msg = "No default key available for signing.\n"
+            msg += "Use spack gpg init and spack gpg create"
+            msg += " to create a default key."
+            raise NoKeyException(msg)

     if os.path.exists('%s.asc' % specfile_path):
         if force:
             os.remove('%s.asc' % specfile_path)
         else:
             raise NoOverwriteException('%s.asc' % specfile_path)

     Gpg.sign(key, specfile_path, '%s.asc' % specfile_path)
@@ -271,7 +268,7 @@ def generate_package_index(cache_prefix):

     Creates (or replaces) the "index.html" page at the location given in
     cache_prefix. This page contains a link for each binary package (*.yaml)
-    and signing key (*.key) under cache_prefix.
+    and public key (*.key) under cache_prefix.
     """
     tmpdir = tempfile.mkdtemp()
     try:
@@ -311,7 +308,7 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
     tmpdir = tempfile.mkdtemp()
     cache_prefix = build_cache_prefix(tmpdir)

-    tarfile_name = tarball_name(spec, '.tar.gz')
+    tarfile_name = tarball_name(spec, '.tar.bz2')
     tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec))
     tarfile_path = os.path.join(tarfile_dir, tarfile_name)
     spackfile_path = os.path.join(
@@ -345,8 +342,18 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
         raise NoOverwriteException(url_util.format(remote_specfile_path))

     # make a copy of the install directory to work with
-    workdir = os.path.join(tempfile.mkdtemp(), os.path.basename(spec.prefix))
-    install_tree(spec.prefix, workdir, symlinks=True)
+    workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
+    # install_tree copies hardlinks
+    # create a temporary tarfile from prefix and extract it to workdir
+    # tarfile preserves hardlinks
+    temp_tarfile_name = tarball_name(spec, '.tar')
+    temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name)
+    with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
+        tar.add(name='%s' % spec.prefix,
+                arcname='.')
+    with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
+        tar.extractall(workdir)
+    os.remove(temp_tarfile_path)

     # create info for later relocation and create tar
     write_buildinfo_file(spec.prefix, workdir, rel=rel)
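The tar round-trip above is the portable way to copy a tree while keeping
hardlinked files hardlinked; ``install_tree`` would materialize each link as
an independent file. A standalone sketch of the same trick (paths are
placeholders):

    import tarfile
    from contextlib import closing

    def copy_preserving_hardlinks(src, dest, scratch_tar):
        # tar records hardlinks as links and replays them on extract
        with closing(tarfile.open(scratch_tar, 'w')) as tar:
            tar.add(name=src, arcname='.')
        with closing(tarfile.open(scratch_tar, 'r')) as tar:
            tar.extractall(dest)
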
@@ -371,7 +378,7 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
             tty.die(e)

     # create compressed tarball of the install prefix
-    with closing(tarfile.open(tarfile_path, 'w:gz')) as tar:
+    with closing(tarfile.open(tarfile_path, 'w:bz2')) as tar:
         tar.add(name='%s' % workdir,
                 arcname='%s' % os.path.basename(spec.prefix))
     # remove copy of install directory
@@ -410,8 +417,8 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
         sign_tarball(key, force, specfile_path)
     # put tarball, spec and signature files in .spack archive
     with closing(tarfile.open(spackfile_path, 'w')) as tar:
-        tar.add(name='%s' % tarfile_path, arcname='%s' % tarfile_name)
-        tar.add(name='%s' % specfile_path, arcname='%s' % specfile_name)
+        tar.add(name=tarfile_path, arcname='%s' % tarfile_name)
+        tar.add(name=specfile_path, arcname='%s' % specfile_name)
         if not unsigned:
             tar.add(name='%s.asc' % specfile_path,
                     arcname='%s.asc' % specfile_name)
@@ -520,8 +527,6 @@ def relocate_package(workdir, spec, allow_root):
     old_prefix = str(buildinfo.get('spackprefix',
                                    '/not/in/buildinfo/dictionary'))
     rel = buildinfo.get('relative_rpaths', False)
-    if rel:
-        return

     tty.msg("Relocating package from",
             "%s to %s." % (old_path, new_path))
@@ -536,7 +541,22 @@ def relocate_package(workdir, spec, allow_root):
                              newprefix=new_prefix)
     # If the binary files in the package were not edited to use
     # relative RPATHs, then the RPATHs need to be relocated
-    if not rel:
+    if rel:
+        if old_path != new_path:
+            files_to_relocate = list(filter(
+                lambda pathname: not relocate.file_is_relocatable(
+                    pathname, paths_to_relocate=[old_path, old_prefix]),
+                map(lambda filename: os.path.join(workdir, filename),
+                    buildinfo['relocate_binaries'])))
+
+            if len(old_path) < len(new_path) and files_to_relocate:
+                tty.debug('Cannot do a binary string replacement with padding '
+                          'for package because %s is longer than %s.' %
+                          (new_path, old_path))
+            else:
+                for path_name in files_to_relocate:
+                    relocate.replace_prefix_bin(path_name, old_path, new_path)
+    else:
         path_names = set()
         for filename in buildinfo['relocate_binaries']:
             path_name = os.path.join(workdir, filename)
@@ -569,17 +589,22 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
     stagepath = os.path.dirname(filename)
     spackfile_name = tarball_name(spec, '.spack')
     spackfile_path = os.path.join(stagepath, spackfile_name)
-    tarfile_name = tarball_name(spec, '.tar.gz')
+    tarfile_name = tarball_name(spec, '.tar.bz2')
     tarfile_path = os.path.join(tmpdir, tarfile_name)
     specfile_name = tarball_name(spec, '.spec.yaml')
     specfile_path = os.path.join(tmpdir, specfile_name)

     with closing(tarfile.open(spackfile_path, 'r')) as tar:
         tar.extractall(tmpdir)
+    # older buildcache tarfiles use gzip compression
+    if not os.path.exists(tarfile_path):
+        tarfile_name = tarball_name(spec, '.tar.gz')
+        tarfile_path = os.path.join(tmpdir, tarfile_name)
     if not unsigned:
         if os.path.exists('%s.asc' % specfile_path):
             try:
-                Gpg.verify('%s.asc' % specfile_path, specfile_path)
+                suppress = config.get('config:suppress_gpg_warnings', False)
+                Gpg.verify('%s.asc' % specfile_path, specfile_path, suppress)
             except Exception as e:
                 shutil.rmtree(tmpdir)
                 tty.die(e)
@@ -627,7 +652,17 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
     # so the pathname should be the same now that the directory layout
     # is confirmed
     workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
-    install_tree(workdir, spec.prefix, symlinks=True)
+    # install_tree copies hardlinks
+    # create a temporary tarfile from prefix and extract it to workdir
+    # tarfile preserves hardlinks
+    temp_tarfile_name = tarball_name(spec, '.tar')
+    temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name)
+    with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
+        tar.add(name='%s' % workdir,
+                arcname='.')
+    with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
+        tar.extractall(spec.prefix)
+    os.remove(temp_tarfile_path)

     # cleanup
     os.remove(tarfile_path)
@@ -649,48 +684,17 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
     shutil.rmtree(tmpdir)


-#: Internal cache for get_specs
-_cached_specs = None
+# Internal cache for downloaded specs
+_cached_specs = set()


-def get_specs(force=False):
-    """
-    Get spec.yaml's for build caches available on mirror
-    """
+def try_download_specs(urls=None, force=False):
+    '''
+    Try to download the urls and cache them
+    '''
     global _cached_specs
-
-    if _cached_specs:
-        tty.debug("Using previously-retrieved specs")
-        return _cached_specs
-
-    if not spack.mirror.MirrorCollection():
-        tty.warn("No Spack mirrors are currently configured")
+    if urls is None:
         return {}
-
-    urls = set()
-    for mirror in spack.mirror.MirrorCollection().values():
-        fetch_url_build_cache = url_util.join(
-            mirror.fetch_url, _build_cache_relative_path)
-
-        mirror_dir = url_util.local_file_path(fetch_url_build_cache)
-        if mirror_dir:
-            tty.msg("Finding buildcaches in %s" % mirror_dir)
-            if os.path.exists(mirror_dir):
-                files = os.listdir(mirror_dir)
-                for file in files:
-                    if re.search('spec.yaml', file):
-                        link = url_util.join(fetch_url_build_cache, file)
-                        urls.add(link)
-        else:
-            tty.msg("Finding buildcaches at %s" %
-                    url_util.format(fetch_url_build_cache))
-            p, links = web_util.spider(
-                url_util.join(fetch_url_build_cache, 'index.html'))
-            for link in links:
-                if re.search("spec.yaml", link):
-                    urls.add(link)
-
-    _cached_specs = []
     for link in urls:
         with Stage(link, name="build_cache", keep=True) as stage:
             if force and os.path.exists(stage.save_filename):
@@ -706,11 +710,92 @@ def get_specs(force=False):
             # we need to mark this spec concrete on read-in.
             spec = Spec.from_yaml(f)
             spec._mark_concrete()
-            _cached_specs.append(spec)
+            _cached_specs.add(spec)

     return _cached_specs


+def get_spec(spec=None, force=False):
+    """
+    Check if spec.yaml exists on mirrors and return it if it does
+    """
+    global _cached_specs
+    urls = set()
+    if spec is None:
+        return {}
+    specfile_name = tarball_name(spec, '.spec.yaml')
+
+    if not spack.mirror.MirrorCollection():
+        tty.debug("No Spack mirrors are currently configured")
+        return {}
+
+    if spec in _cached_specs:
+        return _cached_specs
+
+    for mirror in spack.mirror.MirrorCollection().values():
+        fetch_url_build_cache = url_util.join(
+            mirror.fetch_url, _build_cache_relative_path)
+
+        mirror_dir = url_util.local_file_path(fetch_url_build_cache)
+        if mirror_dir:
+            tty.msg("Finding buildcaches in %s" % mirror_dir)
+            link = url_util.join(fetch_url_build_cache, specfile_name)
+            urls.add(link)
+
+        else:
+            tty.msg("Finding buildcaches at %s" %
+                    url_util.format(fetch_url_build_cache))
+            link = url_util.join(fetch_url_build_cache, specfile_name)
+            urls.add(link)
+
+    return try_download_specs(urls=urls, force=force)
+
+
+def get_specs(force=False, allarch=False):
+    """
+    Get spec.yaml's for build caches available on mirror
+    """
+    arch = architecture.Arch(architecture.platform(),
+                             'default_os', 'default_target')
+    arch_pattern = ('([^-]*-[^-]*-[^-]*)')
+    if not allarch:
+        arch_pattern = '(%s-%s-[^-]*)' % (arch.platform, arch.os)
+
+    regex_pattern = '%s(.*)(spec.yaml$)' % (arch_pattern)
+    arch_re = re.compile(regex_pattern)
+
+    if not spack.mirror.MirrorCollection():
+        tty.debug("No Spack mirrors are currently configured")
+        return {}
+
+    urls = set()
+    for mirror in spack.mirror.MirrorCollection().values():
+        fetch_url_build_cache = url_util.join(
+            mirror.fetch_url, _build_cache_relative_path)
+
+        mirror_dir = url_util.local_file_path(fetch_url_build_cache)
+        if mirror_dir:
+            tty.msg("Finding buildcaches in %s" % mirror_dir)
+            if os.path.exists(mirror_dir):
+                files = os.listdir(mirror_dir)
+                for file in files:
+                    m = arch_re.search(file)
+                    if m:
+                        link = url_util.join(fetch_url_build_cache, file)
+                        urls.add(link)
+        else:
+            tty.msg("Finding buildcaches at %s" %
+                    url_util.format(fetch_url_build_cache))
+            p, links = web_util.spider(
+                url_util.join(fetch_url_build_cache, 'index.html'))
+            for link in links:
+                m = arch_re.search(link)
+                if m:
+                    urls.add(link)
+
+    return try_download_specs(urls=urls, force=force)
+
+
 def get_keys(install=False, trust=False, force=False):
     """
     Get pgp public keys available on mirror
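To see what the arch filter in the new ``get_specs`` admits: with
``allarch=False`` on, say, a linux/ubuntu18.04 host, only spec.yaml names
that begin with that platform-os prefix match (the file names below are
illustrative, not real buildcache entries):

    >>> import re
    >>> arch_re = re.compile('(linux-ubuntu18.04-[^-]*)(.*)(spec.yaml$)')
    >>> bool(arch_re.search(
    ...     'linux-ubuntu18.04-x86_64-gcc-9.2.0-zlib-1.2.11-abc123.spec.yaml'))
    True
    >>> bool(arch_re.search(
    ...     'darwin-mojave-x86_64-clang-11.0.0-zlib-1.2.11-def456.spec.yaml'))
    False
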
@@ -788,10 +873,10 @@ def needs_rebuild(spec, mirror_url, rebuild_on_errors=False):
     try:
         _, _, yaml_file = web_util.read_from_url(file_path)
         yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
-    except URLError as url_err:
+    except (URLError, web_util.SpackWebError) as url_err:
         err_msg = [
             'Unable to determine whether {0} needs rebuilding,',
-            ' caught URLError attempting to read from {1}.',
+            ' caught exception attempting to read from {1}.',
         ]
         tty.error(''.join(err_msg).format(spec.short_spec, file_path))
         tty.debug(url_err)
@@ -893,11 +978,16 @@ def _download_buildcache_entry(mirror_root, descriptions):
         return True


-def download_buildcache_entry(file_descriptions):
-    if not spack.mirror.MirrorCollection():
-        tty.die("Please add a spack mirror to allow " +
+def download_buildcache_entry(file_descriptions, mirror_url=None):
+    if not mirror_url and not spack.mirror.MirrorCollection():
+        tty.die("Please provide or add a spack mirror to allow " +
                 "download of buildcache entries.")

+    if mirror_url:
+        mirror_root = os.path.join(
+            mirror_url, _build_cache_relative_path)
+        return _download_buildcache_entry(mirror_root, file_descriptions)
+
     for mirror in spack.mirror.MirrorCollection().values():
         mirror_root = os.path.join(
             mirror.fetch_url,
@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -39,7 +39,6 @@
 import sys
 import traceback
 import types
-from six import iteritems
 from six import StringIO

 import llnl.util.tty as tty
@@ -52,6 +51,7 @@
 import spack.config
 import spack.main
 import spack.paths
+import spack.schema.environment
 import spack.store
 from spack.util.string import plural
 from spack.util.environment import (
@@ -342,21 +342,7 @@ def set_build_environment_variables(pkg, env, dirty):
     # Set environment variables if specified for
     # the given compiler
     compiler = pkg.compiler
-    environment = compiler.environment
-
-    for command, variable in iteritems(environment):
-        if command == 'set':
-            for name, value in iteritems(variable):
-                env.set(name, value)
-        elif command == 'unset':
-            for name, _ in iteritems(variable):
-                env.unset(name)
-        elif command == 'prepend-path':
-            for name, value in iteritems(variable):
-                env.prepend_path(name, value)
-        elif command == 'append-path':
-            for name, value in iteritems(variable):
-                env.append_path(name, value)
+    env.extend(spack.schema.environment.parse(compiler.environment))

     if compiler.extra_rpaths:
         extra_rpaths = ':'.join(compiler.extra_rpaths)
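The removed loop documents what the consolidated call still does:
``compiler.environment`` maps commands ('set', 'unset', 'prepend-path',
'append-path') to variable/value pairs, and ``spack.schema.environment.parse``
turns that mapping into modifications that ``env.extend`` applies. A
hypothetical fragment for illustration (values are made up):

    # hypothetical compilers.yaml-style environment block
    environment = {
        'set': {'CC': 'gcc', 'CXX': 'g++'},
        'prepend-path': {'PATH': '/opt/compiler/bin'},
    }
    # roughly equivalent to the removed loop:
    #   env.set('CC', 'gcc'); env.set('CXX', 'g++')
    #   env.prepend_path('PATH', '/opt/compiler/bin')
    env.extend(spack.schema.environment.parse(environment))
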
@@ -422,6 +408,11 @@ def set_build_environment_variables(pkg, env, dirty):

 def _set_variables_for_single_module(pkg, module):
     """Helper function to set module variables for single module."""
+    # Put a marker on this module so that it won't execute the body of this
+    # function again, since it is not needed
+    marker = '_set_run_already_called'
+    if getattr(module, marker, False):
+        return

     jobs = spack.config.get('config:build_jobs') if pkg.parallel else 1
     jobs = min(jobs, multiprocessing.cpu_count())
@@ -489,6 +480,10 @@ def static_to_shared_library(static_lib, shared_lib=None, **kwargs):

     m.static_to_shared_library = static_to_shared_library

+    # Put a marker on this module so that it won't execute the body of this
+    # function again, since it is not needed
+    setattr(m, marker, True)
+

 def set_module_variables_for_package(pkg):
     """Populate the module scope of install() with some useful functions.
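The two hunks above implement a simple idempotence guard: mark the module on
the first run, and bail out if the mark is already present. A minimal
standalone sketch of the idiom (names hypothetical):

    _MARKER = '_already_configured'

    def configure_module(module):
        if getattr(module, _MARKER, False):
            return                        # second call: nothing to do
        # ... expensive one-time setup on `module` goes here ...
        setattr(module, _MARKER, True)    # remember that we ran
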
@@ -613,7 +608,7 @@ def get_rpaths(pkg):
     # module show output.
     if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
         rpaths.append(get_path_from_module(pkg.compiler.modules[1]))
-    return rpaths
+    return list(dedupe(filter_system_paths(rpaths)))


 def get_std_cmake_args(pkg):
@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -1,4 +1,4 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
 # Spack Project Developers. See the top-level COPYRIGHT file for details.
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -56,8 +56,9 @@ class AutotoolsPackage(PackageBase):
     #: This attribute is used in UI queries that need to know the build
     #: system base class
     build_system_class = 'AutotoolsPackage'
-    #: Whether or not to update ``config.guess`` on old architectures
-    patch_config_guess = True
+    #: Whether or not to update ``config.guess`` and ``config.sub`` on old
+    #: architectures
+    patch_config_files = True
     #: Whether or not to update ``libtool``
     #: (currently only for Arm/Clang/Fujitsu compilers)
     patch_libtool = True
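A package that must keep its shipped config files can opt out of the renamed
behavior by overriding the class attribute; a hypothetical package for
illustration:

    class Libdemo(AutotoolsPackage):      # hypothetical package
        """Keeps its own config.guess/config.sub untouched."""
        patch_config_files = False
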
@@ -86,72 +87,92 @@ def archive_files(self):
         return [os.path.join(self.build_directory, 'config.log')]

     @run_after('autoreconf')
-    def _do_patch_config_guess(self):
-        """Some packages ship with an older config.guess and need to have
-        this updated when installed on a newer architecture. In particular,
-        config.guess fails for PPC64LE for version prior to a 2013-06-10
-        build date (automake 1.13.4) and for ARM (aarch64)."""
+    def _do_patch_config_files(self):
+        """Some packages ship with older config.guess/config.sub files and
+        need to have these updated when installed on a newer architecture.
+        In particular, config.guess fails for PPC64LE for version prior
+        to a 2013-06-10 build date (automake 1.13.4) and for ARM (aarch64)."""

-        if not self.patch_config_guess or (
+        if not self.patch_config_files or (
                 not self.spec.satisfies('target=ppc64le:') and
                 not self.spec.satisfies('target=aarch64:')
         ):
             return
-        my_config_guess = None
-        config_guess = None
-        if os.path.exists('config.guess'):
-            # First search the top-level source directory
-            my_config_guess = 'config.guess'
-        else:
-            # Then search in all sub directories.
-            # We would like to use AC_CONFIG_AUX_DIR, but not all packages
-            # ship with their configure.in or configure.ac.
-            d = '.'
-            dirs = [os.path.join(d, o) for o in os.listdir(d)
-                    if os.path.isdir(os.path.join(d, o))]
-            for dirname in dirs:
-                path = os.path.join(dirname, 'config.guess')
-                if os.path.exists(path):
-                    my_config_guess = path

-        if my_config_guess is not None:
-            try:
-                check_call([my_config_guess], stdout=PIPE, stderr=PIPE)
-                # The package's config.guess already runs OK, so just use it
-                return
-            except Exception as e:
-                tty.debug(e)
-        else:
-            return
-
-        # Look for a spack-installed automake package
-        if 'automake' in self.spec:
-            automake_path = os.path.join(self.spec['automake'].prefix, 'share',
-                                         'automake-' +
-                                         str(self.spec['automake'].version))
-            path = os.path.join(automake_path, 'config.guess')
-            if os.path.exists(path):
-                config_guess = path
-        # Look for the system's config.guess
-        if config_guess is None and os.path.exists('/usr/share'):
-            automake_dir = [s for s in os.listdir('/usr/share') if
-                            "automake" in s]
-            if automake_dir:
-                automake_path = os.path.join('/usr/share', automake_dir[0])
-                path = os.path.join(automake_path, 'config.guess')
-                if os.path.exists(path):
-                    config_guess = path
-        if config_guess is not None:
-            try:
-                check_call([config_guess], stdout=PIPE, stderr=PIPE)
-                mod = os.stat(my_config_guess).st_mode & 0o777 | stat.S_IWUSR
-                os.chmod(my_config_guess, mod)
-                shutil.copyfile(config_guess, my_config_guess)
-                return
-            except Exception as e:
-                tty.debug(e)
-
-        raise RuntimeError('Failed to find suitable config.guess')
+        # TODO: Expand this to select the 'config.sub'-compatible architecture
+        # for each platform (e.g. 'config.sub' doesn't accept 'power9le', but
+        # does accept 'ppc64le').
+        if self.spec.satisfies('target=ppc64le:'):
+            config_arch = 'ppc64le'
+        elif self.spec.satisfies('target=aarch64:'):
+            config_arch = 'aarch64'
+        else:
+            config_arch = 'local'
+
+        my_config_files = {'guess': None, 'sub': None}
+        config_files = {'guess': None, 'sub': None}
+        config_args = {'guess': [], 'sub': [config_arch]}
+
+        for config_name in config_files.keys():
+            config_file = 'config.{0}'.format(config_name)
+            if os.path.exists(config_file):
+                # First search the top-level source directory
+                my_config_files[config_name] = os.path.abspath(config_file)
+            else:
+                # Then search in all sub directories recursively.
+                # We would like to use AC_CONFIG_AUX_DIR, but not all packages
+                # ship with their configure.in or configure.ac.
+                config_path = next((os.path.abspath(os.path.join(r, f))
+                                    for r, ds, fs in os.walk('.') for f in fs
+                                    if f == config_file), None)
+                my_config_files[config_name] = config_path
+
+            if my_config_files[config_name] is not None:
+                try:
+                    config_path = my_config_files[config_name]
+                    check_call([config_path] + config_args[config_name],
+                               stdout=PIPE, stderr=PIPE)
+                    # The package's config file already runs OK, so use it
+                    continue
+                except Exception as e:
+                    tty.debug(e)
+            else:
+                continue
+
+            # Look for a spack-installed automake package
+            if 'automake' in self.spec:
+                automake_dir = 'automake-' + str(self.spec['automake'].version)
+                automake_path = os.path.join(self.spec['automake'].prefix,
+                                             'share', automake_dir)
+                path = os.path.join(automake_path, config_file)
+                if os.path.exists(path):
+                    config_files[config_name] = path
+            # Look for the system's config.guess
+            if (config_files[config_name] is None and
+                    os.path.exists('/usr/share')):
+                automake_dir = [s for s in os.listdir('/usr/share') if
                                "automake" in s]
+                if automake_dir:
+                    automake_path = os.path.join('/usr/share', automake_dir[0])
+                    path = os.path.join(automake_path, config_file)
+                    if os.path.exists(path):
+                        config_files[config_name] = path
+
+            if config_files[config_name] is not None:
+                try:
+                    config_path = config_files[config_name]
+                    my_config_path = my_config_files[config_name]
+
+                    check_call([config_path] + config_args[config_name],
+                               stdout=PIPE, stderr=PIPE)
+
+                    m = os.stat(my_config_path).st_mode & 0o777 | stat.S_IWUSR
+                    os.chmod(my_config_path, m)
+                    shutil.copyfile(config_path, my_config_path)
+                    continue
+                except Exception as e:
+                    tty.debug(e)
+
+            raise RuntimeError('Failed to find suitable ' + config_file)

     @run_after('configure')
     def _do_patch_libtool(self):
Some files were not shown because too many files have changed in this diff.