Compare commits

..

164 Commits

Author SHA1 Message Date
Carson Woods c7ffdf5367 Merge branch 'develop' into releases/v0.13.4 2020-02-12 11:58:23 -05:00
Carson Woods 3f954b8157 Merge branch 'features/shared' into releases/v0.13.4 2020-02-12 11:32:06 -05:00
Carson Woods cdcd3dcedd Merge branch 'develop' into features/shared 2020-01-27 15:28:28 -05:00
Carson Woods 7c1083916a Fix bash completion script 2020-01-24 11:36:26 -05:00
Carson Woods c07bbe1a25 Fix flake8 error 2020-01-24 11:00:08 -05:00
Carson Woods 85032c6224 Resolve merge conflicts with develop 2020-01-24 10:41:44 -05:00
Carson Woods 7b7898a69c Merge branch 'develop' into features/shared 2020-01-21 18:55:21 -05:00
Carson Woods 84c5d76eae Merge branch 'develop' into features/shared 2020-01-18 13:57:50 -08:00
Carson Woods bcd47f0bd6 Merge branch 'develop' into features/shared 2020-01-17 14:32:47 -08:00
Carson Woods cb6a959cdb Merge branch 'develop' into features/shared 2020-01-15 14:41:14 -05:00
Carson Woods 32cd12bff7 Merge branch 'develop' into features/shared 2020-01-10 16:19:37 -08:00
Carson Woods 7021965159 Fix merge conflicts and repair broken unit test. 2020-01-09 20:12:39 -08:00
Carson Woods 5c5743ca33 Merge branch 'develop' into features/shared to support Spack 0.13.3 2019-12-26 21:00:09 -06:00
Carson Woods 034a7662ac Merge branch 'develop' into features/shared 2019-11-21 12:52:24 -07:00
Carson Woods e6b6ac5898 Fixed error message to use proper --upstream rather than -u 2019-11-21 12:30:15 -07:00
Carson Woods 35037bf088 Merge branch 'develop' into features/shared 2019-11-17 16:37:47 -07:00
Carson Woods d14c245411 Merge branch 'develop' into features/shared 2019-11-10 22:05:20 -05:00
Carson Woods 6e2ad01f20 Fix flake8 formatting 2019-11-06 13:25:15 -05:00
Carson Woods ef9b5a8f74 Fix unit test failing 2019-11-06 13:24:10 -05:00
Carson Woods 4921ed29d5 Fix a few broken unit tests 2019-11-06 09:56:22 -05:00
Carson Woods f4c720e902 Ensure feature supports Spack version 0.13.0-0.13.1 2019-11-05 16:38:18 -05:00
Carson Woods 0a71b1d5ac Merge branch 'develop' into features/shared 2019-10-31 21:29:33 -04:00
Carson Woods 3593a7be6a Better comment the purpose of new unit tests 2019-09-20 19:05:56 -04:00
Carson Woods e4d2cf4441 Fix flake8 error to avoid failing CI testing 2019-09-20 15:29:46 -04:00
Carson Woods 911e51bd89 Merge branch 'develop' into features/shared 2019-09-20 15:28:44 -04:00
    Done to resolve merge conflicts that had arisen since work on this feature completed.
Carson Woods 6ec8aea6f7 Rebase branch 'features/shared' of github.com:carsonwoods/spack against develop 2019-08-07 18:57:48 -06:00
Carson Woods 5b11f7aa4c Fix bug where environments would ignore global path 2019-08-07 18:32:28 -06:00
Carson Woods 97e46981b9 Remove old doc from doc index 2019-08-07 18:32:28 -06:00
Carson Woods 873ac5e890 Remove old documentation for shared mode 2019-08-07 18:32:28 -06:00
Carson Woods 4d7dae5716 Remove old share command from tests 2019-08-07 18:32:28 -06:00
Carson Woods b19f0fafcc Remove outdate share command 2019-08-07 18:32:28 -06:00
Carson Woods 11b1bdd119 Pep8 Compliance Fix 2019-08-07 18:32:28 -06:00
Carson Woods f749821dc2 Pep8 Compliance 2019-08-07 18:32:28 -06:00
Carson Woods 5abb20dcab Rename test 2019-08-07 18:32:28 -06:00
Carson Woods 0c233bdd0f Add test for validating upstream database initialization 2019-08-07 18:32:28 -06:00
Carson Woods 0f171c7ded Replace space with = in command parameter 2019-08-07 18:32:28 -06:00
Carson Woods b4c7520dd8 Flake8 Test Compliance 2019-08-07 18:32:28 -06:00
Carson Woods 9ab7d8f01d Add config parameter for active upstream to set install location for modules 2019-08-07 18:32:28 -06:00
Carson Woods a7ad344c2a Add upstreams.yaml testing file so calls to upstreams['global] doesn't cause tests to fail 2019-08-07 18:32:28 -06:00
Carson Woods deb2d3745c Fix .spack-db/index.json not being created in global upstream if previously uninitialized 2019-08-07 18:32:28 -06:00
Carson Woods ff96ec430b Can now specify upstream of anyname through -u/--upstream flag 2019-08-07 18:32:28 -06:00
Carson Woods d4a959736a Flake8 Compliance Changes 2019-08-07 18:32:28 -06:00
Carson Woods 5ba51a0be0 --global option now works for both install and uninstall 2019-08-07 18:32:28 -06:00
Carson Woods 27e1140df7 Reset active directory after each global install 2019-08-07 18:32:28 -06:00
Carson Woods 7ab6af8a3b Add scope to setting active tree to ensure that it is set at user level 2019-08-07 18:32:28 -06:00
Carson Woods 0e6e93eaac Fix unit test config.yaml 2019-08-07 18:32:28 -06:00
Carson Woods 38f8bdd2bb Home expansion was removed because it was no longer being used 2019-08-07 18:32:27 -06:00
Carson Woods 8e45a3fc2f Fix flake8 compliance 2019-08-07 18:32:27 -06:00
Carson Woods c22af99b04 Fix how upstream db paths are canonicalized 2019-08-07 18:32:27 -06:00
Carson Woods fc3a909fbc Set staging location to ~/.spack/var 2019-08-07 18:32:27 -06:00
Carson Woods 9665754eae Fix default install tree 2019-08-07 18:32:27 -06:00
Carson Woods 0f9f9f3a85 Revise default var path 2019-08-07 18:32:27 -06:00
Carson Woods 777a5682a6 Fix default install location 2019-08-07 18:32:27 -06:00
Carson Woods 8994b4aab6 Fix flake8 compliance 2019-08-07 18:32:27 -06:00
Carson Woods 98ec366470 Set root of store object to active tree 2019-08-07 18:32:27 -06:00
Carson Woods c61f4d7c82 Add logic to set the active install tree 2019-08-07 18:32:27 -06:00
Carson Woods 811b304230 Remove old code 2019-08-07 18:32:27 -06:00
Carson Woods 8f0c9ad409 Change name of global parameter to install_global 2019-08-07 18:32:27 -06:00
Carson Woods 6a423a5d8a Typo fix 2019-08-07 18:32:27 -06:00
Carson Woods 23c37063bd Add default global upstream of /opt/spack 2019-08-07 18:32:27 -06:00
Carson Woods 478f3a5a99 Fix whitespace issue 2019-08-07 18:32:27 -06:00
Carson Woods 02afb30990 Remove unit testing for shared spack mode 2019-08-07 18:32:27 -06:00
Carson Woods 06e3f15e47 Remove old shared spack code 2019-08-07 18:32:27 -06:00
Carson Woods f13ce3540d Add dest name of install_global to --global parameter 2019-08-07 18:32:27 -06:00
Carson Woods 7ae34087e3 Set remove old shared spack code 2019-08-07 18:32:27 -06:00
Carson Woods f0fea97e88 Set source_cache to user's ~/.spack directory 2019-08-07 18:32:27 -06:00
Carson Woods 54893197ed Set staging location to be based out of users .spack directory 2019-08-07 18:32:27 -06:00
Carson Woods 80da1d50d1 Make var_path point to ~/.spack/var/spack 2019-08-07 18:32:27 -06:00
Carson Woods 944c5d75cd Add --global flag to install cmd to install to globally accessible location 2019-08-07 18:32:27 -06:00
Carson Woods 9ef4bc9d50 Add macro for expanding home directory 2019-08-07 18:32:27 -06:00
Carson Woods a2af432833 Temporarily disable module file location overrride while feature is being implemented 2019-08-07 18:32:27 -06:00
Carson Woods aefed311af Change modulefiles install location 2019-08-07 18:32:27 -06:00
Carson Woods 6ffacddcf4 Change default install tree to user's ~/.spack directory 2019-08-07 18:32:27 -06:00
Carson Woods e17824f82f Remove shared mode set self as upstream 2019-08-07 18:32:27 -06:00
Carson Woods 57ca47f035 Remove testing for shared mode 2019-08-07 18:32:27 -06:00
Carson Woods 4532a56b4e Remove shared disable from unit testing 2019-08-07 18:32:27 -06:00
Carson Woods 86e69a48a2 Fix flake8 error 2019-08-07 18:32:27 -06:00
Carson Woods 2508295d81 Fix error caused by SPACK_PATH environment variable not existing 2019-08-07 18:32:27 -06:00
Carson Woods 1a041c051a Fix flake8 error 2019-08-07 18:32:27 -06:00
Carson Woods 2262ca2e67 Add test for install in shared mode 2019-08-07 18:32:27 -06:00
Carson Woods 2269771a91 Fix typo 2019-08-07 18:32:27 -06:00
Carson Woods 7f32574dd8 Fix shared cmd test file 2019-08-07 18:32:27 -06:00
Carson Woods d15ac30f62 Add shared to toctree 2019-08-07 18:32:27 -06:00
Carson Woods 1f41347ab8 Share feature Unit testing 2019-08-07 18:32:27 -06:00
Carson Woods 1f4f01103b Add command interface for share feature 2019-08-07 18:32:27 -06:00
Carson Woods 8f46fcb512 When running tests, disable shared mode because it will break other tests. Custom tests must be written 2019-08-07 18:32:27 -06:00
Carson Woods 2d3b973ebc When shared mode is active store installed packages in SPACK_PATH 2019-08-07 18:32:27 -06:00
Carson Woods 7e62e0f27f When shared mode is active set stage path to SPACK_PATH 2019-08-07 18:32:27 -06:00
Carson Woods ea0db4c0f9 Prevent packages from being installed upstream 2019-08-07 18:32:27 -06:00
Carson Woods 0afc68e60b Change module root path when shared mode is active 2019-08-07 18:32:27 -06:00
Carson Woods 8ad25d5013 Uninstall from SPACK_PATH when shared mode is active 2019-08-07 18:32:27 -06:00
Carson Woods e90db68321 Install to SPACK_PATH when shared mode is active 2019-08-07 18:32:27 -06:00
Carson Woods 9e96b89f02 Add documentation for spack share command 2019-08-07 18:32:27 -06:00
Carson Woods b4dae1b7fd When shared mode is active, spack treats the normal install directory as an upstream 2019-08-07 18:32:27 -06:00
Carson Woods 9e9adf1d2f When shared mode is active, set cache location to SPACK_PATH 2019-08-07 18:32:27 -06:00
Carson Woods de9255247a Fix bug where environments would ignore global path 2019-08-06 17:49:17 -06:00
Carson Woods de5d3e3229 Remove old doc from doc index 2019-07-26 08:54:12 -06:00
Carson Woods e621aafc77 Remove old documentation for shared mode 2019-07-25 16:40:00 -06:00
Carson Woods c53427c98d Remove old share command from tests 2019-07-25 14:22:43 -06:00
Carson Woods 7a75148d1b Remove outdate share command 2019-07-25 13:32:44 -06:00
Carson Woods 4210520c9d Pep8 Compliance Fix 2019-07-25 13:32:44 -06:00
Carson Woods 4f3fb50ae7 Pep8 Compliance 2019-07-25 13:32:44 -06:00
Carson Woods 7660659107 Rename test 2019-07-25 13:32:44 -06:00
Carson Woods fcca2a518b Add test for validating upstream database initialization 2019-07-25 13:32:44 -06:00
Carson Woods 23e1cd7775 Replace space with = in command parameter 2019-07-25 13:32:44 -06:00
Carson Woods 58e794e95a Flake8 Test Compliance 2019-07-25 13:32:44 -06:00
Carson Woods 7ed59ed835 Add config parameter for active upstream to set install location for modules 2019-07-25 13:32:43 -06:00
Carson Woods 512726ae5b Add upstreams.yaml testing file so calls to upstreams['global] doesn't cause tests to fail 2019-07-25 13:32:43 -06:00
Carson Woods 20851a6e6c Fix .spack-db/index.json not being created in global upstream if previously uninitialized 2019-07-25 13:32:43 -06:00
Carson Woods 92bbbb9659 Can now specify upstream of anyname through -u/--upstream flag 2019-07-25 13:32:43 -06:00
Carson Woods 5f2f2bfb84 Flake8 Compliance Changes 2019-07-25 13:32:43 -06:00
Carson Woods 9b63f72d6b --global option now works for both install and uninstall 2019-07-25 13:32:43 -06:00
Carson Woods 4c60f01bae Reset active directory after each global install 2019-07-25 13:32:43 -06:00
Carson Woods cd08308463 Add scope to setting active tree to ensure that it is set at user level 2019-07-25 13:32:43 -06:00
Carson Woods fe69997043 Fix unit test config.yaml 2019-07-25 13:32:43 -06:00
Carson Woods 1584a6e3c6 Home expansion was removed because it was no longer being used 2019-07-25 13:32:43 -06:00
Carson Woods c393880852 Fix flake8 compliance 2019-07-25 13:32:43 -06:00
Carson Woods bbe9e6bf54 Fix how upstream db paths are canonicalized 2019-07-25 13:32:43 -06:00
Carson Woods d7a00b71d4 Set staging location to ~/.spack/var 2019-07-25 13:32:43 -06:00
Carson Woods 6775d2546a Fix default install tree 2019-07-25 13:32:43 -06:00
Carson Woods 8a154333f2 Revise default var path 2019-07-25 13:32:43 -06:00
Carson Woods 5e637a04fd Fix default install location 2019-07-25 13:32:43 -06:00
Carson Woods 0213869439 Fix flake8 compliance 2019-07-25 13:32:43 -06:00
Carson Woods 22e9a9792a Set root of store object to active tree 2019-07-25 13:32:43 -06:00
Carson Woods 4f23da9d26 Add logic to set the active install tree 2019-07-25 13:32:43 -06:00
Carson Woods f9430e2fd4 Remove old code 2019-07-25 13:32:43 -06:00
Carson Woods a2f86d5d18 Change name of global parameter to install_global 2019-07-25 13:32:43 -06:00
Carson Woods 0efab6637c Typo fix 2019-07-25 13:32:43 -06:00
Carson Woods 2b11694b94 Add default global upstream of /opt/spack 2019-07-25 13:32:43 -06:00
Carson Woods 088798a727 Fix whitespace issue 2019-07-25 13:32:43 -06:00
Carson Woods bddbb1c22e Remove unit testing for shared spack mode 2019-07-25 13:32:42 -06:00
Carson Woods 92f447cf1c Remove old shared spack code 2019-07-25 13:32:42 -06:00
Carson Woods 96f266c3e3 Add dest name of install_global to --global parameter 2019-07-25 13:32:42 -06:00
Carson Woods d5093c20c5 Set remove old shared spack code 2019-07-25 13:32:42 -06:00
Carson Woods 2064241c37 Set source_cache to user's ~/.spack directory 2019-07-25 13:32:42 -06:00
Carson Woods 721742b764 Set staging location to be based out of users .spack directory 2019-07-25 13:32:42 -06:00
Carson Woods c45bf153d8 Make var_path point to ~/.spack/var/spack 2019-07-25 13:32:42 -06:00
Carson Woods b98e5e66e7 Add --global flag to install cmd to install to globally accessible location 2019-07-25 13:32:42 -06:00
Carson Woods 3d18bf345f Add macro for expanding home directory 2019-07-25 13:32:42 -06:00
Carson Woods f8e9cf4081 Temporarily disable module file location overrride while feature is being implemented 2019-07-25 13:32:42 -06:00
Carson Woods 98e0f8b89b Change modulefiles install location 2019-07-25 13:32:42 -06:00
Carson Woods 263275b7ea Change default install tree to user's ~/.spack directory 2019-07-25 13:32:42 -06:00
Carson Woods 3e13002d7f Remove shared mode set self as upstream 2019-07-25 13:32:42 -06:00
Carson Woods 654e5cc924 Remove testing for shared mode 2019-07-25 13:32:42 -06:00
Carson Woods 04a72c1834 Remove shared disable from unit testing 2019-07-25 13:32:42 -06:00
Carson Woods 53cf6eb194 Fix flake8 error 2019-07-25 13:32:42 -06:00
Carson Woods 5a7f186176 Fix error caused by SPACK_PATH environment variable not existing 2019-07-25 13:32:42 -06:00
Carson Woods 987adfa9c9 Fix flake8 error 2019-07-25 13:32:42 -06:00
Carson Woods e476bb1400 Add test for install in shared mode 2019-07-25 13:32:42 -06:00
Carson Woods dc12233610 Fix typo 2019-07-25 13:32:42 -06:00
Carson Woods 29d21a0a5d Fix shared cmd test file 2019-07-25 13:32:42 -06:00
Carson Woods 762f505da5 Add shared to toctree 2019-07-25 13:32:42 -06:00
Carson Woods 8e1c326174 Share feature Unit testing 2019-07-25 13:32:42 -06:00
Carson Woods 0bac5d527d Add command interface for share feature 2019-07-25 13:32:42 -06:00
Carson Woods 79256eeb5c When running tests, disable shared mode because it will break other tests. Custom tests must be written 2019-07-25 13:32:42 -06:00
Carson Woods de760942f2 When shared mode is active store installed packages in SPACK_PATH 2019-07-25 13:32:41 -06:00
Carson Woods 860641bfab When shared mode is active set stage path to SPACK_PATH 2019-07-25 13:32:41 -06:00
Carson Woods 673e55f14d Prevent packages from being installed upstream 2019-07-25 13:32:41 -06:00
Carson Woods 54777a4f3e Change module root path when shared mode is active 2019-07-25 13:32:41 -06:00
Carson Woods db36e66592 Uninstall from SPACK_PATH when shared mode is active 2019-07-25 13:32:41 -06:00
Carson Woods 0d36e94407 Install to SPACK_PATH when shared mode is active 2019-07-25 13:32:41 -06:00
Carson Woods 92c3b5b8b2 Add documentation for spack share command 2019-07-25 13:32:41 -06:00
Carson Woods 71220a3656 When shared mode is active, spack treats the normal install directory as an upstream 2019-07-25 13:32:41 -06:00
Carson Woods 09bd29d816 When shared mode is active, set cache location to SPACK_PATH 2019-07-25 13:32:41 -06:00
2038 changed files with 11825 additions and 57114 deletions


@@ -4,7 +4,8 @@ coverage:
range: 60...90
status:
project:
default: yes
default:
threshold: 0.3%
ignore:
- lib/spack/spack/test/.*


@@ -9,7 +9,7 @@
#
# Note that we also add *per-line* exemptions for certain patterns in the
# `spack flake8` command. This is where F403 for `from spack import *`
# is added (because we *only* allow that wildcard).
# is added (beause we *only* allow that wildcard).
#
# See .flake8 for regular exceptions.
#


@@ -1,11 +1,14 @@
---
name: "\U0001F41E Bug report"
about: Report a bug in the core of Spack (command not working as expected, etc.)
labels: "bug,triage"
name: Bug report
about: Report a bug in the core of Spack (command not working as expected, etc.)
labels: bug
---
<!-- Explain, in a clear and concise way, the command you ran and the result you were trying to achieve.
Example: "I ran `spack find` to list all the installed packages and ..." -->
*Explain, in a clear and concise way, the command you ran and the result you were trying to achieve.
Example: "I ran Spack find to list all the installed packages and..."*
### Steps to reproduce the issue
@@ -17,26 +20,30 @@ $ spack <command2> <spec>
### Error Message
<!-- If Spack reported an error, provide the error message. If it did not report an error but the output appears incorrect, provide the incorrect output. If there was no error message and no output but the result is incorrect, describe how it does not match what you expect. -->
If Spack reported an error, provide the error message. If it did not report an error
but the output appears incorrect, provide the incorrect output. If there was no error
message and no output but the result is incorrect, describe how it does not match
what you expect. To provide more information you might re-run the commands with
the additional -d/--stacktrace flags:
```console
$ spack --debug --stacktrace <command>
$ spack -d --stacktrace <command1> <spec>
$ spack -d --stacktrace <command2> <spec>
...
```
that activate the full debug output.
### Information on your system
<!-- Please include the output of `spack debug report` -->
This includes:
<!-- If you have any relevant configuration detail (custom `packages.yaml` or `modules.yaml`, etc.) you can add that here as well. -->
1. which platform you are using
2. any relevant configuration detail (custom `packages.yaml` or `modules.yaml`, etc.)
### Additional information
-----
<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. -->
- [ ] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [ ] I have searched the issues of this repo and believe this is not a duplicate
- [ ] I have run the failing commands in debug mode and reported the output
<!-- We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively!
We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively!
If you want to ask a question about the tool (how to use it, what it can currently do, etc.), try the `#general` channel on our Slack first. We have a welcoming community and chances are you'll get your reply faster and without opening an issue.
Other than that, thanks for taking the time to contribute to Spack! -->
Other than that, thanks for taking the time to contribute to Spack!


@@ -1,43 +1,78 @@
---
name: "\U0001F4A5 Build error"
about: Some package in Spack didn't build correctly
title: "Installation issue: "
name: Build error
about: Some package in Spack didn't build correctly
labels: "build-error"
---
<!-- Thanks for taking the time to report this build failure. To proceed with the report please:
*Thanks for taking the time to report this build failure. To proceed with the
report please:*
1. Title the issue "Installation issue: <name-of-the-package>".
2. Provide the information required below.
3. Remove the template instructions before posting the issue.
We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively! -->
We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively!
---
### Steps to reproduce the issue
<!-- Fill in the exact spec you are trying to build and the relevant part of the error message -->
```console
$ spack install <spec>
...
$ spack install <spec> # Fill in the exact spec you are using
... # and the relevant part of the error message
```
### Information on your system
### Platform and user environment
<!-- Please include the output of `spack debug report` -->
Please report your OS here:
```commandline
$ uname -a
Linux nuvolari 4.15.0-29-generic #31-Ubuntu SMP Tue Jul 17 15:39:52 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
$ lsb_release -d
Description: Ubuntu 18.04.1 LTS
```
and, if relevant, post or attach:
<!-- If you have any relevant configuration detail (custom `packages.yaml` or `modules.yaml`, etc.) you can add that here as well. -->
- `packages.yaml`
- `compilers.yaml`
to the issue
### Additional information
<!-- Please upload the following files. They should be present in the stage directory of the failing build. Also upload any config.log or similar file if one exists. -->
* [spack-build-out.txt]()
* [spack-build-env.txt]()
Sometimes the issue benefits from additional details. In these cases there are
a few things we can suggest doing. First of all, you can post the full output of:
```console
$ spack spec --install-status <spec>
...
```
to show people whether Spack installed a faulty software or if it was not able to
build it at all.
<!-- Some packages have maintainers who have volunteered to debug build failures. Run `spack maintainers <name-of-the-package>` and @mention them here if they exist. -->
If your build didn't make it past the configure stage, Spack as also commands to parse
logs and report error and warning messages:
```console
$ spack log-parse --show=errors,warnings <file-to-parse>
```
You might want to run this command on the `config.log` or any other similar file
found in the stage directory:
```console
$ spack location -s <spec>
```
In case in `config.log` there are other settings that you think might be the cause
of the build failure, you can consider attaching the file to this issue.
### General information
Rebuilding the package with the following options:
```console
$ spack -d install -j 1 <spec>
...
```
will provide additional debug information. After the failure you will find two files in the current directory:
<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. -->
- [ ] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [ ] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers
- [ ] I have uploaded the build log and environment files
- [ ] I have searched the issues of this repo and believe this is not a duplicate
1. `spack-cc-<spec>.in`, which contains details on the command given in input
to Spack's compiler wrapper
1. `spack-cc-<spec>.out`, which contains the command used to compile / link the
failed object after Spack's compiler wrapper did its processing
You can post or attach those files to provide maintainers with more information on what
is causing the failure.


@@ -1,33 +1,28 @@
---
name: "\U0001F38A Feature request"
name: Feature request
about: Suggest adding a feature that is not yet in Spack
labels: feature
---
<!--*Please add a concise summary of your suggestion here.*-->
*Please add a concise summary of your suggestion here.*
### Rationale
<!--*Is your feature request related to a problem? Please describe it!*-->
*Is your feature request related to a problem? Please describe it!*
### Description
<!--*Describe the solution you'd like and the alternatives you have considered.*-->
*Describe the solution you'd like and the alternatives you have considered.*
### Additional information
<!--*Add any other context about the feature request here.*-->
*Add any other context about the feature request here.*
### General information
- [ ] I have run `spack --version` and reported the version of Spack
- [ ] I have searched the issues of this repo and believe this is not a duplicate
-----
If you want to ask a question about the tool (how to use it, what it can currently do, etc.), try the `#general` channel on our Slack first. We have a welcoming community and chances are you'll get your reply faster and without opening an issue.
<!--If you want to ask a question about the tool (how to use it, what it can currently do, etc.), try the `#general` channel on our Slack first. We have a welcoming community and chances are you'll get your reply faster and without opening an issue.
Other than that, thanks for taking the time to contribute to Spack!
-->
Other than that, thanks for taking the time to contribute to Spack!


@@ -1,5 +0,0 @@
#!/usr/bin/env sh
git clone https://github.com/spack/spack.git
echo -e "config:\n build_jobs: 2" > spack/etc/spack/config.yaml
. spack/share/spack/setup-env.sh
spack compilers


@@ -5,7 +5,6 @@ on:
branches:
- master
- develop
- releases/**
pull_request:
branches:
- master
@@ -19,8 +18,6 @@ on:
- '!var/spack/repos/builtin/packages/py-setuptools/**'
- '!var/spack/repos/builtin/packages/openjpeg/**'
- '!var/spack/repos/builtin/packages/r-rcpp/**'
# Don't run if we only modified documentation
- 'lib/spack/docs/**'
jobs:
build:
@@ -31,7 +28,7 @@ jobs:
matrix:
package: [lz4, mpich, tut, py-setuptools, openjpeg, r-rcpp]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v1
- name: Cache ccache's store
uses: actions/cache@v1
with:
@@ -45,7 +42,6 @@ jobs:
python-version: 3.8
- name: Install System Packages
run: |
sudo apt-get update
sudo apt-get -yqq install ccache gfortran perl perl-base r-base r-base-core r-base-dev findutils openssl libssl-dev libpciaccess-dev
R --version
perl --version


@@ -1,62 +0,0 @@
name: linux tests
on:
push:
branches:
- master
- develop
- releases/**
pull_request:
branches:
- master
- develop
jobs:
unittests:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [2.7, 3.5, 3.6, 3.7, 3.8]
steps:
- uses: actions/checkout@v2
- name: Setup Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install System packages
run: |
sudo apt-get -y update
sudo apt-get install -y coreutils gfortran graphviz gnupg2 mercurial ninja-build patchelf
# Needed for kcov
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev zlib1g-dev libdw-dev libiberty-dev
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools codecov coverage
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
git config --global user.email "spack@example.com"
git config --global user.name "Test User"
git fetch -u origin develop:develop
- name: Install kcov for bash script coverage
env:
KCOV_VERSION: 34
run: |
KCOV_ROOT=$(mktemp -d)
wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
mkdir -p ${KCOV_ROOT}/build
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
- name: Run unit tests
env:
COVERAGE: true
run: |
share/spack/qa/run-unit-tests
coverage combine
coverage xml
- name: Upload to codecov.io
uses: codecov/codecov-action@v1
with:
flags: unittests,linux


@@ -1,58 +0,0 @@
# These are nightly package tests for macOS
# focus areas:
# - initial user experience
# - scientific python stack
name: macOS builds nightly
on:
schedule:
# nightly at 1 AM
- cron: '0 1 * * *'
# GitHub Action Limits
# https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions
jobs:
install_gcc:
name: gcc with clang
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v gcc
install_jupyter_clang:
name: jupyter
runs-on: macos-latest
timeout-minutes: 700
steps:
- uses: actions/checkout@v2
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v py-jupyter %clang
install_scipy_clang:
name: scipy, mpl, pd
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v py-scipy %clang
spack install -v py-matplotlib %clang
spack install -v py-pandas %clang
install_mpi4py_clang:
name: mpi4py, petsc4py
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v py-mpi4py %clang
spack install -v py-petsc4py %clang


@@ -1,49 +0,0 @@
name: macos tests
on:
push:
branches:
- master
- develop
- releases/**
pull_request:
branches:
- master
- develop
jobs:
build:
runs-on: macos-latest
strategy:
matrix:
python-version: [3.7]
steps:
- uses: actions/checkout@v2
- name: Setup Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools
pip install --upgrade codecov coverage
pip install --upgrade flake8 pep8-naming
- name: Setup Homebrew packages
run: |
brew update
brew upgrade
brew install gcc gnupg2 dash kcov
- name: Run unit tests
run: |
git --version
git fetch -u origin develop:develop
. share/spack/setup-env.sh
coverage run $(which spack) test
coverage combine
coverage xml
- name: Upload to codecov.io
uses: codecov/codecov-action@v1
with:
file: ./coverage.xml
flags: unittests,macos


@@ -5,7 +5,6 @@ on:
branches:
- master
- develop
- releases/**
pull_request:
branches:
- master
@@ -16,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v1
- name: Setup Python
uses: actions/setup-python@v1
with:

.gitignore

@@ -4,7 +4,6 @@
/var/spack/environments
/var/spack/repos/*/index.yaml
/var/spack/repos/*/lock
__pycache__/
*.pyc
/opt
*~
@@ -23,18 +22,8 @@ __pycache__/
.coverage
\#*
.#*
.cache
lib/spack/spack/test/.cache
/bin/spackc
*.in.log
*.out.log
*.orig
# Eclipse files
.project
.cproject
.pydevproject
# VSCode files
.vscode
.devcontainer


@@ -12,7 +12,7 @@ branches:
# Build matrix
#=============================================================================
dist: bionic
dist: xenial
jobs:
fast_finish: true
@@ -22,34 +22,45 @@ jobs:
os: linux
language: python
env: TEST_SUITE=flake8
# Shell integration with module files
- python: '3.8'
os: linux
language: python
env: [ TEST_SUITE=bootstrap ]
- stage: 'unit tests + documentation'
python: '2.6'
dist: trusty
os: linux
language: python
addons:
apt:
# Everything but patchelf, that is not available for trusty
packages:
- ccache
- gfortran
- graphviz
- gnupg2
- kcov
- mercurial
- ninja-build
- realpath
- zsh
- fish
env: [ TEST_SUITE=unit, COVERAGE=true ]
- python: '3.8'
os: linux
language: python
env: [ TEST_SUITE=shell, COVERAGE=true, KCOV_VERSION=38 ]
- python: '2.7'
os: linux
language: python
env: [ TEST_SUITE=unit, COVERAGE=true ]
- python: '3.5'
os: linux
language: python
env: TEST_SUITE=unit
- python: '3.6'
os: linux
language: python
env: TEST_SUITE=unit
- python: '3.7'
os: linux
language: python
env: TEST_SUITE=unit
- python: '3.8'
os: linux
language: python
env: [ TEST_SUITE=unit, COVERAGE=true ]
- python: '3.8'
os: linux
language: python
env: TEST_SUITE=doc
- os: osx
language: generic
env: [ TEST_SUITE=unit, PYTHON_VERSION=2.7, COVERAGE=true ]
if: type != pull_request
stages:
- 'style checks'
@@ -66,15 +77,29 @@ addons:
apt:
packages:
- ccache
- coreutils
- cmake
- gfortran
- graphviz
- gnupg2
- kcov
- mercurial
- ninja-build
- patchelf
- perl
- perl-base
- realpath
- r-base
- r-base-core
- r-base-dev
- zsh
- fish
# for Mac builds, we use Homebrew
homebrew:
packages:
- python@2
- gcc
- gnupg2
- ccache
- dash
- kcov
update: true
# ~/.ccache needs to be cached directly as Travis is not taking care of it
@@ -85,18 +110,15 @@ cache:
directories:
- ~/.ccache
# Work around Travis's lack of support for Python on OSX
before_install:
- ccache -M 2G && ccache -z
# Install kcov manually, since it's not packaged for bionic beaver
- if [[ "$KCOV_VERSION" ]]; then
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev zlib1g-dev libdw-dev libiberty-dev;
KCOV_ROOT=$(mktemp -d);
wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz;
tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz;
mkdir -p ${KCOV_ROOT}/build;
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd - ;
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install;
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
pip2 install --upgrade pip;
pip2 install virtualenv;
virtualenv venv;
source venv/bin/activate;
fi
- ccache -M 2G && ccache -z
# Install various dependencies
install:
@@ -134,10 +156,6 @@ after_success:
--flags "${TEST_SUITE}${TRAVIS_OS_NAME}";
fi
;;
shell)
codecov --env PYTHON_VERSION
--required
--flags "${TEST_SUITE}${TRAVIS_OS_NAME}";
esac
#=============================================================================
@@ -145,8 +163,6 @@ after_success:
#=============================================================================
notifications:
email:
recipients:
- tgamblin@llnl.gov
- massimiliano.culpo@gmail.com
recipients: tgamblin@llnl.gov
on_success: change
on_failure: always


@@ -1,237 +1,3 @@
# v0.15.1 (2020-07-10)
This minor release includes several important fixes:
* Fix shell support on Cray (#17386)
* Fix use of externals installed with other Spack instances (#16954)
* Fix gcc+binutils build (#9024)
* Fixes for usage of intel-mpi (#17378 and #17382)
* Fixes to Autotools config.guess detection (#17333 and #17356)
* Update `spack install` message to prompt user when an environment is not
explicitly activated (#17454)
This release also adds a mirror for all sources that are
fetched in Spack (#17077). It is expected to be useful when the
official website for a Spack package is unavailable.
# v0.15.0 (2020-06-28)
`v0.15.0` is a major feature release.
## Major Features in this release
1. **Cray support** Spack will now work properly on Cray "Cluster"
systems (non XC systems) and after a `module purge` command on Cray
systems. See #12989
2. **Virtual package configuration** Virtual packages are allowed in
packages.yaml configuration. This allows users to specify a virtual
package as non-buildable without needing to specify for each
implementation. See #14934
3. **New config subcommands** This release adds `spack config add` and
`spack config remove` commands to add to and remove from yaml
configuration files from the CLI. See #13920
4. **Environment activation** Anonymous environments are **no longer**
automatically activated in the current working directory. To activate
an environment from a `spack.yaml` file in the current directory, use
the `spack env activate .` command. This removes a concern that users
were too easily polluting their anonymous environments with accidental
installations. See #17258
5. **Apple clang compiler** The clang compiler and the apple-clang
compiler are now separate compilers in Spack. This allows Spack to
improve support for the apple-clang compiler. See #17110
6. **Finding external packages** Spack packages can now support an API
for finding external installations. This allows the `spack external
find` command to automatically add installations of those packages to
the user's configuration. See #15158
## Additional new features of note
* support for using Spack with the fish shell (#9279)
* `spack load --first` option to load first match (instead of prompting user) (#15622)
* support the Cray cce compiler both new and classic versions (#17256, #12989)
* `spack dev-build` command:
* supports stopping before a specified phase (#14699)
* supports automatically launching a shell in the build environment (#14887)
* `spack install --fail-fast` allows builds to fail at the first error (rather than best-effort) (#15295)
* environments: SpecList references can be dereferenced as compiler or dependency constraints (#15245)
* `spack view` command: new support for a copy/relocate view type (#16480)
* ci pipelines: see documentation for several improvements
* `spack mirror -a` command now supports excluding packages (#14154)
* `spack buildcache create` is now environment-aware (#16580)
* module generation: more flexible format for specifying naming schemes (#16629)
* lmod module generation: packages can be configured as core specs for lmod hierarchy (#16517)
## Deprecations and Removals
The following commands were deprecated in v0.13.0, and have now been removed:
* `spack configure`
* `spack build`
* `spack diy`
The following commands were deprecated in v0.14.0, and will be removed in the next major release:
* `spack bootstrap`
## Bugfixes
Some of the most notable bugfixes in this release include:
* Spack environments can now contain the string `-h` (#15429)
* The `spack install` gracefully handles being backgrounded (#15723, #14682)
* Spack uses `-isystem` instead of `-I` in cases that the underlying build system does as well (#16077)
* Spack no longer prints any specs that cannot be safely copied into a Spack command (#16462)
* Incomplete Spack environments containing python no longer cause problems (#16473)
* Several improvements to binary package relocation
## Package Improvements
The Spack project is constantly engaged in routine maintenance,
bugfixes, and improvements for the package ecosystem. Of particular
note in this release are the following:
* Spack now contains 4339 packages. There are 430 newly supported packages in v0.15.0
* GCC now builds properly on ARM architectures (#17280)
* Python: patched to support compiling mixed C/C++ python modules through distutils (#16856)
* improvements to pytorch and py-tensorflow packages
* improvements to major MPI implementations: mvapich2, mpich, openmpi, and others
## Spack Project Management:
* Much of the Spack CI infrastructure has moved from Travis to GitHub Actions (#16610, #14220, #16345)
* All merges to the `develop` branch run E4S CI pipeline (#16338)
* New `spack debug report` command makes reporting bugs easier (#15834)
# v0.14.2 (2020-04-15)
This is a minor release on the `0.14` series. It includes performance
improvements and bug fixes:
* Improvements to how `spack install` handles foreground/background (#15723)
* Major performance improvements for reading the package DB (#14693, #15777)
* No longer check for the old `index.yaml` database file (#15298)
* Properly activate environments with '-h' in the name (#15429)
* External packages have correct `.prefix` in environments/views (#15475)
* Improvements to computing env modifications from sourcing files (#15791)
* Bugfix on Cray machines when getting `TERM` env variable (#15630)
* Avoid adding spurious `LMOD` env vars to Intel modules (#15778)
* Don't output [+] for mock installs run during tests (#15609)
# v0.14.1 (2020-03-20)
This is a bugfix release on top of `v0.14.0`. Specific fixes include:
* several bugfixes for parallel installation (#15339, #15341, #15220, #15197)
* `spack load` now works with packages that have been renamed (#14348)
* bugfix for `suite-sparse` installation (#15326)
* deduplicate identical suffixes added to module names (#14920)
* fix issues with `configure_args` during module refresh (#11084)
* increased test coverage and test fixes (#15237, #15354, #15346)
* remove some unused code (#15431)
# v0.14.0 (2020-02-23)
`v0.14.0` is a major feature release, with 3 highlighted features:
1. **Distributed builds.** Multiple Spack instances will now coordinate
properly with each other through locks. This works on a single node
(where you've called `spack` several times) or across multiple nodes
with a shared filesystem. For example, with SLURM, you could build
`trilinos` and its dependencies on 2 24-core nodes, with 3 Spack
instances per node and 8 build jobs per instance, with `srun -N 2 -n 6
spack install -j 8 trilinos`. This requires a filesystem with locking
enabled, but not MPI or any other library for parallelism.
2. **Build pipelines.** You can also build in parallel through Gitlab
CI. Simply create a Spack environment and push it to Gitlab to build
on Gitlab runners. Pipeline support is now integrated into a single
`spack ci` command, so setting it up is easier than ever. See the
[Pipelines section](https://spack.readthedocs.io/en/v0.14.0/pipelines.html)
in the docs.
3. **Container builds.** The new `spack containerize` command allows you
to create a Docker or Singularity recipe from any Spack environment.
There are options to customize the build if you need them. See the
[Container Images section](https://spack.readthedocs.io/en/latest/containers.html)
in the docs.
In addition, there are several other new commands, many bugfixes and
improvements, and `spack load` no longer requires modules, so you can use
it the same way on your laptop or on your supercomputer.
Spack grew by over 300 packages since our last release in November 2019,
and the project grew to over 500 contributors. Thanks to all of you for
making yet another great release possible. Detailed notes below.
## Major new core features
* Distributed builds: spack instances coordinate and build in parallel (#13100)
* New `spack ci` command to manage CI pipelines (#12854)
* Generate container recipes from environments: `spack containerize` (#14202)
* `spack load` now works without using modules (#14062, #14628)
* Garbage collect old/unused installations with `spack gc` (#13534)
* Configuration files all set environment modifications the same way (#14372,
[docs](https://spack.readthedocs.io/en/v0.14.0/configuration.html#environment-modifications))
* `spack commands --format=bash` auto-generates completion (#14393, #14607)
* Packages can specify alternate fetch URLs in case one fails (#13881)
## Improvements
* Improved locking for concurrency with environments (#14676, #14621, #14692)
* `spack test` sends args to `pytest`, supports better listing (#14319)
* Better support for aarch64 and cascadelake microarch (#13825, #13780, #13820)
* Archspec is now a separate library (see https://github.com/archspec/archspec)
* Many improvements to the `spack buildcache` command (#14237, #14346,
#14466, #14467, #14639, #14642, #14659, #14696, #14698, #14714, #14732,
#14929, #15003, #15086, #15134)
## Selected Bugfixes
* Compilers now require an exact match on version (#8735, #14730, #14752)
* Bugfix for patches that specified specific versions (#13989)
* `spack find -p` now works in environments (#10019, #13972)
* Dependency queries work correctly in `spack find` (#14757)
* Bugfixes for locking upstream Spack instances chains (#13364)
* Fixes for PowerPC clang optimization flags (#14196)
* Fix for issue with compilers and specific microarchitectures (#13733, #14798)
## New commands and options
* `spack ci` (#12854)
* `spack containerize` (#14202)
* `spack gc` (#13534)
* `spack load` accepts `--only package`, `--only dependencies` (#14062, #14628)
* `spack commands --format=bash` (#14393)
* `spack commands --update-completion` (#14607)
* `spack install --with-cache` has new option: `--no-check-signature` (#11107)
* `spack test` now has `--list`, `--list-long`, and `--list-names` (#14319)
* `spack install --help-cdash` moves CDash help out of the main help (#13704)
## Deprecations
* `spack release-jobs` has been rolled into `spack ci`
* `spack bootstrap` will be removed in a future version, as it is no longer
needed to set up modules (see `spack load` improvements above)
## Documentation
* New section on building container images with Spack (see
[docs](https://spack.readthedocs.io/en/latest/containers.html))
* New section on using `spack ci` command to build pipelines (see
[docs](https://spack.readthedocs.io/en/latest/pipelines.html))
* Document how to add conditional dependencies (#14694)
* Document how to use Spack to replace Homebrew/Conda (#13083, see
[docs](https://spack.readthedocs.io/en/latest/workflows.html#using-spack-to-replace-homebrew-conda))
## Important package changes
* 3,908 total packages (345 added since 0.13.0)
* Added first cut at a TensorFlow package (#13112)
* We now build R without "recommended" packages, manage them w/Spack (#12015)
* Elpa and OpenBLAS now leverage microarchitecture support (#13655, #14380)
* Fix `octave` compiler wrapper usage (#14726)
* Enforce that packages in `builtin` aren't missing dependencies (#13949)
# v0.13.4 (2020-02-07)
This release contains several bugfixes:
@@ -250,8 +16,8 @@ RHEL8.
* mirror bugfixes: symlinks, duplicate patches, and exception handling (#13789)
* don't try to fetch `BundlePackages` (#13908)
* avoid re-fetching patches already added to a mirror (#13908)
* avoid re-fetching already added patches (#13908)
* avoid re-fetching already added patches (#13908)
* avoid re-fetching alread added patches (#13908)
* avoid re-fetching alread added patches (#13908)
* allow repeated invocations of `spack mirror create` on the same dir (#13908)
* bugfix for RHEL8 when `python` is unavailable (#14252)
* improve concretization performance in environments (#14190)


@@ -1,10 +1,7 @@
# <img src="https://cdn.rawgit.com/spack/spack/develop/share/spack/logo/spack-logo.svg" width="64" valign="middle" alt="Spack"/> Spack
[![MacOS Tests](https://github.com/spack/spack/workflows/macos%20tests/badge.svg)](https://github.com/spack/spack/actions)
[![Linux Tests](https://github.com/spack/spack/workflows/linux%20tests/badge.svg)](https://github.com/spack/spack/actions)
[![Build Status](https://travis-ci.org/spack/spack.svg?branch=develop)](https://travis-ci.org/spack/spack)
[![Linux Builds](https://github.com/spack/spack/workflows/linux%20builds/badge.svg)](https://github.com/spack/spack/actions)
[![macOS Builds (nightly)](https://github.com/spack/spack/workflows/macOS%20builds%20nightly/badge.svg?branch=develop)](https://github.com/spack/spack/actions?query=workflow%3A%22macOS+builds+nightly%22)
[![Build Status](https://travis-ci.com/spack/spack.svg?branch=develop)](https://travis-ci.com/spack/spack)
[![codecov](https://codecov.io/gh/spack/spack/branch/develop/graph/badge.svg)](https://codecov.io/gh/spack/spack)
[![Read the Docs](https://readthedocs.org/projects/spack/badge/?version=latest)](https://spack.readthedocs.io)
[![Slack](https://spackpm.herokuapp.com/badge.svg)](https://spackpm.herokuapp.com)
@@ -123,4 +120,4 @@ See [LICENSE-MIT](https://github.com/spack/spack/blob/develop/LICENSE-MIT),
SPDX-License-Identifier: (Apache-2.0 OR MIT)
LLNL-CODE-811652
LLNL-CODE-647188


@@ -22,4 +22,4 @@
#
# This is compatible across platforms.
#
exec /usr/bin/env spack python "$@"
/usr/bin/env spack python "$@"


@@ -16,7 +16,7 @@
config:
# This is the path to the root of the Spack install tree.
# You can use $spack here to refer to the root of the spack instance.
install_tree: $spack/opt/spack
install_tree: ~/.spack/opt/spack
# Locations where templates should be found
@@ -30,8 +30,8 @@ config:
# Locations where different types of modules should be installed.
module_roots:
tcl: $spack/share/spack/modules
lmod: $spack/share/spack/lmod
tcl: ~/.spack/share/spack/modules
lmod: ~/.spack/share/spack/lmod
# Temporary locations Spack can try to use for builds.
@@ -67,7 +67,7 @@ config:
# Cache directory for already downloaded source tarballs and archived
# repositories. This can be purged with `spack clean --downloads`.
source_cache: $spack/var/spack/cache
source_cache: ~/.spack/var/spack/cache
# Cache directory for miscellaneous files, like the package index.
@@ -75,12 +75,6 @@ config:
misc_cache: ~/.spack/cache
# Timeout in seconds used for downloading sources etc. This only applies
# to the connection phase and can be increased for slow connections or
# servers. 0 means no timeout.
connect_timeout: 10
# If this is false, tools like curl that use SSL will not verify
# certifiates. (e.g., curl will use use the -k option)
verify_ssl: true
@@ -143,7 +137,7 @@ config:
# when Spack needs to manage its own package metadata and all operations are
# expected to complete within the default time limit. The timeout should
# therefore generally be left untouched.
db_lock_timeout: 3
db_lock_timeout: 120
# How long to wait when attempting to modify a package (e.g. to install it).
@@ -157,7 +151,3 @@ config:
# Has no effect on macOS. DO NOT MIX these within the same install tree.
# See the Spack documentation for details.
shared_linking: 'rpath'
# Set to 'false' to allow installation on filesystems that doesn't allow setgid bit
# manipulation by unprivileged user (e.g. AFS)
allow_sgid: true
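
Taken together, the config.yaml hunks above relocate all user-writable state into the user's home directory. A condensed sketch of the branch-side defaults, assuming the second value in each changed pair is the branch's side (the +/- markers were lost in this capture):

```yaml
config:
  install_tree: ~/.spack/opt/spack        # per-user install tree
  module_roots:
    tcl: ~/.spack/share/spack/modules
    lmod: ~/.spack/share/spack/lmod
  source_cache: ~/.spack/var/spack/cache  # downloads cached per user
  misc_cache: ~/.spack/cache
  db_lock_timeout: 120                    # longer timeout for a shared database
```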


@@ -16,6 +16,8 @@
modules:
prefix_inspections:
lib:
- DYLD_LIBRARY_PATH
- DYLD_FALLBACK_LIBRARY_PATH
lib64:
- DYLD_LIBRARY_PATH
- DYLD_FALLBACK_LIBRARY_PATH


@@ -15,11 +15,7 @@
# -------------------------------------------------------------------------
packages:
all:
compiler:
- apple-clang
- clang
- gcc
- intel
compiler: [clang, gcc, intel]
providers:
elf: [libelf]
unwind: [apple-libunwind]


@@ -1,2 +0,0 @@
mirrors:
spack-public: https://spack-llnl-mirror.s3-us-west-2.amazonaws.com/


@@ -30,11 +30,7 @@ modules:
lib64:
- LIBRARY_PATH
include:
- C_INCLUDE_PATH
- CPLUS_INCLUDE_PATH
# The INCLUDE env variable specifies paths to look for
# .mod file for Intel Fortran compilers
- INCLUDE
- CPATH
lib/pkgconfig:
- PKG_CONFIG_PATH
lib64/pkgconfig:


@@ -27,7 +27,6 @@ packages:
glx: [mesa+glx, opengl]
glu: [mesa-glu, openglu]
golang: [gcc]
iconv: [libiconv]
ipp: [intel-ipp]
java: [openjdk, jdk, ibm-java]
jpeg: [libjpeg-turbo, libjpeg]
@@ -40,13 +39,10 @@ packages:
opencl: [pocl]
pil: [py-pillow]
pkgconfig: [pkgconf, pkg-config]
rpc: [libtirpc]
scalapack: [netlib-scalapack]
sycl: [hipsycl]
szip: [libszip, libaec]
tbb: [intel-tbb]
unwind: [libunwind]
sycl: [hipsycl]
permissions:
read: world
write: user


@@ -0,0 +1,7 @@
upstreams:
global:
install_tree: $spack/opt/spack
modules:
tcl: $spack/share/spack/modules
lmod: $spack/share/spack/lmod
dotkit: $spack/share/spack/dotkit
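
The new upstreams.yaml default above is the core of the features/shared branch: each per-user Spack instance registers the site-wide tree as a "global" upstream. A rough usage sketch based only on the commit messages in this compare (the --global flag on install/uninstall and the /opt/spack default global upstream); the exact CLI behavior on this branch may differ:

```console
$ spack install --global zlib     # install into the shared global tree (default /opt/spack)
$ spack install zlib              # install into the per-user tree under ~/.spack
$ spack uninstall --global zlib   # per the commits, --global works for uninstall as well
```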


@@ -25,14 +25,6 @@ It is recommended that the following be put in your ``.bashrc`` file:
alias less='less -R'
If you do not see colorized output when using ``less -R`` it is because color
is being disabled in the piped output. In this case, tell spack to force
colorized output.
.. code-block:: console
$ spack --color always | less -R
--------------------------
Listing available packages
--------------------------
@@ -53,7 +45,7 @@ can install:
.. command-output:: spack list
:ellipsis: 10
There are thousands of them, so we've truncated the output above, but you
There are thosands of them, so we've truncated the output above, but you
can find a :ref:`full list here <package-list>`.
Packages are listed by name in alphabetical order.
A pattern to match with no wildcards, ``*`` or ``?``,
@@ -275,7 +267,7 @@ the ``spack gc`` ("garbage collector") command, which will uninstall all unneede
-- linux-ubuntu18.04-broadwell / gcc@9.0.1 ----------------------
hdf5@1.10.5 libiconv@1.16 libpciaccess@0.13.5 libszip@2.1.1 libxml2@2.9.9 mpich@3.3.2 openjpeg@2.3.1 xz@5.2.4 zlib@1.2.11
In the example above Spack went through all the packages in the package database
In the example above Spack went through all the packages in the DB
and removed everything that is not either:
1. A package installed upon explicit request of the user
@@ -627,8 +619,8 @@ output metadata on specs and all dependencies as json:
"target": "x86_64"
},
"compiler": {
"name": "apple-clang",
"version": "10.0.0"
"name": "clang",
"version": "10.0.0-apple"
},
"namespace": "builtin",
"parameters": {
@@ -862,7 +854,7 @@ Variants are named options associated with a particular package. They are
optional, as each package must provide default values for each variant it
makes available. Variants can be specified using
a flexible parameter syntax ``name=<value>``. For example,
``spack install libelf debug=True`` will install libelf built with debug
``spack install libelf debug=True`` will install libelf build with debug
flags. The names of particular variants available for a package depend on
what was provided by the package author. ``spack info <package>`` will
provide information on what build variants are available.
@@ -925,7 +917,7 @@ contains any spaces. Any of ``cppflags=-O3``, ``cppflags="-O3"``,
``cppflags='-O3'``, and ``cppflags="-O3 -fPIC"`` are acceptable, but
``cppflags=-O3 -fPIC`` is not. Additionally, if the value of the
compiler flags is not the last thing on the line, it must be followed
by a space. The command ``spack install libelf cppflags="-O3"%intel``
by a space. The commmand ``spack install libelf cppflags="-O3"%intel``
will be interpreted as an attempt to set ``cppflags="-O3%intel"``.
The six compiler flags are injected in the order of implicit make commands
@@ -1075,13 +1067,13 @@ of failing:
In the snippet above, for instance, the microarchitecture was demoted to ``haswell`` when
compiling with ``gcc@4.8`` since support to optimize for ``broadwell`` starts from ``gcc@4.9:``.
Finally, if Spack has no information to match compiler and target, it will
Finally if Spack has no information to match compiler and target, it will
proceed with the installation but avoid injecting any microarchitecture
specific flags.
.. warning::
Currently, Spack doesn't print any warning to the user if it has no information
Currently Spack doesn't print any warning to the user if it has no information
on which optimization flags should be used for a given compiler. This behavior
might change in the future.
@@ -1091,7 +1083,7 @@ specific flags.
Virtual dependencies
--------------------
The dependency graph for ``mpileaks`` we saw above wasn't *quite*
The dependence graph for ``mpileaks`` we saw above wasn't *quite*
accurate. ``mpileaks`` uses MPI, which is an interface that has many
different implementations. Above, we showed ``mpileaks`` and
``callpath`` depending on ``mpich``, which is one *particular*
@@ -1234,8 +1226,6 @@ add a version specifier to the spec:
Notice that the package versions that provide insufficient MPI
versions are now filtered out.
.. _extensions:
---------------------------
Extensions & Python support
---------------------------
@@ -1243,7 +1233,8 @@ Extensions & Python support
Spack's installation model assumes that each package will live in its
own install prefix. However, certain packages are typically installed
*within* the directory hierarchy of other packages. For example,
`Python <https://www.python.org>`_ packages are typically installed in the
modules in interpreted languages like `Python
<https://www.python.org>`_ are typically installed in the
``$prefix/lib/python-2.7/site-packages`` directory.
Spack has support for this type of installation as well. In Spack,
@@ -1419,12 +1410,12 @@ packages listed as activated:
py-nose@1.3.4 py-numpy@1.9.1 py-setuptools@11.3.1
Now, when a user runs python, ``numpy`` will be available for import
*without* the user having to explicitly load it. ``python@2.7.8`` now
*without* the user having to explicitly loaded. ``python@2.7.8`` now
acts like a system Python installation with ``numpy`` installed inside
of it.
Spack accomplishes this by symbolically linking the *entire* prefix of
the ``py-numpy`` package into the prefix of the ``python`` package. To the
the ``py-numpy`` into the prefix of the ``python`` package. To the
python interpreter, it looks like ``numpy`` is installed in the
``site-packages`` directory.


@@ -57,12 +57,6 @@ Build caches are installed via:
$ spack buildcache install
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
List of popular build caches
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* `Extreme-scale Scientific Software Stack (E4S) <https://e4s-project.github.io/>`_: `build cache <https://oaciss.uoregon.edu/e4s/inventory.html>`_
----------
Relocation


@@ -58,9 +58,9 @@ directory. Here's an example of an external configuration:
packages:
openmpi:
paths:
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7: /opt/openmpi-1.4.3
openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7+debug: /opt/openmpi-1.4.3-debug
openmpi@1.6.5%intel@10.1 arch=linux-x86_64-debian7: /opt/openmpi-1.6.5-intel
This example lists three installations of OpenMPI, one built with GCC,
one built with GCC and debug information, and another built with Intel.
@@ -107,9 +107,9 @@ be:
   packages:
     openmpi:
       paths:
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
         openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
       buildable: False
The addition of the ``buildable`` flag tells Spack that it should never build
The ``buildable`` flag does not need to be paired with external packages.
It could also be used alone to forbid packages that may be
buggy or otherwise undesirable.
Virtual packages in Spack can also be specified as not buildable, and
external implementations can be provided. In the example above,
OpenMPI is configured as not buildable, but Spack will often prefer
other MPI implementations over the externally available OpenMPI. Every
MPI provider could be marked as not buildable individually, but it is
more convenient to do so for the virtual package as a whole:
.. code-block:: yaml
   packages:
     mpi:
       buildable: False
     openmpi:
       paths:
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
         openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
Implementations can also be listed immediately under the virtual they provide:
.. code-block:: yaml
   packages:
     mpi:
       buildable: False
       openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
       openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
       openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
       mpich@3.3 %clang@9.0.0 arch=linux-debian7-x86_64: /opt/mpich-3.3
Spack can then use any of the listed external implementations of MPI
to satisfy a dependency, and will choose depending on the compiler and
architecture.
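For instance, with the configuration above, concretizing a package that
depends on the virtual ``mpi`` should resolve to one of the listed externals
(an illustrative check):

.. code-block:: console

   $ spack spec hdf5+mpi %gcc@4.4.7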
.. _cmd-spack-external-find:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Automatically Find External Packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can run the :ref:`spack external find <cmd-spack-external-find>` command
to search for system-provided packages and add them to ``packages.yaml``.
After running this command your ``packages.yaml`` may include new entries:
.. code-block:: yaml
   packages:
     cmake:
       paths:
         cmake@3.17.2: /usr
This is useful for detecting a small set of commonly-used packages; for now
the mechanism is generally limited to finding build-only dependencies.
Specific limitations include:
* Packages are not discoverable by default: For a package to be
  discoverable with ``spack external find``, it needs to add special
  logic. See :ref:`here <make-package-findable>` for more details.
* The current implementation only collects and examines executable files,
  so it is typically only useful for build/run dependencies (in some cases
  if a library package also provides an executable, it may be possible to
  extract a meaningful Spec by running the executable - for example the
  compiler wrappers in MPI implementations).
* The logic does not search through module files, it can only detect
  packages with executables defined in ``PATH``; you can help Spack locate
  externals which use module files by loading any associated modules for
  packages that you want Spack to know about before running
  ``spack external find``.
* Spack does not overwrite existing entries in the package configuration:
  If there is an external defined for a spec at any configuration scope,
  then Spack will not add a new external entry (``spack config blame packages``
  can help locate all external entries).
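A minimal sketch of this workflow, assuming a module-provided CMake on the
system:

.. code-block:: console

   $ module load cmake           # make the executable visible in PATH first
   $ spack external find cmake   # may add an entry to packages.yaml
   $ spack config blame packages # show where each external entry comes from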
.. _concretization-preferences:


@@ -233,124 +233,7 @@ You may have noticed that most of the Autotools flags are of the form
``--without-baz``. Since these flags are so common, Spack provides a
couple of helper functions to make your life easier.
"""""""""""""""""
enable_or_disable
"""""""""""""""""
Autotools flags for simple boolean variants can be automatically
generated by calling the ``enable_or_disable`` method. This is
typically used to enable or disable some feature within the package.
.. code-block:: python
   variant(
       'memchecker',
       default=False,
       description='Memchecker support for debugging [degrades performance]'
   )

   config_args.extend(self.enable_or_disable('memchecker'))
In this example, specifying the variant ``+memchecker`` will generate
the following configuration options:
.. code-block:: console
--enable-memchecker
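Conversely, ``~memchecker`` in the spec would generate ``--disable-memchecker``.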
"""""""""""""""
with_or_without
"""""""""""""""
Autotools flags for more complex variants, including boolean variants
and multi-valued variants, can be automatically generated by calling
the ``with_or_without`` method.
.. code-block:: python
   variant(
       'schedulers',
       values=disjoint_sets(
           ('auto',), ('alps', 'lsf', 'tm', 'slurm', 'sge', 'loadleveler')
       ).with_non_feature_values('auto', 'none'),
       description="List of schedulers for which support is enabled; "
                   "'auto' lets openmpi determine",
   )

   if 'schedulers=auto' not in spec:
       config_args.extend(self.with_or_without('schedulers'))
In this example, specifying the variant ``schedulers=slurm,sge`` will
generate the following configuration options:
.. code-block:: console
--with-slurm --with-sge
``enable_or_disable`` is actually functionally equivalent to
``with_or_without``, and accepts the same arguments and variant types;
but idiomatic autotools packages often follow these naming
conventions.
""""""""""""""""
activation_value
""""""""""""""""
Autotools parameters that require an option can still be automatically
generated, using the ``activation_value`` argument to
``with_or_without`` (or, rarely, ``enable_or_disable``).
.. code-block:: python
   variant(
       'fabrics',
       values=disjoint_sets(
           ('auto',), ('psm', 'psm2', 'verbs', 'mxm', 'ucx', 'libfabric')
       ).with_non_feature_values('auto', 'none'),
       description="List of fabrics that are enabled; "
                   "'auto' lets openmpi determine",
   )

   if 'fabrics=auto' not in spec:
       config_args.extend(self.with_or_without('fabrics',
                                               activation_value='prefix'))
``activation_value`` accepts a callable that generates the configure
parameter value given the variant value; but the special value
``prefix`` tells Spack to automatically use the dependency's
installation prefix, which is the most common use for such
parameters. In this example, specifying the variant
``fabrics=libfabric`` will generate the following configuration
options:
.. code-block:: console
--with-libfabric=</path/to/libfabric>
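``activation_value`` may also be given as a callable; the following sketch is
hypothetical (the lambda and the ``prefix.lib`` attribute are illustrative,
not taken from the original text):

.. code-block:: python

   # hypothetical: derive each configure value from the variant value
   config_args.extend(self.with_or_without(
       'fabrics',
       activation_value=lambda val: self.spec[val].prefix.lib
   ))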
""""""""""""""""""""
activation overrides
""""""""""""""""""""
Finally, the behavior of either ``with_or_without`` or
``enable_or_disable`` can be overridden for specific variant
values. This is most useful for multi-valued variants where some of
the variant values require atypical behavior.
.. code-block:: python
   def with_or_without_verbs(self, activated):
       # Up through version 1.6, this option was named --with-openib.
       # In version 1.7, it was renamed to be --with-verbs.
       opt = 'verbs' if self.spec.satisfies('@1.7:') else 'openib'
       if not activated:
           return '--without-{0}'.format(opt)
       return '--with-{0}={1}'.format(opt, self.spec['rdma-core'].prefix)
Defining ``with_or_without_verbs`` overrides the behavior of a
``fabrics=verbs`` variant, changing the configure-time option to
``--with-openib`` for older versions of the package and specifying an
alternative dependency name:
.. code-block:: console
--with-openib=</path/to/rdma-core>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configure script in a sub-directory


@@ -128,20 +128,17 @@ Adding flags to cmake
^^^^^^^^^^^^^^^^^^^^^
To add additional flags to the ``cmake`` call, simply override the
``cmake_args`` function. The following example defines values for the flags
``WHATEVER``, ``ENABLE_BROKEN_FEATURE``, ``DETECT_HDF5``, and ``THREADS`` with
and without the :py:meth:`~.CMakePackage.define` and
:py:meth:`~.CMakePackage.define_from_variant` helper functions:
.. code-block:: python
   def cmake_args(self):
       args = [
           '-DWHATEVER:STRING=somevalue',
           self.define('ENABLE_BROKEN_FEATURE', False),
           self.define_from_variant('DETECT_HDF5', 'hdf5'),
           self.define_from_variant('THREADS'),  # True if +threads
       ]
       return args
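For a boolean variant such as ``hdf5``, ``define_from_variant('DETECT_HDF5',
'hdf5')`` expands to ``-DDETECT_HDF5:BOOL=ON`` when the spec carries ``+hdf5``
and to ``OFF`` otherwise, replacing the manual ``if``/``else`` branching that
older package recipes used.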


@@ -37,7 +37,7 @@ In order to use it, just add another base class to your package, for example:
   if '+cuda' in spec:
       options.append('-DWITH_CUDA=ON')
       cuda_arch = spec.variants['cuda_arch'].value
       if cuda_arch != 'none':
           options.append('-DCUDA_FLAGS=-arch=sm_{0}'.format(cuda_arch[0]))
   else:
       options.append('-DWITH_CUDA=OFF')


@@ -553,7 +553,7 @@ follow `the next section <intel-install-libs_>`_ instead.
f77: stub
fc: stub
Replace ``18.0.3`` with the version that you determined in the preceding
step. The contents under ``paths:`` do not matter yet.
You are right to ask: "Why on earth is that necessary?" [fn8]_.
@@ -696,7 +696,7 @@ follow `the next section <intel-install-libs_>`_ instead.
- /home/$user/spack-stage
Do not duplicate the ``config:`` line if it already is present.
Adapt the location, which here is the same as in the preceding example.
3. Retry installing the large package.
@@ -965,7 +965,7 @@ a *virtual* ``mkl`` package is declared in Spack.
Likewise, in a
:ref:`MakefilePackage <makefilepackage>`
or similar package that does not use AutoTools you may need to provide include
and link options for use on command lines or in environment variables.
For example, to generate an option string of the form ``-I<dir>``, use:
@@ -1055,6 +1055,6 @@ Footnotes
2. Set the hash length in ``install-path-scheme``, also in ``config.yaml``
(:ref:`q.v. <config-yaml>`).
3. You will want to set the *same* hash length for
   :ref:`module files <modules-projections>`
   if you have Spack produce them for you, under ``projections`` in
   ``modules.yaml``.


@@ -51,8 +51,10 @@ Build system dependencies
``SIPPackage`` requires several dependencies. Python is needed to run
the ``configure.py`` build script, and to run the resulting Python
libraries. Qt is needed to provide the ``qmake`` command. SIP is also
needed to build the package. All of these dependencies are automatically
added via the base class
.. code-block:: python
@@ -60,7 +62,11 @@ added via the base class
depends_on('qt', type='build')
depends_on('py-sip', type='build')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Passing arguments to ``configure.py``


@@ -47,9 +47,8 @@ Each phase provides a ``<phase>`` function that runs:
where ``<jobs>`` is the number of parallel jobs to build with. Each phase
also has a ``<phase_args>`` function that can pass arguments to this call.
All of these functions are empty. The ``configure`` phase
automatically adds ``--prefix=/path/to/installation/prefix``, so you
don't need to add that in the ``configure_args``.
^^^^^^^
Testing


@@ -44,7 +44,7 @@ Environments:
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
# Install the software, remove unnecessary deps
RUN cd /opt/spack-environment && spack install && spack gc -y
# Strip all the binaries
@@ -108,7 +108,7 @@ are currently supported are summarized in the table below:
- ``ubuntu:16.04``
- ``spack/ubuntu-xenial``
* - Ubuntu 18.04
- ``ubuntu:18.04``
- ``spack/ubuntu-bionic``
* - CentOS 6
- ``centos:6``
@@ -165,7 +165,7 @@ of environments:
# Extra instructions
extra_instructions:
final: |
RUN echo 'export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][gromacs]\[$(tput setaf 2)\]\u\[$(tput sgr0)\]:\w $ \[$(tput sgr0)\]"' >> ~/.bashrc
# Labels for the image
labels:
@@ -266,7 +266,7 @@ following ``Dockerfile``:
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
# Install the software, remove unnecessary deps
RUN cd /opt/spack-environment && spack install && spack gc -y
# Strip all the binaries
@@ -304,4 +304,4 @@ following ``Dockerfile``:
.. note::
   Spack can also produce Singularity definition files to build the image. The
   minimum version of Singularity required to build a SIF (Singularity Image Format)
   from them is ``3.5.3``.


@@ -385,8 +385,8 @@ coverage. This helps us tell what percentage of lines of code in Spack are
covered by unit tests. Although code covered by unit tests can still contain
bugs, it is much less error prone than code that is not covered by unit tests.
Codecov provides `browser extensions <https://github.com/codecov/sourcegraph-codecov>`_
for Google Chrome and Firefox. These extensions integrate with GitHub
and allow you to see coverage line-by-line when viewing the Spack repository.
If you are new to Spack, a great way to get started is to write unit tests to
increase coverage!


@@ -130,7 +130,7 @@ To activate an environment, use the following command:
By default, the ``spack env activate`` will load the view associated
with the Environment into the user environment. The ``-v,
--with-view`` argument ensures this behavior, and the ``-V,
--without-view`` argument activates the environment without changing
the user environment variables.
The ``-p`` option to the ``spack env activate`` command modifies the
@@ -167,6 +167,15 @@ Any directory can be treated as an environment if it contains a file
$ spack env activate -d /path/to/directory
Spack commands that are environment sensitive will also act on the
environment any time the current working directory contains a
``spack.yaml`` file. Changing working directory to a directory
containing a ``spack.yaml`` file is equivalent to the command:
.. code-block:: console
$ spack env activate -d /path/to/dir --without-view
Anonymous specs can be created in place using the command:
.. code-block:: console
@@ -272,18 +281,18 @@ in the lockfile, nor does it install the spec.
The ``spack add`` command is environment aware. It adds to the
currently active environment. All environment aware commands can also
be called using the ``spack -e`` flag to specify the environment.
.. code-block:: console
$ spack env activate myenv
$ spack add mpileaks
or
.. code-block:: console
$ spack -e myenv add python
.. _environments_concretization:
@@ -593,7 +602,7 @@ files are identical.
   spack:
     definitions:
       - first: [libelf, libdwarf]
       - compilers: ['%gcc', '%intel']
       - second:
           - $first
           - matrix:
@@ -638,7 +647,7 @@ named list ``compilers`` is ``['%gcc', '%clang', '%intel']`` on
   spack:
     definitions:
       - compilers: ['%gcc', '%clang']
       - when: arch.satisfies('x86_64:')
         compilers: ['%intel']
.. note::
@@ -657,12 +666,8 @@ The valid variables for a ``when`` clause are:
#. ``target``. The target string of the default Spack
   architecture on the system.
#. ``architecture`` or ``arch``. A Spack spec satisfying the default Spack
   architecture on the system. This supports querying via the ``satisfies``
   method, as shown above.
#. ``arch_str``. The architecture string of the default Spack architecture
   on the system.
#. ``re``. The standard regex module in Python.
@@ -671,40 +676,6 @@ The valid variables for a ``when`` clause are:
#. ``hostname``. The hostname of the system (if ``hostname`` is an
executable in the user's PATH).
""""""""""""""""""""""""
SpecLists as Constraints
""""""""""""""""""""""""
Dependencies and compilers in Spack can be both packages in an
environment and constraints on other packages. References to SpecLists
allow a shorthand to treat packages in a list as either a compiler or
a dependency using the ``$%`` or ``$^`` syntax respectively.
For example, the following environment has three root packages:
``gcc@8.1.0``, ``mvapich2@2.3.1 %gcc@8.1.0``, and ``hdf5+mpi
%gcc@8.1.0 ^mvapich2@2.3.1``.
.. code-block:: yaml
   spack:
     definitions:
       - compilers: [gcc@8.1.0]
       - mpis: [mvapich2@2.3.1]
       - packages: [hdf5+mpi]
     specs:
       - $compilers
       - matrix:
           - [$mpis]
           - [$%compilers]
       - matrix:
           - [$packages]
           - [$^mpis]
           - [$%compilers]
This allows for a much-needed reduction in redundancy between packages
and constraints.
^^^^^^^^^^^^^^^^^^^^^^^^^
Environment-managed Views
^^^^^^^^^^^^^^^^^^^^^^^^^


@@ -71,6 +71,10 @@ This automatically adds Spack to your ``PATH`` and allows the ``spack``
command to be used to execute spack :ref:`commands <shell-support>` and
:ref:`useful packaging commands <packaging-shell-support>`.
If :ref:`environment-modules <InstallEnvironmentModules>` is
installed and available, the ``spack`` command can also load and unload
:ref:`modules <modules>`.
^^^^^^^^^^^^^^^^^
Clean Environment
^^^^^^^^^^^^^^^^^
@@ -478,7 +482,7 @@ Fortran.
cxx: /usr/bin/clang++
f77: /path/to/bin/gfortran
fc: /path/to/bin/gfortran
spec: apple-clang@11.0.0
If you used Spack to install GCC, you can get the installation prefix by
@@ -847,7 +851,7 @@ from websites and from git.
.. warning::
   This workaround should be used ONLY as a last resort! Without SSL
   certificate verification, spack and git will download from sites you
   wouldn't normally trust. The code you download and run may then be
   compromised! While this is not a major issue for archives that will
@@ -896,8 +900,9 @@ Core Spack Utilities
^^^^^^^^^^^^^^^^^^^^
Core Spack uses the following packages, mainly to download and unpack
source code, and to load generated environment modules: ``curl``,
``env``, ``git``, ``go``, ``hg``, ``svn``, ``tar``, ``unzip``,
``patch``, ``environment-modules``.
As long as the user's environment is set up to successfully run these
programs from outside of Spack, they should work inside of Spack as
@@ -905,6 +910,10 @@ well. They can generally be activated as in the ``curl`` example above;
or some systems might already have an appropriate hand-built
environment module that may be loaded. Either way works.
If you find that you are missing some of these programs, ``spack`` can
build some of them for you with ``spack bootstrap``. Currently supported
programs are ``environment-modules``.
A few notes on specific programs in this list:
""""""""""""""""""""""""""
@@ -932,6 +941,45 @@ other programs will also not work, because they also rely on OpenSSL.
Once ``curl`` has been installed, you can similarly install the others.
.. _InstallEnvironmentModules:
"""""""""""""""""""
Environment Modules
"""""""""""""""""""
In order to use Spack's generated module files, you must have
installed ``environment-modules`` or ``lmod``. The simplest way
to get the latest version of either of these tools is installing
it as part of Spack's bootstrap procedure:
.. code-block:: console
$ spack bootstrap
.. warning::
   At the moment ``spack bootstrap`` is only able to install ``environment-modules``.
   Extending its capabilities to prefer ``lmod`` where possible is in the roadmap,
   and likely to happen before the next release.
Alternatively, on many Linux distributions, you can install a pre-built binary
from the vendor's repository. On Fedora/RHEL/CentOS, for example, this can be
done with the command:
.. code-block:: console
$ yum install environment-modules
Once you have the tool installed and available in your path, you can source
Spack's setup file:
.. code-block:: console
$ source share/spack/setup-env.sh
This activates :ref:`shell support <shell-support>` and makes commands like
``spack load`` available for use.
^^^^^^^^^^^^^^^^^
Package Utilities
^^^^^^^^^^^^^^^^^


@@ -17,16 +17,22 @@ Spack integrates with `Environment Modules
<http://lmod.readthedocs.io/en/latest/>`_ by
providing post-install hooks that generate module files and commands to manipulate them.
.. note::
   If your machine does not already have a module system installed,
   we advise you to use either Environment Modules or LMod. See :ref:`InstallEnvironmentModules`
   for more details.
.. _shell-support:
----------------------------
Using module files via Spack
----------------------------
If you have installed a supported module system either manually or through
``spack bootstrap``, you should be able to run either ``module avail`` or
``use -l spack`` to see what module files have been installed. Here is
sample output of those programs, showing lots of installed packages:
.. code-block:: console
@@ -87,7 +93,9 @@ Note that in the latter case it is necessary to explicitly set ``SPACK_ROOT``
before sourcing the setup file (you will get a meaningful error message
if you don't).
When ``bash`` and ``ksh`` users update their environment with ``setup-env.sh``,
it will check for spack-installed environment modules and add the ``module``
command to their environment; this only occurs if the ``module`` command is not
already available. You can install ``environment-modules`` with
``spack bootstrap`` as described in :ref:`InstallEnvironmentModules`.

Finally, if you want to have Spack's shell support available on the command line at
any login you can put this source line in one of the files that are sourced
at startup (like ``.profile``, ``.bashrc`` or ``.cshrc``). Be aware though
that the startup time may be slightly increased because of that.
@@ -157,6 +165,8 @@ used ``gcc``. You could therefore just type:
To identify just the one built with the Intel compiler.
.. _cmd-spack-module-loads:
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -459,14 +469,14 @@ is compiled with ``gcc@4.4.7``, with the only exception of any ``gcc``
or any ``llvm`` installation.
.. _modules-projections:

"""""""""""""""""""""""""""""""
Customize the naming of modules
"""""""""""""""""""""""""""""""
The names of environment modules generated by spack are not always easy to
fully comprehend due to the long hash in the name. There are three module
configuration options to help with that. The first is a global setting to
adjust the hash length. It can be set anywhere from 0 to 32 and has a default
length of 7. This is the representation of the hash in the module file name and
@@ -500,46 +510,20 @@ version of python a set of python extensions is associated with. Likewise, the
``openblas`` string is attached to any program that has openblas in the spec,
most likely via the ``+blas`` variant specification.
The most heavyweight solution to module naming is to change the entire
naming convention for module files. This uses the projections format
covered in :ref:`adding_projections_to_views`.
.. code-block:: yaml
   modules:
     tcl:
       projections:
         all: '{name}/{version}-{compiler.name}-{compiler.version}-module'
         ^mpi: '{name}/{version}-{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}-module'
will create module files that are nested in directories by package
name, contain the version and compiler name and version, and have the
word ``module`` before the hash for all specs that do not depend on
mpi, and will have the same information plus the MPI implementation
name and version for all packages that depend on mpi.
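For example, under the ``all`` projection above, a ``zlib@1.2.8`` built with
``gcc@4.4.7`` would produce a module file named along the lines of
``zlib/1.2.8-gcc-4.4.7-module`` (an illustrative name, not taken from the
original text).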
When specifying module names by projection for Lmod modules, we
recommend NOT including names of dependencies (e.g., MPI, compilers)
that are already in the LMod hierarchy.
.. note::
   TCL modules
   TCL modules also allow for explicit conflicts between modulefiles.
.. code-block:: yaml

   modules:
     enable:
       - tcl
     tcl:
       projections:
         all: '{name}/{version}-{compiler.name}-{compiler.version}'
       all:
         conflict:
           - '{name}'
           - 'intel/14.0.1'
will create module files that will conflict with ``intel/14.0.1`` and with the
base directory of the same module, effectively preventing the possibility to
@@ -564,8 +548,6 @@ that are already in the LMod hierarchy.
   lmod:
     core_compilers:
       - 'gcc@4.8'
     core_specs:
       - 'python'
     hierarchy:
       - 'mpi'
       - 'lapack'
@@ -575,15 +557,6 @@ that are already in the LMod hierarchy.
implementations of ``mpi`` and ``lapack``, and let LMod switch safely from one to the
other.
All packages built with a compiler in ``core_compilers`` and all
packages that satisfy a spec in ``core_specs`` will be put in the
``Core`` hierarchy of the lua modules.
.. warning::
   Consistency of Core packages
   The user is responsible for maintaining consistency among core packages, as ``core_specs``
   bypasses the hierarchy that allows LMod to safely switch between coherent software stacks.
.. warning::
   Deep hierarchies and ``lmod spider``
   For hierarchies that are deeper than three layers ``lmod spider`` may have some issues.


@@ -1675,15 +1675,15 @@ can see the patches that would be applied to ``m4``::
Concretized
--------------------------------
m4@1.4.18%apple-clang@9.0.0 patches=3877ab548f88597ab2327a2230ee048d2d07ace1062efe81fc92e91b7f39cd00,c0a408fbffb7255fcc75e26bd8edab116fc81d216bfd18b473668b7739a4158e,fc9b61654a3ba1a8d6cd78ce087e7c96366c290bc8d2c299f09828d793b853c8 +sigsegv arch=darwin-highsierra-x86_64
^libsigsegv@2.11%apple-clang@9.0.0 arch=darwin-highsierra-x86_64
You can also see patches that have been applied to installed packages
with ``spack find -v``::
$ spack find -v m4
==> 1 installed package
-- darwin-highsierra-x86_64 / apple-clang@9.0.0 -----------------
m4@1.4.18 patches=3877ab548f88597ab2327a2230ee048d2d07ace1062efe81fc92e91b7f39cd00,c0a408fbffb7255fcc75e26bd8edab116fc81d216bfd18b473668b7739a4158e,fc9b61654a3ba1a8d6cd78ce087e7c96366c290bc8d2c299f09828d793b853c8 +sigsegv
.. _cmd-spack-resource:
@@ -1713,7 +1713,7 @@ wonder where the extra boost patches are coming from::
$ spack spec dealii ^boost@1.68.0 ^hdf5+fortran | grep '\^boost'
^boost@1.68.0
^boost@1.68.0%apple-clang@9.0.0+atomic+chrono~clanglibcpp cxxstd=default +date_time~debug+exception+filesystem+graph~icu+iostreams+locale+log+math~mpi+multithreaded~numpy patches=2ab6c72d03dec6a4ae20220a9dfd5c8c572c5294252155b85c6874d97c323199,b37164268f34f7133cbc9a4066ae98fda08adf51e1172223f6a969909216870f ~pic+program_options~python+random+regex+serialization+shared+signals~singlethreaded+system~taggedlayout+test+thread+timer~versionedlayout+wave arch=darwin-highsierra-x86_64
$ spack resource show b37164268
b37164268f34f7133cbc9a4066ae98fda08adf51e1172223f6a969909216870f
path: /home/spackuser/src/spack/var/spack/repos/builtin/packages/dealii/boost_1.68.0.patch
@@ -2169,17 +2169,13 @@ Adding the following to a package:
.. code-block:: python
   conflicts('%intel', when='@:1.2',
             msg='<myNicePackage> <= v1.2 cannot be built with Intel ICC, '
                 'please use a newer release.')

we express the fact that the current package *cannot be built* with the Intel
compiler when we are trying to install a version "<=1.2". The ``when`` argument
can be omitted, in which case the conflict will always be active.

Conflicts are always evaluated after the concretization step has been performed,
and if any match is found a detailed error message is shown to the user.
You can add an additional message via the ``msg=`` parameter to a conflict that
provides more specific instructions for users.
.. _packaging_extensions:
@@ -2201,7 +2197,7 @@ property to ``True``, e.g.:
extendable = True
...
To make a package into an extension, simply add an
``extends`` call in the package definition, and pass it the name of an
extendable package:
@@ -2216,10 +2212,6 @@ Now, the ``py-numpy`` package can be used as an argument to ``spack
activate``. When it is activated, all the files in its prefix will be
symbolically linked into the prefix of the python package.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Adding additional constraints
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Some packages produce a Python extension, but are only compatible with
Python 3, or with Python 2. In those cases, a ``depends_on()``
declaration should be made in addition to the ``extends()``
@@ -2239,7 +2231,8 @@ variant(s) are selected. This may be accomplished with conditional
.. code-block:: python
   class FooLib(Package):
       variant('python', default=True, description='Build the Python extension Module')

       extends('python', when='+python')
       ...
@@ -2920,7 +2913,7 @@ discover its dependencies.
If you want to see the environment that a package will build with, or
if you want to run commands in that environment to test them out, you
can use the :ref:`cmd-spack-build-env` command, documented
below.
^^^^^^^^^^^^^^^^^^^^^
@@ -3614,7 +3607,7 @@ the command line.
For most compilers, ``$rpath_flag`` is ``-Wl,-rpath,``. However, NAG
passes its flags to GCC instead of passing them directly to the linker.
Therefore, its ``$rpath_flag`` is doubly wrapped: ``-Wl,-Wl,,-rpath,``.
``$rpath_flag`` can be overridden on a compiler specific basis in
``lib/spack/spack/compilers/$compiler.py``.
The compiler wrappers also pass the compiler flags specified by the user from
@@ -4048,70 +4041,6 @@ File functions
:py:func:`touch(path) <spack.touch>`
Create an empty file at ``path``.
.. _make-package-findable:
----------------------------------------------------------
Making a package discoverable with ``spack external find``
----------------------------------------------------------
To make a package discoverable with
:ref:`spack external find <cmd-spack-external-find>` you must
define one or more executables associated with the package and must
implement a method to generate a Spec when given an executable.
The executables are specified as a package level ``executables``
attribute which is a list of strings (see example below); each string
is treated as a regular expression (e.g. 'gcc' would match 'gcc', 'gcc-8.3',
'my-weird-gcc', etc.).
The method ``determine_spec_details`` has the following signature:
.. code-block:: python
   def determine_spec_details(prefix, exes_in_prefix):
       # exes_in_prefix = a set of paths, each path is an executable
       # prefix = a prefix that is common to each path in exes_in_prefix
       # return None or [] if none of the exes represent an instance of
       # the package. Return one or more Specs for each instance of the
       # package which is thought to be installed in the provided prefix
``determine_spec_details`` takes as parameters a set of discovered
executables (which match those specified by the user) as well as a
common prefix shared by all of those executables. The function must
return one or more Specs associated with the executables (it can also
return ``None`` to indicate that no provided executables are associated
with the package).
Say for example we have a package called ``foo-package`` which
builds an executable called ``foo``. ``FooPackage`` would appear as
follows:
.. code-block:: python
   class FooPackage(Package):
       homepage = "..."
       url = "..."
       version(...)

       # Each string provided here is treated as a regular expression, and
       # would match for example 'foo', 'foobar', and 'bazfoo'.
       executables = ['foo']

       @classmethod
       def determine_spec_details(cls, prefix, exes_in_prefix):
           candidates = list(x for x in exes_in_prefix
                             if os.path.basename(x) == 'foo')
           if not candidates:
               return
           # This implementation is lazy and only checks the first candidate
           exe_path = candidates[0]
           exe = spack.util.executable.Executable(exe_path)
           output = exe('--version')
           version_str = ...  # parse output for version string
           return Spec('foo-package@{0}'.format(version_str))
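With ``executables`` and ``determine_spec_details`` defined, detection of the
hypothetical package can be exercised directly:

.. code-block:: console

   $ spack external find foo-package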
.. _package-lifecycle:
-----------------------------
@@ -4167,23 +4096,16 @@ want to clean up the temporary directory, or if the package isn't
downloading properly, you might want to run *only* the ``fetch`` stage
of the build.
Spack performs best-effort installation of package dependencies by default,
which means it will continue to install as many dependencies as possible
after detecting failures. If you are trying to install a package with a
lot of dependencies where one or more may fail to build, you might want to
try the ``--fail-fast`` option to stop the installation process on the first
failure.
A typical package workflow might look like this:
.. code-block:: console
$ spack edit mypackage
$ spack install --fail-fast mypackage
... build breaks! ...
$ spack clean mypackage
$ spack edit mypackage
$ spack install --fail-fast mypackage
... repeat clean/install until install works ...
Below are some commands that will allow you some finer-grained
@@ -4252,29 +4174,23 @@ Does this in one of two ways:
``spack clean``
^^^^^^^^^^^^^^^
Cleans up Spack's temporary and cached files. This command can be used to
recover disk space if temporary files from interrupted or failed installs
accumulate.
When called with ``--stage`` or without arguments this removes all staged
files.
The ``--downloads`` option removes :ref:`cached <caching>` downloads.
You can force the removal of all install failure tracking markers using the
``--failures`` option. Note that ``spack install`` will automatically clear
relevant failure markings prior to performing the requested installation(s).
Long-lived caches, like the virtual package index, are removed using the
``--misc-cache`` option.
The ``--python-cache`` option removes `.pyc`, `.pyo`, and `__pycache__`
folders.
To remove all of the above, the command can be called with ``--all``.
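For example:

.. code-block:: console

   $ spack clean --downloads   # remove cached downloads only
   $ spack clean --all         # remove everything described above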
When called with positional arguments, this command cleans up temporary files
only for a particular package. If ``fetch``, ``stage``, or ``install``
are run again after this, Spack's build process will start from scratch.
@@ -4416,31 +4332,31 @@ directory, install directory, package directory) and others change to
core spack locations. For example, ``spack cd --module-dir`` will take you to
the main python source directory of your spack install.
.. _cmd-spack-build-env:
^^^^^^^^^^^^^^^^^^^
``spack build-env``
^^^^^^^^^^^^^^^^^^^
``spack build-env`` functions much like the standard unix ``env``
command, but it takes a spec as an argument. You can use it to see the
environment variables that will be set when a particular build runs,
for example:
.. code-block:: console
$ spack build-env mpileaks@1.1%intel
This will display the entire environment that will be set when the
``mpileaks@1.1%intel`` build runs.
To run commands in a package's build environment, you can simply
provide them after the spec argument to ``spack build-env``:
.. code-block:: console
$ spack cd mpileaks@1.1%intel
$ spack build-env mpileaks@1.1%intel ./configure
This will cd to the build directory and then run ``configure`` in the
package's build environment.
@@ -4538,7 +4454,7 @@ translate variant flags into CMake definitions. For example:
.. code-block:: python
def cmake_args(self):
spec = self.spec
return [
'-DUSE_EVERYTRACE=%s' % ('YES' if '+everytrace' in spec else 'NO'),


@@ -32,47 +32,30 @@ for setting up a build pipeline are as follows:
#. Create a repository on your gitlab instance
#. Add a ``spack.yaml`` at the root containing your pipeline environment (see
below for details)
#. Add a ``.gitlab-ci.yml`` at the root containing two jobs (one to generate
   the pipeline dynamically, and one to run the generated jobs), similar to
this one:
.. code-block:: yaml
   stages: [generate, build]

   generate-pipeline:
     stage: generate
     tags:
       - <custom-tag>
     script:
       - spack env activate .
       - spack ci generate
         --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
     artifacts:
       paths:
         - "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"

   build-jobs:
     stage: build
     trigger:
       include:
         - artifact: "jobs_scratch_dir/pipeline.yml"
           job: generate-pipeline
       strategy: depend
#. Add any secrets required by the CI process to environment variables using the
CI web ui
#. Push a commit containing the ``spack.yaml`` and ``.gitlab-ci.yml`` mentioned above
to the gitlab repository
The ``<custom-tag>``, above, is used to pick one of your configured runners to
run the pipeline generation phase (this is implemented in the ``spack ci generate``
command, which assumes the runner has an appropriate version of spack installed
and configured for use). Of course, there are many ways to customize the process.
You can configure CDash reporting on the progress of your builds, set up S3 buckets
to mirror binaries built by the pipeline, clone a custom spack repository/ref for
use by the pipeline, and more.
While it is possible to set up pipelines on gitlab.com, the builds there are
limited to 60 minutes and generic hardware. It is also possible to
@@ -81,30 +64,21 @@ Gitlab to Google Kubernetes Engine (`GKE <https://cloud.google.com/kubernetes-en
or Amazon Elastic Kubernetes Service (`EKS <https://aws.amazon.com/eks>`_), though those
topics are outside the scope of this document.
Spack's pipelines are now making use of the
`trigger <https://docs.gitlab.com/12.9/ee/ci/yaml/README.html#trigger>`_ syntax to run
dynamically generated
`child pipelines <https://docs.gitlab.com/12.9/ee/ci/parent_child_pipelines.html>`_.
Note that the use of dynamic child pipelines requires running Gitlab version
``>= 12.9``.
-----------------------------------
Spack commands supporting pipelines
-----------------------------------
Spack provides a command ``ci`` with two sub-commands: ``spack ci generate`` generates
a pipeline (a .gitlab-ci.yml file) from a spack environment, and ``spack ci rebuild``
checks a spec against a remote mirror and possibly rebuilds it from source and updates
the binary mirror with the latest built package. Both ``spack ci ...`` commands must
be run from within the same environment, as each one makes use of the environment for
different purposes. Additionally, some options to the commands (or conditions present
in the spack environment file) may require particular environment variables to be
set in order to function properly. Examples of these are typically secrets
needed for pipeline operation that should not be visible in a spack environment
file. These environment variables are described in more detail
:ref:`ci_environment_variables`.
.. _cmd-spack-ci:
^^^^^^^^^^^^^^^^^^
``spack ci``
@@ -113,7 +87,16 @@ file. These environment variables are described in more detail
Super-command for functionality related to generating pipelines and executing
pipeline jobs.
.. _cmd-spack-ci-generate:
^^^^^^^^^^^^^^^^^^^^^
``spack ci generate``
@@ -122,7 +105,19 @@ pipeline jobs.
Concretizes the specs in the active environment, stages them (as described in
:ref:`staging_algorithm`), and writes the resulting ``.gitlab-ci.yml`` to disk.
.. _cmd-spack-ci-rebuild:
^^^^^^^^^^^^^^^^^^^^
``spack ci rebuild``
@@ -137,7 +132,7 @@ A pipeline-enabled spack environment
------------------------------------
Here's an example of a spack environment file that has been enhanced with
sections describing a build pipeline:
.. code-block:: yaml
@@ -163,14 +158,14 @@ sections describing a build pipeline:
       - os=ubuntu18.04
     runner-attributes:
       tags:
         - spack-kube
       image: spack/ubuntu-bionic
   - match:
       - os=centos7
     runner-attributes:
       tags:
         - spack-kube
       image: spack/centos7
   cdash:
     build-group: Release Testing
     url: https://cdash.spack.io
@@ -373,30 +368,22 @@ containing the url and branch/tag you want to clone (calling them, for example,
``SPACK_REPO`` and ``SPACK_REF``), use them to clone spack in your pre-ci
``before_script``, and finally pass those same values along to the workload
generation process via the ``spack-repo`` and ``spack-ref`` cli args. Here's
the ``generate-pipeline`` job from the top of this document, updated to clone
a custom spack and make sure the generated rebuild jobs will clone it too:
.. code-block:: yaml
   generate-pipeline:
     tags:
       - <some-other-tag>
     before_script:
       - git clone ${SPACK_REPO} --branch ${SPACK_REF}
       - . ./spack/share/spack/setup-env.sh
     script:
       - spack env activate .
       - spack ci generate
         --spack-repo ${SPACK_REPO} --spack-ref ${SPACK_REF}
         --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
     after_script:
       - rm -rf ./spack
     artifacts:
       paths:
         - "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
If the ``spack ci generate`` command receives those extra command line arguments,
then it adds similar ``before_script`` and ``after_script`` sections for each of
the ``spack ci rebuild`` jobs it generates (cloning and sourcing a custom
spack in the ``before_script`` and removing it again in the ``after_script``).
@@ -443,3 +430,10 @@ SPACK_SIGNING_KEY
^^^^^^^^^^^^^^^^^
Needed to sign/verify binary packages from the remote binary mirror.


@@ -280,16 +280,16 @@ you install it, you can use ``spack spec -N``:
Concretized
--------------------------------
builtin.hdf5@1.10.0-patch1%apple-clang@7.0.2+cxx~debug+fortran+mpi+shared~szip~threadsafe arch=darwin-elcapitan-x86_64
^builtin.openmpi@2.0.1%apple-clang@7.0.2~mxm~pmi~psm~psm2~slurm~sqlite3~thread_multiple~tm~verbs+vt arch=darwin-elcapitan-x86_64
^builtin.hwloc@1.11.4%apple-clang@7.0.2 arch=darwin-elcapitan-x86_64
^builtin.libpciaccess@0.13.4%apple-clang@7.0.2 arch=darwin-elcapitan-x86_64
^builtin.libtool@2.4.6%apple-clang@7.0.2 arch=darwin-elcapitan-x86_64
^builtin.m4@1.4.17%apple-clang@7.0.2+sigsegv arch=darwin-elcapitan-x86_64
^builtin.libsigsegv@2.10%apple-clang@7.0.2 arch=darwin-elcapitan-x86_64
^builtin.pkg-config@0.29.1%apple-clang@7.0.2+internal_glib arch=darwin-elcapitan-x86_64
^builtin.util-macros@1.19.0%apple-clang@7.0.2 arch=darwin-elcapitan-x86_64
^builtin.zlib@1.2.8%apple-clang@7.0.2+pic arch=darwin-elcapitan-x86_64
.. warning::


@@ -284,10 +284,8 @@ have some drawbacks:
The ``spack load`` and ``spack module tcl loads`` commands, on the
other hand, are not very smart: if the user-supplied spec matches
more than one installed package, then ``spack module tcl loads`` will
fail. This default behavior may change in the future. For now,
the workaround is to either be more specific on any failing ``spack load``
commands or to use ``spack load --first`` to allow spack to load the
first matching spec.
""""""""""""""""""""""
@@ -446,7 +444,7 @@ environment.
A single-prefix filesystem view is a single directory tree that is the
union of the directory hierarchies of a number of installed packages;
it is similar to the directory hierarchy that might exist under
``/usr/local``. The files of the view's installed packages are
brought into the view by symbolic or hard links, referencing the
original Spack installation.
@@ -1239,7 +1237,7 @@ you can also manually set them in your ``.bashrc``.
2. Other package managers like Homebrew will try to install things to the
same directory. If you plan on using Homebrew in conjunction with Spack,
don't symlink things to ``/usr/local``.
3. If you are on a shared workstation, or don't have sudo privileges, you
can't do this.
If you still want to do this anyway, there are several ways around SIP.
@@ -1358,14 +1356,6 @@ The main points that are implemented below:
the spack builds in the config.
(The Travis yaml parser is a bit buggy on the echo command.)
#. Without control for the user, Travis jobs will run on various
``x86_64`` microarchitectures. If you plan to cache build results,
e.g. to accelerate dependency builds, consider building for the
generic ``x86_64`` target only.
Limiting the microarchitecture will also find more packages when
working with the
`E4S Spack build cache <https://oaciss.uoregon.edu/e4s/e4s_buildcache_inventory.html>`_.
#. Builds over 10 minutes need to be prefixed with ``travis_wait``.
Alternatively, generate output once with ``spack install -v``.
@@ -1408,9 +1398,7 @@ The main points that are implemented below:
   - if ! which spack >/dev/null; then
       mkdir -p $SPACK_ROOT &&
       git clone --depth 50 https://github.com/spack/spack.git $SPACK_ROOT &&
       echo -e "config:""\n  build_jobs:"" 2" > $SPACK_ROOT/etc/spack/config.yaml &&
       echo -e "packages:""\n  all:""\n    target:"" ['x86_64']"
         > $SPACK_ROOT/etc/spack/packages.yaml;
     fi
- travis_wait spack install cmake@3.7.2~openssl~ncurses
- travis_wait spack install boost@1.62.0~graph~iostream~locale~log~wave
@@ -1442,7 +1430,12 @@ The following functionality is prepared:
#. Base image: the example starts from a minimal ubuntu.
#. Pre-install the spack dependencies, including modules from the packages.
   This avoids needing to build those from scratch via ``spack bootstrap``.
   Package installs are followed by a clean-up of the system package index,
   to avoid outdated information and to save space.
#. Installing as root: docker images are usually set up as root.
   Since some autotools scripts might complain about this being unsafe, we set
   ``FORCE_UNSAFE_CONFIGURE=1`` to avoid configure errors.
@@ -1471,9 +1464,10 @@ In order to build and run the image, execute:
# general environment for docker
ENV DEBIAN_FRONTEND=noninteractive \
    SPACK_ROOT=/usr/local \
    FORCE_UNSAFE_CONFIGURE=1

# install minimal spack dependencies
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
autoconf \

lib/spack/env/cc

@@ -15,9 +15,9 @@
# 1. It allows Spack to swap compilers into and out of builds easily.
# 2. It adds several options to the compile line so that spack
# packages can find their dependencies at build time and run time:
#  -I and/or -isystem arguments for dependency /include directories.
#  -L arguments for dependency /lib directories.
#  -Wl,-rpath arguments for dependency /lib directories.
#
# This is an array of environment variables that need to be set before
@@ -43,7 +43,7 @@ parameters=(
# The compiler input variables are checked for sanity later:
# SPACK_CC, SPACK_CXX, SPACK_F77, SPACK_FC
# The default compiler flags are passed from these variables:
# SPACK_CFLAGS, SPACK_CXXFLAGS, SPACK_FFLAGS,
# SPACK_LDFLAGS, SPACK_LDLIBS
# Debug env var is optional; set to "TRUE" for debug logging:
# SPACK_DEBUG
@@ -251,11 +251,10 @@ input_command="$*"
#
# Parse the command line arguments.
#
# We extract -L, -I, -isystem and -Wl,-rpath arguments from the
# command line and recombine them with Spack arguments later. We
# parse these out so that we can make sure that system paths come
# last, that package arguments come first, and that Spack arguments
# are injected properly.
#
# All other arguments, including -l arguments, are treated as
# 'other_args' and left in their original order. This ensures that
system_libdirs=()
system_rpaths=()
libs=()
other_args=()
isystem_system_includes=()
isystem_includes=()

while [ -n "$1" ]; do
    # an RPATH to be added after the case statement.
    rp=""

    case "$1" in
        -isystem*)
            arg="${1#-isystem}"
            isystem_was_used=true
            if [ -z "$arg" ]; then shift; arg="$1"; fi
            if system_dir "$arg"; then
                isystem_system_includes+=("$arg")
            else
                isystem_includes+=("$arg")
            fi
            ;;
        -I*)
            arg="${1#-I}"
            if [ -z "$arg" ]; then shift; arg="$1"; fi
@@ -438,6 +425,12 @@ then
esac
fi
IFS=':' read -ra rpath_dirs <<< "$SPACK_RPATH_DIRS"
if [[ $mode == ccld || $mode == ld ]]; then
@@ -488,22 +481,9 @@ args=()
# flags assembled earlier
args+=("${flags[@]}")
# Insert include directories just prior to any system include directories

for dir in "${includes[@]}"; do args+=("-I$dir"); done
for dir in "${isystem_includes[@]}"; do args+=("-isystem$dir"); done

IFS=':' read -ra spack_include_dirs <<< "$SPACK_INCLUDE_DIRS"
if [[ $mode == cpp || $mode == cc || $mode == as || $mode == ccld ]]; then
    if [[ "$isystem_was_used" == "true" ]] ; then
        for dir in "${spack_include_dirs[@]}"; do args+=("-isystem$dir"); done
    else
        for dir in "${spack_include_dirs[@]}"; do args+=("-I$dir"); done
    fi
fi

for dir in "${system_includes[@]}"; do args+=("-I$dir"); done
for dir in "${isystem_system_includes[@]}"; do args+=("-isystem$dir"); done
# Library search paths
for dir in "${libdirs[@]}"; do args+=("-L$dir"); done


@@ -88,11 +88,10 @@
* Homepage: https://pypi.python.org/pypi/pytest
* Usage: Testing framework used by Spack.
* Version: 3.2.5 (last version supporting Python 2.6)
* Note: This package has been slightly modified:
  * We improve Python 2.6 compatibility. See:
    https://github.com/spack/spack/pull/6801.
  * We have patched pytest not to depend on setuptools. See:
    https://github.com/spack/spack/pull/15612
ruamel.yaml
-----------
@@ -126,5 +125,4 @@
* Homepage: https://altgraph.readthedocs.io/en/latest/index.html
* Usage: dependency of macholib
* Version: 0.16.1
"""


@@ -1028,13 +1028,34 @@ def _consider_importhook(self, args):
except SystemError:
mode = 'plain'
else:
# REMOVED FOR SPACK: This routine imports `pkg_resources` from
# `setuptools`, but we do not need it for Spack. We have removed
# it from Spack to avoid a dependency on setuptools.
# self._mark_plugins_for_rewrite(hook)
pass
self._mark_plugins_for_rewrite(hook)
self._warn_about_missing_assertion(mode)
def _mark_plugins_for_rewrite(self, hook):
"""
Given an importhook, mark for rewrite any top-level
modules or packages in the distribution package for
all pytest plugins.
"""
import pkg_resources
self.pluginmanager.rewrite_hook = hook
# 'RECORD' available for plugins installed normally (pip install)
# 'SOURCES.txt' available for plugins installed in dev mode (pip install -e)
# for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa
# so it shouldn't be an issue
metadata_files = 'RECORD', 'SOURCES.txt'
package_files = (
entry.split(',')[0]
for entrypoint in pkg_resources.iter_entry_points('pytest11')
for metadata in metadata_files
for entry in entrypoint.dist._get_metadata(metadata)
)
for name in _iter_rewritable_modules(package_files):
hook.mark_rewrite(name)
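For context, the ``pytest11`` entry points this method iterates over can
be enumerated on their own with ``pkg_resources`` (requires setuptools;
output depends on which plugins are installed)::

    import pkg_resources

    for entrypoint in pkg_resources.iter_entry_points('pytest11'):
        # each entry point names a plugin and the distribution that ships it
        print(entrypoint.name, entrypoint.dist.project_name)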
def _warn_about_missing_assertion(self, mode):
try:
assert False
@@ -1060,12 +1081,7 @@ def _preparse(self, args, addopts=True):
self._checkversion()
self._consider_importhook(args)
self.pluginmanager.consider_preparse(args)
# REMOVED FOR SPACK: This routine imports `pkg_resources` from
# `setuptools`, but we do not need it for Spack. We have removed
# it from Spack to avoid a dependency on setuptools.
# self.pluginmanager.load_setuptools_entrypoints('pytest11')
self.pluginmanager.load_setuptools_entrypoints('pytest11')
self.pluginmanager.consider_env()
self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy())
if self.known_args_namespace.confcutdir is None and self.inifile:

View File

@@ -497,6 +497,26 @@ def check_pending(self):
"unknown hook %r in plugin %r" %
(name, hookimpl.plugin))
def load_setuptools_entrypoints(self, entrypoint_name):
""" Load modules from querying the specified setuptools entrypoint name.
Return the number of loaded plugins. """
from pkg_resources import (iter_entry_points, DistributionNotFound,
VersionConflict)
for ep in iter_entry_points(entrypoint_name):
# is the plugin registered or blocked?
if self.get_plugin(ep.name) or self.is_blocked(ep.name):
continue
try:
plugin = ep.load()
except DistributionNotFound:
continue
except VersionConflict as e:
raise PluginValidationError(
"Plugin %r could not be loaded: %s!" % (ep.name, e))
self.register(plugin, name=ep.name)
self._plugin_distinfo.append((plugin, ep.dist))
return len(self._plugin_distinfo)
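For reference, a plugin becomes visible to this loader by declaring a
``pytest11`` entry point in its packaging metadata; a minimal,
hypothetical ``setup.py`` looks like::

    from setuptools import setup

    setup(
        name='pytest-myplugin',      # hypothetical plugin distribution
        py_modules=['pytest_myplugin'],
        entry_points={'pytest11': ['myplugin = pytest_myplugin']},
    )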
def list_plugin_distinfo(self):
""" return list of distinfo/plugin tuples for all setuptools registered
plugins. """

View File

@@ -139,12 +139,9 @@
@contributor: U{Reka Albert <http://www.phys.psu.edu/~ralbert/>}
'''
# import pkg_resources
# __version__ = pkg_resources.require('altgraph')[0].version
# pkg_resources cannot find the altgraph package even though it is on sys.path
# there is no .dist-info or .egg-info for pkg_resources to query the version from
# so it must be set manually
__version__ = '0.16.1'
import pkg_resources
__version__ = pkg_resources.require('altgraph')[0].version
class GraphError(ValueError):
pass

View File

@@ -64,7 +64,6 @@
'enterpriseenterprise': 'oracle', # Oracle Enterprise Linux
'redhatenterpriseworkstation': 'rhel', # RHEL 6, 7 Workstation
'redhatenterpriseserver': 'rhel', # RHEL 6, 7 Server
'redhatenterprisecomputenode': 'rhel', # RHEL 6 ComputeNode
}
#: Translation table for normalizing the distro ID derived from the file name

View File

@@ -45,18 +45,18 @@ def __init__(self, prog, description, usage,
class ArgparseWriter(argparse.HelpFormatter):
"""Analyzes an argparse ArgumentParser for easy generation of help."""
def __init__(self, prog, out=None, aliases=False):
def __init__(self, prog, out=sys.stdout, aliases=False):
"""Initializes a new ArgparseWriter instance.
Parameters:
prog (str): the program name
out (file object): the file to write to (default sys.stdout)
out (file object): the file to write to
aliases (bool): whether or not to include subparsers for aliases
"""
super(ArgparseWriter, self).__init__(prog)
self.level = 0
self.prog = prog
self.out = sys.stdout if out is None else out
self.out = out
self.aliases = aliases
def parse(self, parser, prog):
@@ -167,7 +167,7 @@ def write(self, parser):
class ArgparseRstWriter(ArgparseWriter):
"""Write argparse output as rst sections."""
def __init__(self, prog, out=None, aliases=False,
def __init__(self, prog, out=sys.stdout, aliases=False,
rst_levels=_rst_levels):
"""Create a new ArgparseRstWriter.
@@ -178,7 +178,6 @@ def __init__(self, prog, out=None, aliases=False,
rst_levels (list of str): list of characters
for rst section headings
"""
out = sys.stdout if out is None else out
super(ArgparseRstWriter, self).__init__(prog, out, aliases)
self.rst_levels = rst_levels

View File

@@ -204,21 +204,9 @@ def optimization_flags(self, compiler, version):
compiler (str): name of the compiler to be used
version (str): version of the compiler to be used
"""
# If we don't have information on compiler at all
# return an empty string
if compiler not in self.family.compilers:
return ''
# If we have information but it stops before this
# microarchitecture, fall back to the best known target
# If we don't have information on compiler return an empty string
if compiler not in self.compilers:
best_target = [
x for x in self.ancestors if compiler in x.compilers
][0]
msg = ("'{0}' compiler is known to optimize up to the '{1}'"
" microarchitecture in the '{2}' architecture family")
msg = msg.format(compiler, best_target, best_target.family)
raise UnsupportedMicroarchitecture(msg)
return ''
# If we have information on this compiler we need to check the
# version being used
@@ -231,10 +219,15 @@ def optimization_flags(self, compiler, version):
def satisfies_constraint(entry, version):
min_version, max_version = entry['versions'].split(':')
# Extract numeric part of the version
min_version, _ = version_components(min_version)
max_version, _ = version_components(max_version)
version, _ = version_components(version)
# Check version suffixes
min_version, min_suffix = version_components(min_version)
max_version, max_suffix = version_components(max_version)
version, suffix = version_components(version)
# If the suffixes are not all equal there's no match
if ((suffix != min_suffix and min_version) or
(suffix != max_suffix and max_version)):
return False
# Assume compiler versions fit into semver
tuplify = lambda x: tuple(int(y) for y in x.split('.'))
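To illustrate the suffix rule above: a version such as ``9.1.0-apple``
splits into a numeric part and a suffix, and a range like
``0.0.0-apple:`` only matches versions carrying that suffix. A
self-contained sketch, with ``version_components`` as a stand-in for the
real helper::

    import re

    def version_components(version):
        # stand-in: split '9.1.0-apple' into ('9.1.0', 'apple')
        match = re.match(r'([\d.]*)-?(.*)', version)
        return match.group(1), match.group(2)

    tuplify = lambda x: tuple(int(y) for y in x.split('.'))

    print(version_components('9.1.0-apple'))     # ('9.1.0', 'apple')
    print(version_components('9.1.0'))           # ('9.1.0', '')
    print(tuplify('9.1.0') >= tuplify('4.0.0'))  # True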

View File

@@ -61,14 +61,12 @@
"flags": "-march={name} -mtune={name}"
}
],
"apple-clang": [
"clang": [
{
"versions": ":",
"versions": "0.0.0-apple:",
"name": "x86-64",
"flags": "-march={name}"
}
],
"clang": [
},
{
"versions": ":",
"name": "x86-64",

View File

@@ -456,7 +456,7 @@ def copy_tree(src, dest, symlinks=True, ignore=None, _permissions=False):
if os.path.isdir(s):
mkdirp(d)
else:
shutil.copy2(s, d)
shutil.copyfile(s, d)
if _permissions:
set_install_permissions(d)
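The practical difference in that one-line change: ``shutil.copy2``
copies contents plus permission bits and timestamps, while
``shutil.copyfile`` copies contents only. A quick demonstration::

    import shutil

    with open('a.txt', 'w') as f:
        f.write('demo\n')

    shutil.copyfile('a.txt', 'b.txt')  # contents only; fresh metadata
    shutil.copy2('a.txt', 'c.txt')     # contents plus mode and mtime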
@@ -624,9 +624,9 @@ def replace_directory_transaction(directory_name, tmp_root=None):
# Check the input is indeed a directory with absolute path.
# Raise before anything is done to avoid moving the wrong directory
assert os.path.isdir(directory_name), \
'Invalid directory: ' + directory_name
'"directory_name" must be a valid directory'
assert os.path.isabs(directory_name), \
'"directory_name" must contain an absolute path: ' + directory_name
'"directory_name" must contain an absolute path'
directory_basename = os.path.basename(directory_name)

View File

@@ -619,28 +619,3 @@ def load_module_from_file(module_name, module_path):
import imp
module = imp.load_source(module_name, module_path)
return module
def uniq(sequence):
"""Remove strings of duplicate elements from a list.
This works like the command-line ``uniq`` tool. It filters strings
of duplicate elements in a list. Adjacent matching elements are
merged into the first occurrence.
For example::
uniq([1, 1, 1, 1, 2, 2, 2, 3, 3]) == [1, 2, 3]
uniq([1, 1, 1, 1, 2, 2, 2, 1, 1]) == [1, 2, 1]
"""
if not sequence:
return []
uniq_list = [sequence[0]]
last = sequence[0]
for element in sequence[1:]:
if element != last:
uniq_list.append(element)
last = element
return uniq_list

View File

@@ -8,32 +8,14 @@
import errno
import time
import socket
from datetime import datetime
import llnl.util.tty as tty
import spack.util.string
__all__ = ['Lock', 'LockTransaction', 'WriteTransaction', 'ReadTransaction',
'LockError', 'LockTimeoutError',
'LockPermissionError', 'LockROFileError', 'CantCreateLockError']
#: Mapping of supported locks to description
lock_type = {fcntl.LOCK_SH: 'read', fcntl.LOCK_EX: 'write'}
#: A useful default for optional callback arguments: a function that
#: simply returns True.
true_fn = lambda: True
def _attempts_str(wait_time, nattempts):
# Don't print anything if we succeeded on the first try
if nattempts <= 1:
return ''
attempts = spack.util.string.plural(nattempts, 'attempt')
return ' after {0:0.2f}s and {1}'.format(wait_time, attempts)
class Lock(object):
"""This is an implementation of a filesystem lock using Python's lockf.
@@ -49,8 +31,8 @@ class Lock(object):
maintain multiple locks on the same file.
"""
def __init__(self, path, start=0, length=0, default_timeout=None,
debug=False, desc=''):
def __init__(self, path, start=0, length=0, debug=False,
default_timeout=None):
"""Construct a new lock on the file at ``path``.
By default, the lock applies to the whole file. Optionally,
@@ -61,16 +43,6 @@ def __init__(self, path, start=0, length=0, default_timeout=None,
not currently expose the ``whence`` parameter -- ``whence`` is
always ``os.SEEK_SET`` and ``start`` is always evaluated from the
beginning of the file.
Args:
path (str): path to the lock
start (int): optional byte offset at which the lock starts
length (int): optional number of bytes to lock
default_timeout (int): number of seconds to wait for lock attempts,
where None means to wait indefinitely
debug (bool): debug mode specific to locking
desc (str): optional debug message lock description, which is
helpful for distinguishing between different Spack locks.
"""
self.path = path
self._file = None
@@ -84,9 +56,6 @@ def __init__(self, path, start=0, length=0, default_timeout=None,
# enable debug mode
self.debug = debug
# optional debug description
self.desc = ' ({0})'.format(desc) if desc else ''
# If the user doesn't set a default timeout, or if they choose
# None, 0, etc. then lock attempts will not time out (unless the
# user sets a timeout for each attempt)
@@ -120,20 +89,6 @@ def _poll_interval_generator(_wait_times=None):
num_requests += 1
yield wait_time
def __repr__(self):
"""Formal representation of the lock."""
rep = '{0}('.format(self.__class__.__name__)
for attr, value in self.__dict__.items():
rep += '{0}={1}, '.format(attr, value.__repr__())
return '{0})'.format(rep.strip(', '))
def __str__(self):
"""Readable string (with key fields) of the lock."""
location = '{0}[{1}:{2}]'.format(self.path, self._start, self._length)
timeout = 'timeout={0}'.format(self.default_timeout)
activity = '#reads={0}, #writes={1}'.format(self._reads, self._writes)
return '({0}, {1}, {2})'.format(location, timeout, activity)
def _lock(self, op, timeout=None):
"""This takes a lock using POSIX locks (``fcntl.lockf``).
@@ -144,9 +99,8 @@ def _lock(self, op, timeout=None):
successfully acquired, the total wait time and the number of attempts
is returned.
"""
assert op in lock_type
assert op in (fcntl.LOCK_SH, fcntl.LOCK_EX)
self._log_acquiring('{0} LOCK'.format(lock_type[op].upper()))
timeout = timeout or self.default_timeout
# Create file and parent directories if they don't exist.
@@ -174,9 +128,6 @@ def _lock(self, op, timeout=None):
# If the file were writable, we'd have opened it 'r+'
raise LockROFileError(self.path)
tty.debug("{0} locking [{1}:{2}]: timeout {3} sec"
.format(lock_type[op], self._start, self._length, timeout))
poll_intervals = iter(Lock._poll_interval_generator())
start_time = time.time()
num_attempts = 0
@@ -188,21 +139,17 @@ def _lock(self, op, timeout=None):
time.sleep(next(poll_intervals))
# TBD: Is an extra attempt after timeout needed/appropriate?
num_attempts += 1
if self._poll_lock(op):
total_wait_time = time.time() - start_time
return total_wait_time, num_attempts
raise LockTimeoutError("Timed out waiting for a {0} lock."
.format(lock_type[op]))
raise LockTimeoutError("Timed out waiting for lock.")
def _poll_lock(self, op):
"""Attempt to acquire the lock in a non-blocking manner. Return whether
the locking attempt succeeds
"""
assert op in lock_type
try:
# Try to get the lock (will raise if not available.)
fcntl.lockf(self._file, op | fcntl.LOCK_NB,
@@ -212,9 +159,6 @@ def _poll_lock(self, op):
if self.debug:
# All locks read the owner PID and host
self._read_debug_data()
tty.debug('{0} locked {1} [{2}:{3}] (owner={4})'
.format(lock_type[op], self.path,
self._start, self._length, self.pid))
# Exclusive locks write their PID/host
if op == fcntl.LOCK_EX:
@@ -223,12 +167,12 @@ def _poll_lock(self, op):
return True
except IOError as e:
# EAGAIN and EACCES == locked by another process (so try again)
if e.errno not in (errno.EAGAIN, errno.EACCES):
if e.errno in (errno.EAGAIN, errno.EACCES):
# EAGAIN and EACCES == locked by another process
pass
else:
raise
return False
def _ensure_parent_directory(self):
parent = os.path.dirname(self.path)
@@ -283,8 +227,6 @@ def _unlock(self):
self._length, self._start, os.SEEK_SET)
self._file.close()
self._file = None
self._reads = 0
self._writes = 0
def acquire_read(self, timeout=None):
"""Acquires a recursive, shared lock for reading.
@@ -300,14 +242,15 @@ def acquire_read(self, timeout=None):
timeout = timeout or self.default_timeout
if self._reads == 0 and self._writes == 0:
self._debug(
'READ LOCK: {0.path}[{0._start}:{0._length}] [Acquiring]'
.format(self))
# can raise LockError.
wait_time, nattempts = self._lock(fcntl.LOCK_SH, timeout=timeout)
self._acquired_debug('READ LOCK', wait_time, nattempts)
self._reads += 1
# Log if acquired, which includes counts when verbose
self._log_acquired('READ LOCK', wait_time, nattempts)
return True
else:
# Increment the read count for nested lock tracking
self._reads += 1
return False
@@ -325,11 +268,13 @@ def acquire_write(self, timeout=None):
timeout = timeout or self.default_timeout
if self._writes == 0:
self._debug(
'WRITE LOCK: {0.path}[{0._start}:{0._length}] [Acquiring]'
.format(self))
# can raise LockError.
wait_time, nattempts = self._lock(fcntl.LOCK_EX, timeout=timeout)
self._acquired_debug('WRITE LOCK', wait_time, nattempts)
self._writes += 1
# Log if acquired, which includes counts when verbose
self._log_acquired('WRITE LOCK', wait_time, nattempts)
# return True only if we weren't nested in a read lock.
# TODO: we may need to return two values: whether we got
@@ -337,65 +282,9 @@ def acquire_write(self, timeout=None):
# write lock for the first time. Now it returns the latter.
return self._reads == 0
else:
# Increment the write count for nested lock tracking
self._writes += 1
return False
def is_write_locked(self):
"""Check if the file is write locked
Return:
(bool): ``True`` if the path is write locked, otherwise, ``False``
"""
try:
self.acquire_read()
# If we have a read lock then no other process has a write lock.
self.release_read()
except LockTimeoutError:
# Another process is holding a write lock on the file
return True
return False
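The probe above in isolation: take and immediately release a shared
lock, and treat a timeout as evidence that another process holds the
write lock (path is illustrative; assumes this module is importable as
``llnl.util.lock``)::

    from llnl.util.lock import Lock, LockTimeoutError

    lock = Lock('/tmp/demo.lock', default_timeout=1)
    try:
        lock.acquire_read()    # succeeds unless a writer holds the file
        lock.release_read()
        write_locked = False
    except LockTimeoutError:
        write_locked = True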
def downgrade_write_to_read(self, timeout=None):
"""
Downgrade from an exclusive write lock to a shared read.
Raises:
LockDowngradeError: if this is an attempt at a nested transaction
"""
timeout = timeout or self.default_timeout
if self._writes == 1 and self._reads == 0:
self._log_downgrading()
# can raise LockError.
wait_time, nattempts = self._lock(fcntl.LOCK_SH, timeout=timeout)
self._reads = 1
self._writes = 0
self._log_downgraded(wait_time, nattempts)
else:
raise LockDowngradeError(self.path)
def upgrade_read_to_write(self, timeout=None):
"""
Attempts to upgrade from a shared read lock to an exclusive write.
Raises:
LockUpgradeError: if this is an attempt at a nested transaction
"""
timeout = timeout or self.default_timeout
if self._reads == 1 and self._writes == 0:
self._log_upgrading()
# can raise LockError.
wait_time, nattempts = self._lock(fcntl.LOCK_EX, timeout=timeout)
self._reads = 0
self._writes = 1
self._log_upgraded(wait_time, nattempts)
else:
raise LockUpgradeError(self.path)
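A usage sketch of the upgrade/downgrade protocol defined above (POSIX
only; both calls raise if the lock is nested)::

    from llnl.util.lock import Lock

    lock = Lock('/tmp/demo.lock', default_timeout=5)  # illustrative path

    lock.acquire_read()             # shared lock
    lock.upgrade_read_to_write()    # now exclusive
    lock.downgrade_write_to_read()  # back to shared
    lock.release_read()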
def release_read(self, release_fn=None):
"""Releases a read lock.
@@ -416,17 +305,17 @@ def release_read(self, release_fn=None):
"""
assert self._reads > 0
locktype = 'READ LOCK'
if self._reads == 1 and self._writes == 0:
self._log_releasing(locktype)
self._debug(
'READ LOCK: {0.path}[{0._start}:{0._length}] [Released]'
.format(self))
# we need to call release_fn before releasing the lock
release_fn = release_fn or true_fn
result = release_fn()
result = True
if release_fn is not None:
result = release_fn()
self._unlock() # can raise LockError.
self._reads = 0
self._log_released(locktype)
self._reads -= 1
return result
else:
self._reads -= 1
@@ -450,91 +339,45 @@ def release_write(self, release_fn=None):
"""
assert self._writes > 0
release_fn = release_fn or true_fn
locktype = 'WRITE LOCK'
if self._writes == 1 and self._reads == 0:
self._log_releasing(locktype)
self._debug(
'WRITE LOCK: {0.path}[{0._start}:{0._length}] [Released]'
.format(self))
# we need to call release_fn before releasing the lock
result = release_fn()
result = True
if release_fn is not None:
result = release_fn()
self._unlock() # can raise LockError.
self._writes = 0
self._log_released(locktype)
self._writes -= 1
return result
else:
self._writes -= 1
# when the last *write* is released, we call release_fn here
# instead of immediately before releasing the lock.
if self._writes == 0:
return release_fn()
return release_fn() if release_fn is not None else True
else:
return False
def _debug(self, *args):
tty.debug(*args)
def _get_counts_desc(self):
return '(reads {0}, writes {1})'.format(self._reads, self._writes) \
if tty.is_verbose() else ''
def _log_acquired(self, locktype, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = 'Acquired at %s' % now.strftime("%H:%M:%S.%f")
self._debug(self._status_msg(locktype, '{0}{1}'.
format(desc, attempts_part)))
def _log_acquiring(self, locktype):
self._debug2(self._status_msg(locktype, 'Acquiring'))
def _log_downgraded(self, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = 'Downgraded at %s' % now.strftime("%H:%M:%S.%f")
self._debug(self._status_msg('READ LOCK', '{0}{1}'
.format(desc, attempts_part)))
def _log_downgrading(self):
self._debug2(self._status_msg('WRITE LOCK', 'Downgrading'))
def _log_released(self, locktype):
now = datetime.now()
desc = 'Released at %s' % now.strftime("%H:%M:%S.%f")
self._debug(self._status_msg(locktype, desc))
def _log_releasing(self, locktype):
self._debug2(self._status_msg(locktype, 'Releasing'))
def _log_upgraded(self, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = 'Upgraded at %s' % now.strftime("%H:%M:%S.%f")
self._debug(self._status_msg('WRITE LOCK', '{0}{1}'.
format(desc, attempts_part)))
def _log_upgrading(self):
self._debug2(self._status_msg('READ LOCK', 'Upgrading'))
def _status_msg(self, locktype, status):
status_desc = '[{0}] {1}'.format(status, self._get_counts_desc())
return '{0}{1.desc}: {1.path}[{1._start}:{1._length}] {2}'.format(
locktype, self, status_desc)
def _debug2(self, *args):
# TODO: Easy place to make a single, temporary change to the
# TODO: debug level associated with the more detailed messages.
# TODO:
# TODO: Someday it would be great if we could switch this to
# TODO: another level, perhaps _between_ debug and verbose, or
# TODO: some other form of filtering so the first level of
# TODO: debugging doesn't have to generate these messages. Using
# TODO: verbose here did not work as expected because tests like
# TODO: test_spec_json will write the verbose messages to the
# TODO: output that is used to check test correctness.
tty.debug(*args)
def _acquired_debug(self, lock_type, wait_time, nattempts):
attempts_format = 'attempt' if nattempts == 1 else 'attempts'
if nattempts > 1:
acquired_attempts_format = ' after {0:0.2f}s and {1:d} {2}'.format(
wait_time, nattempts, attempts_format)
else:
# Don't print anything if we succeeded immediately
acquired_attempts_format = ''
self._debug(
'{0}: {1.path}[{1._start}:{1._length}] [Acquired{2}]'
.format(lock_type, self, acquired_attempts_format))
class LockTransaction(object):
@@ -619,28 +462,10 @@ class LockError(Exception):
"""Raised for any errors related to locks."""
class LockDowngradeError(LockError):
"""Raised when unable to downgrade from a write to a read lock."""
def __init__(self, path):
msg = "Cannot downgrade lock from write to read on file: %s" % path
super(LockDowngradeError, self).__init__(msg)
class LockLimitError(LockError):
"""Raised when exceed maximum attempts to acquire a lock."""
class LockTimeoutError(LockError):
"""Raised when an attempt to acquire a lock times out."""
class LockUpgradeError(LockError):
"""Raised when unable to upgrade from a read to a write lock."""
def __init__(self, path):
msg = "Cannot upgrade lock from read to write on file: %s" % path
super(LockUpgradeError, self).__init__(msg)
class LockPermissionError(LockError):
"""Raised when there are permission issues with a lock."""

View File

@@ -135,9 +135,7 @@ def process_stacktrace(countback):
def get_timestamp(force=False):
"""Get a string timestamp"""
if _debug or _timestamp or force:
# Note inclusion of the PID is useful for parallel builds.
return '[{0}, {1}] '.format(
datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), os.getpid())
return datetime.now().strftime("[%Y-%m-%d-%H:%M:%S.%f] ")
else:
return ''

View File

@@ -215,22 +215,20 @@ def cextra(string):
return len(''.join(re.findall(r'\033[^m]*m', string)))
def cwrite(string, stream=None, color=None):
def cwrite(string, stream=sys.stdout, color=None):
"""Replace all color expressions in string with ANSI control
codes and write the result to the stream. If color is
False, this will write plain text with no color. If True,
then it will always write colored output. If not supplied,
then it will be set based on stream.isatty().
"""
stream = sys.stdout if stream is None else stream
if color is None:
color = get_color_when()
stream.write(colorize(string, color=color))
def cprint(string, stream=None, color=None):
def cprint(string, stream=sys.stdout, color=None):
"""Same as cwrite, but writes a trailing newline to the stream."""
stream = sys.stdout if stream is None else stream
cwrite(string + "\n", stream, color)

View File

@@ -7,27 +7,18 @@
"""
from __future__ import unicode_literals
import atexit
import errno
import multiprocessing
import os
import re
import select
import sys
import traceback
import signal
from contextlib import contextmanager
from six import string_types
from six import StringIO
import llnl.util.tty as tty
try:
import termios
except ImportError:
termios = None
# Use this to strip escape sequences
_escape = re.compile(r'\x1b[^m]*m|\x1b\[?1034h')
@@ -40,25 +31,6 @@
control = re.compile('(\x11\n|\x13\n)')
@contextmanager
def ignore_signal(signum):
"""Context manager to temporarily ignore a signal."""
old_handler = signal.signal(signum, signal.SIG_IGN)
try:
yield
finally:
signal.signal(signum, old_handler)
def _is_background_tty(stream):
"""True if the stream is a tty and calling process is in the background.
"""
return (
stream.isatty() and
os.getpgrp() != os.tcgetpgrp(stream.fileno())
)
def _strip(line):
"""Strip color and control characters from a line."""
return _escape.sub('', line)
@@ -69,75 +41,22 @@ class keyboard_input(object):
Use this with ``sys.stdin`` for keyboard input, e.g.::
with keyboard_input(sys.stdin) as kb:
while True:
kb.check_fg_bg()
r, w, x = select.select([sys.stdin], [], [])
# ... do something with keypresses ...
with keyboard_input(sys.stdin):
r, w, x = select.select([sys.stdin], [], [])
# ... do something with keypresses ...
The ``keyboard_input`` context manager disables canonical
(line-based) input and echoing, so that keypresses are available on
the stream immediately, and they are not printed to the
terminal. Typically, standard input is line-buffered, which means
keypresses won't be sent until the user hits return. In this mode, a
user can hit, e.g., 'v', and it will be read on the other end of the
pipe immediately but not printed.
This disables canonical input so that keypresses are available on the
stream immediately. Typically standard input allows line editing,
which means keypresses won't be sent until the user hits return.
The handler takes care to ensure that terminal changes only take
effect when the calling process is in the foreground. If the process
is backgrounded, canonical mode and echo are re-enabled. They are
disabled again when the calling process comes back to the foreground.
It also disables echoing, so that keys pressed aren't printed to the
terminal. So, the user can hit, e.g., 'v', and it's read on the
other end of the pipe immediately but not printed.
This context manager works through a single signal handler for
``SIGTSTP``, along with a polling routine called ``check_fg_bg()``.
Here are the relevant states, transitions, and POSIX signals::
[Running] -------- Ctrl-Z sends SIGTSTP ------------.
[ in FG ] <------- fg sends SIGCONT --------------. |
^ | |
| fg (no signal) | |
| | v
[Running] <------- bg sends SIGCONT ---------- [Stopped]
[ in BG ] [ in BG ]
We handle all transitions except for ``SIGTSTP`` generated by Ctrl-Z
by periodically calling ``check_fg_bg()``. This routine notices if
we are in the background with canonical mode or echo disabled, or if
we are in the foreground without canonical disabled and echo enabled,
and it fixes the terminal settings in response.
``check_fg_bg()`` works *except* for when the process is stopped with
``SIGTSTP``. We cannot rely on a periodic timer in this case, as it
may not run before the process stops. We therefore restore terminal
settings in the ``SIGTSTP`` handler.
Additional notes:
* We mostly use polling here instead of a SIGALRM timer or a
thread. This is to avoid the complexities of many interrupts, which
seem to make system calls (like I/O) unreliable in older Python
versions (2.6 and 2.7). See these issues for details:
1. https://www.python.org/dev/peps/pep-0475/
2. https://bugs.python.org/issue8354
There are essentially too many ways for asynchronous signals to go
wrong if we also have to support older Python versions, so we opt
not to use them.
* ``SIGSTOP`` can stop a process (in the foreground or background),
but it can't be caught. Because of this, we can't fix any terminal
settings on ``SIGSTOP``, and the terminal will be left with
``ICANON`` and ``ECHO`` disabled until it resumes execution.
* Technically, a process *could* be sent ``SIGTSTP`` while running in
the foreground, without the shell backgrounding that process. This
doesn't happen in practice, and we assume that ``SIGTSTP`` always
means that defaults should be restored.
* We rely on ``termios`` support. Without it, or if the stream isn't
a TTY, ``keyboard_input`` has no effect.
When the with block completes, prior TTY settings are restored.
Note: this depends on termios support. If termios isn't available,
or if the stream isn't a TTY, this context manager has no effect.
"""
def __init__(self, stream):
"""Create a context manager that will enable keyboard input on stream.
@@ -150,97 +69,44 @@ def __init__(self, stream):
"""
self.stream = stream
def _is_background(self):
"""True iff calling process is in the background."""
return _is_background_tty(self.stream)
def _get_canon_echo_flags(self):
"""Get current termios canonical and echo settings."""
cfg = termios.tcgetattr(self.stream)
return (
bool(cfg[3] & termios.ICANON),
bool(cfg[3] & termios.ECHO),
)
def _enable_keyboard_input(self):
"""Disable canonical input and echoing on ``self.stream``."""
# "enable" input by disabling canonical mode and echo
new_cfg = termios.tcgetattr(self.stream)
new_cfg[3] &= ~termios.ICANON
new_cfg[3] &= ~termios.ECHO
# Apply new settings for terminal
with ignore_signal(signal.SIGTTOU):
termios.tcsetattr(self.stream, termios.TCSANOW, new_cfg)
def _restore_default_terminal_settings(self):
"""Restore the original input configuration on ``self.stream``."""
# _restore_default_terminal_settings Can be called in foreground
# or background. When called in the background, tcsetattr triggers
# SIGTTOU, which we must ignore, or the process will be stopped.
with ignore_signal(signal.SIGTTOU):
termios.tcsetattr(self.stream, termios.TCSANOW, self.old_cfg)
def _tstp_handler(self, signum, frame):
self._restore_default_terminal_settings()
os.kill(os.getpid(), signal.SIGSTOP)
def check_fg_bg(self):
# old_cfg is set up in __enter__ and indicates that we have
# termios and a valid stream.
if not self.old_cfg:
return
# query terminal flags and fg/bg status
flags = self._get_canon_echo_flags()
bg = self._is_background()
# restore sanity if flags are amiss -- see diagram in class docs
if not bg and any(flags): # fg, but input not enabled
self._enable_keyboard_input()
elif bg and not all(flags): # bg, but input enabled
self._restore_default_terminal_settings()
def __enter__(self):
"""Enable immediate keypress input, while this process is foreground.
"""Enable immediate keypress input on stream.
If the stream is not a TTY or the system doesn't support termios,
do nothing.
"""
self.old_cfg = None
self.old_handlers = {}
# Ignore all this if the input stream is not a tty.
if not self.stream or not self.stream.isatty():
return self
return
if termios:
# save old termios settings to restore later
self.old_cfg = termios.tcgetattr(self.stream)
try:
# If this fails, self.old_cfg will remain None
import termios
# Install a signal handler to disable/enable keyboard input
# when the process moves between foreground and background.
self.old_handlers[signal.SIGTSTP] = signal.signal(
signal.SIGTSTP, self._tstp_handler)
# save old termios settings
fd = self.stream.fileno()
self.old_cfg = termios.tcgetattr(fd)
# add an atexit handler to ensure the terminal is restored
atexit.register(self._restore_default_terminal_settings)
# create new settings with canonical input and echo
# disabled, so keypresses are immediate & don't echo.
self.new_cfg = termios.tcgetattr(fd)
self.new_cfg[3] &= ~termios.ICANON
self.new_cfg[3] &= ~termios.ECHO
# enable keyboard input initially (if foreground)
if not self._is_background():
self._enable_keyboard_input()
# Apply new settings for terminal
termios.tcsetattr(fd, termios.TCSADRAIN, self.new_cfg)
return self
except Exception:
pass # some OS's do not support termios, so ignore
def __exit__(self, exc_type, exception, traceback):
"""If termios was available, restore old settings."""
"""If termios was avaialble, restore old settings."""
if self.old_cfg:
self._restore_default_terminal_settings()
# restore SIGSTP and SIGCONT handlers
if self.old_handlers:
for signum, old_handler in self.old_handlers.items():
signal.signal(signum, old_handler)
import termios
termios.tcsetattr(
self.stream.fileno(), termios.TCSADRAIN, self.old_cfg)
class Unbuffered(object):
@@ -416,11 +282,11 @@ def __enter__(self):
self._saved_debug = tty._debug
# OS-level pipe for redirecting output to logger
read_fd, write_fd = os.pipe()
self.read_fd, self.write_fd = os.pipe()
# Multiprocessing pipe for communication back from the daemon
# Currently only used to save echo value between uses
self.parent_pipe, child_pipe = multiprocessing.Pipe()
self.parent, self.child = multiprocessing.Pipe()
# Sets a daemon that writes to file what it reads from a pipe
try:
@@ -431,15 +297,10 @@ def __enter__(self):
input_stream = None # just don't forward input if this fails
self.process = multiprocessing.Process(
target=_writer_daemon,
args=(
input_stream, read_fd, write_fd, self.echo, self.log_file,
child_pipe
)
)
target=self._writer_daemon, args=(input_stream,))
self.process.daemon = True # must set before start()
self.process.start()
os.close(read_fd) # close in the parent process
os.close(self.read_fd) # close in the parent process
finally:
if input_stream:
@@ -461,9 +322,9 @@ def __enter__(self):
self._saved_stderr = os.dup(sys.stderr.fileno())
# redirect to the pipe we created above
os.dup2(write_fd, sys.stdout.fileno())
os.dup2(write_fd, sys.stderr.fileno())
os.close(write_fd)
os.dup2(self.write_fd, sys.stdout.fileno())
os.dup2(self.write_fd, sys.stderr.fileno())
os.close(self.write_fd)
else:
# Handle I/O the Python way. This won't redirect lower-level
@@ -476,7 +337,7 @@ def __enter__(self):
self._saved_stderr = sys.stderr
# create a file object for the pipe; redirect to it.
pipe_fd_out = os.fdopen(write_fd, 'w')
pipe_fd_out = os.fdopen(self.write_fd, 'w')
sys.stdout = pipe_fd_out
sys.stderr = pipe_fd_out
@@ -515,14 +376,14 @@ def __exit__(self, exc_type, exc_val, exc_tb):
# print log contents in parent if needed.
if self.write_log_in_parent:
string = self.parent_pipe.recv()
string = self.parent.recv()
self.file_like.write(string)
if self.close_log_in_parent:
self.log_file.close()
# recover and store echo settings from the child before it dies
self.echo = self.parent_pipe.recv()
self.echo = self.parent.recv()
# join the daemon process. The daemon will quit automatically
# when the write pipe is closed; we just wait for it here.
@@ -547,166 +408,72 @@ def force_echo(self):
# exactly before and after the text we want to echo.
sys.stdout.write(xon)
sys.stdout.flush()
yield
sys.stdout.write(xoff)
sys.stdout.flush()
def _writer_daemon(self, stdin):
"""Daemon that writes output to the log file and stdout."""
# Use line buffering (3rd param = 1) since Python 3 has a bug
# that prevents unbuffered text I/O.
in_pipe = os.fdopen(self.read_fd, 'r', 1)
os.close(self.write_fd)
echo = self.echo # initial echo setting, user-controllable
force_echo = False # parent can force echo for certain output
# list of streams to select from
istreams = [in_pipe, stdin] if stdin else [in_pipe]
log_file = self.log_file
try:
yield
with keyboard_input(stdin):
while True:
# No need to set any timeout for select.select
# Wait until a key press or an event on in_pipe.
rlist, _, _ = select.select(istreams, [], [])
# Allow user to toggle echo with 'v' key.
# Currently ignores other chars.
if stdin in rlist:
if stdin.read(1) == 'v':
echo = not echo
# Handle output from the with block process.
if in_pipe in rlist:
# If we arrive here it means that in_pipe was
# ready for reading: it should never happen that
# line is false-ish
line = in_pipe.readline()
if not line:
break # EOF
# find control characters and strip them.
controls = control.findall(line)
line = re.sub(control, '', line)
# Echo to stdout if requested or forced
if echo or force_echo:
sys.stdout.write(line)
sys.stdout.flush()
# Stripped output to log file.
log_file.write(_strip(line))
log_file.flush()
if xon in controls:
force_echo = True
if xoff in controls:
force_echo = False
except BaseException:
tty.error("Exception occurred in writer daemon!")
traceback.print_exc()
finally:
sys.stdout.write(xoff)
sys.stdout.flush()
# send written data back to parent if we used a StringIO
if self.write_log_in_parent:
self.child.send(log_file.getvalue())
log_file.close()
def _writer_daemon(stdin, read_fd, write_fd, echo, log_file, control_pipe):
"""Daemon used by ``log_output`` to write to a log file and to ``stdout``.
The daemon receives output from the parent process and writes it both
to a log and, optionally, to ``stdout``. The relationship looks like
this::
Terminal
|
| +-------------------------+
| | Parent Process |
+--------> | with log_output(): |
| stdin | ... |
| +-------------------------+
| ^ | write_fd (parent's redirected stdout)
| | control |
| | pipe |
| | v read_fd
| +-------------------------+ stdout
| | Writer daemon |------------>
+--------> | read from read_fd | log_file
stdin | write to out and log |------------>
+-------------------------+
Within the ``log_output`` handler, the parent's output is redirected
to a pipe from which the daemon reads. The daemon writes each line
from the pipe to a log file and (optionally) to ``stdout``. The user
can hit ``v`` to toggle output on ``stdout``.
In addition to the input and output file descriptors, the daemon
interacts with the parent via ``control_pipe``. It reports whether
``stdout`` was enabled or disabled when it finished and, if the
``log_file`` is a ``StringIO`` object, then the daemon also sends the
logged output back to the parent as a string, to be written to the
``StringIO`` in the parent. This is mainly for testing.
Arguments:
stdin (stream): input from the terminal
read_fd (int): pipe for reading from parent's redirected stdout
write_fd (int): parent's end of the pipe will write to (will be
immediately closed by the writer daemon)
echo (bool): initial echo setting -- controlled by user and
preserved across multiple writer daemons
log_file (file-like): file to log all output
control_pipe (Pipe): multiprocessing pipe on which to send control
information to the parent
"""
# Use line buffering (3rd param = 1) since Python 3 has a bug
# that prevents unbuffered text I/O.
in_pipe = os.fdopen(read_fd, 'r', 1)
os.close(write_fd)
# list of streams to select from
istreams = [in_pipe, stdin] if stdin else [in_pipe]
force_echo = False # parent can force echo for certain output
try:
with keyboard_input(stdin) as kb:
while True:
# fix the terminal settings if we recently came to
# the foreground
kb.check_fg_bg()
# wait for input from any stream. use a coarse timeout to
# allow other checks while we wait for input
rlist, _, _ = _retry(select.select)(istreams, [], [], 1e-1)
# Allow user to toggle echo with 'v' key.
# Currently ignores other chars.
# only read stdin if we're in the foreground
if stdin in rlist and not _is_background_tty(stdin):
# it's possible to be backgrounded between the above
# check and the read, so we ignore SIGTTIN here.
with ignore_signal(signal.SIGTTIN):
try:
if stdin.read(1) == 'v':
echo = not echo
except IOError as e:
# If SIGTTIN is ignored, the system gives EIO
# to let the caller know the read failed b/c it
# was in the bg. Ignore that too.
if e.errno != errno.EIO:
raise
if in_pipe in rlist:
# Handle output from the calling process.
line = _retry(in_pipe.readline)()
if not line:
break
# find control characters and strip them.
controls = control.findall(line)
line = control.sub('', line)
# Echo to stdout if requested or forced.
if echo or force_echo:
sys.stdout.write(line)
sys.stdout.flush()
# Stripped output to log file.
log_file.write(_strip(line))
log_file.flush()
if xon in controls:
force_echo = True
if xoff in controls:
force_echo = False
except BaseException:
tty.error("Exception occurred in writer daemon!")
traceback.print_exc()
finally:
# send written data back to parent if we used a StringIO
if isinstance(log_file, StringIO):
control_pipe.send(log_file.getvalue())
log_file.close()
# send echo value back to the parent so it can be preserved.
control_pipe.send(echo)
def _retry(function):
"""Retry a call if errors indicating an interrupted system call occur.
Interrupted system calls return -1 and set ``errno`` to ``EINTR`` if
certain flags are not set. Newer Pythons automatically retry them,
but older Pythons do not, so we need to retry the calls.
This function converts a call like this::
    syscall(args)
and makes it retry by wrapping the function like this::
    _retry(syscall)(args)
This is a private function because EINTR is unfortunately raised in
different ways from different functions, and we only handle the ones
relevant for this file.
"""
def wrapped(*args, **kwargs):
while True:
try:
return function(*args, **kwargs)
except IOError as e:
if e.errno == errno.EINTR:
continue
raise
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
return wrapped
# send echo value back to the parent so it can be preserved.
self.child.send(echo)
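A minimal use of the ``_retry`` helper above: wrap the call site of an
interruptible system call so it is retried on ``EINTR``::

    import select
    import sys

    # wrap the function, then call the wrapper with the original arguments
    rlist, _, _ = _retry(select.select)([sys.stdin], [], [], 0.1)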

View File

@@ -1,344 +0,0 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""The pty module handles pseudo-terminals.
Currently, the infrastructure here is only used to test llnl.util.tty.log.
If this is used outside a testing environment, we will want to reconsider
things like timeouts in ``ProcessController.wait()``, which are set to
get tests done quickly, not to avoid high CPU usage.
"""
from __future__ import print_function
import os
import signal
import multiprocessing
import re
import sys
import termios
import time
import traceback
import llnl.util.tty.log as log
from spack.util.executable import which
class ProcessController(object):
"""Wrapper around some fundamental process control operations.
This allows one process to drive another similar to the way a shell
would, by sending signals and I/O.
"""
def __init__(self, pid, master_fd,
timeout=1, sleep_time=1e-1, debug=False):
"""Create a controller to manipulate the process with id ``pid``
Args:
pid (int): id of process to control
master_fd (int): master file descriptor attached to pid's stdin
timeout (int): time in seconds for wait operations to time out
(default 1 second)
sleep_time (int): time to sleep after signals, to control the
signal rate of the controller (default 1e-1)
debug (bool): whether ``horizontal_line()`` and ``status()`` should
produce output when called (default False)
``sleep_time`` allows the caller to insert delays after calls
that signal or modify the controlled process. Python behaves very
poorly if signals arrive too fast, and drowning a Python process
that has a Python signal handler in signals can kill it and hang
our tests, so we throttle this to a closer-to-interactive rate.
"""
self.pid = pid
self.pgid = os.getpgid(pid)
self.master_fd = master_fd
self.timeout = timeout
self.sleep_time = sleep_time
self.debug = debug
# we need the ps command to wait for process statuses
self.ps = which("ps", required=True)
def get_canon_echo_attrs(self):
"""Get echo and canon attributes of the terminal of master_fd."""
cfg = termios.tcgetattr(self.master_fd)
return (
bool(cfg[3] & termios.ICANON),
bool(cfg[3] & termios.ECHO),
)
def horizontal_line(self, name):
"""Labled horizontal line for debugging."""
if self.debug:
sys.stderr.write(
"------------------------------------------- %s\n" % name
)
def status(self):
"""Print debug message with status info for the child."""
if self.debug:
canon, echo = self.get_canon_echo_attrs()
sys.stderr.write("canon: %s, echo: %s\n" % (
"on" if canon else "off",
"on" if echo else "off",
))
sys.stderr.write("input: %s\n" % self.input_on())
sys.stderr.write("bg: %s\n" % self.background())
sys.stderr.write("\n")
def input_on(self):
"""True if keyboard input is enabled on the master_fd pty."""
return self.get_canon_echo_attrs() == (False, False)
def background(self):
"""True if pgid is in a background pgroup of master_fd's terminal."""
return self.pgid != os.tcgetpgrp(self.master_fd)
def tstp(self):
"""Send SIGTSTP to the controlled process."""
self.horizontal_line("tstp")
os.killpg(self.pgid, signal.SIGTSTP)
time.sleep(self.sleep_time)
def cont(self):
self.horizontal_line("cont")
os.killpg(self.pgid, signal.SIGCONT)
time.sleep(self.sleep_time)
def fg(self):
self.horizontal_line("fg")
with log.ignore_signal(signal.SIGTTOU):
os.tcsetpgrp(self.master_fd, os.getpgid(self.pid))
time.sleep(self.sleep_time)
def bg(self):
self.horizontal_line("bg")
with log.ignore_signal(signal.SIGTTOU):
os.tcsetpgrp(self.master_fd, os.getpgrp())
time.sleep(self.sleep_time)
def write(self, byte_string):
self.horizontal_line("write '%s'" % byte_string.decode("utf-8"))
os.write(self.master_fd, byte_string)
def wait(self, condition):
start = time.time()
while (((time.time() - start) < self.timeout) and not condition()):
time.sleep(1e-2)
assert condition()
def wait_enabled(self):
self.wait(lambda: self.input_on() and not self.background())
def wait_disabled(self):
self.wait(lambda: not self.input_on() and self.background())
def wait_disabled_fg(self):
self.wait(lambda: not self.input_on() and not self.background())
def proc_status(self):
status = self.ps("-p", str(self.pid), "-o", "stat", output=str)
status = re.split(r"\s+", status.strip())
return status[1]
def wait_stopped(self):
self.wait(lambda: "T" in self.proc_status())
def wait_running(self):
self.wait(lambda: "T" not in self.proc_status())
class PseudoShell(object):
"""Sets up master and child processes with a PTY.
You can create a ``PseudoShell`` if you want to test how some
function responds to terminal input. This is a pseudo-shell from a
job control perspective; ``master_function`` and ``child_function``
are set up with a pseudoterminal (pty) so that the master can drive
the child through process control signals and I/O.
The two functions should have signatures like this::
def master_function(proc, ctl, **kwargs)
def child_function(**kwargs)
``master_function`` is spawned in its own process and passed three
arguments:
proc
the ``multiprocessing.Process`` object representing the child
ctl
a ``ProcessController`` object tied to the child
kwargs
keyword arguments passed from ``PseudoShell.start()``.
``child_function`` is only passed ``kwargs`` delegated from
``PseudoShell.start()``.
The controller's ``master_fd`` is connected to
``sys.stdin`` in the child process. Both processes will share the
same ``sys.stdout`` and ``sys.stderr`` as the process instantiating
``PseudoShell``.
Here are the relationships between processes created::
._________________________________________________________.
| Child Process | pid 2
| - runs child_function | pgroup 2
|_________________________________________________________| session 1
^
| create process with master_fd connected to stdin
| stdout, stderr are the same as caller
._________________________________________________________.
| Master Process | pid 1
| - runs master_function | pgroup 1
| - uses ProcessController and master_fd to control child | session 1
|_________________________________________________________|
^
| create process
| stdin, stdout, stderr are the same as caller
._________________________________________________________.
| Caller | pid 0
| - Constructs, starts, joins PseudoShell | pgroup 0
| - provides master_function, child_function | session 0
|_________________________________________________________|
"""
def __init__(self, master_function, child_function):
self.proc = None
self.master_function = master_function
self.child_function = child_function
# these can be optionally set to change defaults
self.controller_timeout = 1
self.sleep_time = 0
def start(self, **kwargs):
"""Start the master and child processes.
Arguments:
kwargs (dict): arbitrary keyword arguments that will be
passed to master and child functions
The master process will create the child, then call
``master_function``. The child process will call
``child_function``.
"""
self.proc = multiprocessing.Process(
target=PseudoShell._set_up_and_run_master_function,
args=(self.master_function, self.child_function,
self.controller_timeout, self.sleep_time),
kwargs=kwargs,
)
self.proc.start()
def join(self):
"""Wait for the child process to finish, and return its exit code."""
self.proc.join()
return self.proc.exitcode
@staticmethod
def _set_up_and_run_child_function(
tty_name, stdout_fd, stderr_fd, ready, child_function, **kwargs):
"""Child process wrapper for PseudoShell.
Handles the mechanics of setting up a PTY, then calls
``child_function``.
"""
# new process group, like a command or pipeline launched by a shell
os.setpgrp()
# take controlling terminal and set up pty IO
stdin_fd = os.open(tty_name, os.O_RDWR)
os.dup2(stdin_fd, sys.stdin.fileno())
os.dup2(stdout_fd, sys.stdout.fileno())
os.dup2(stderr_fd, sys.stderr.fileno())
os.close(stdin_fd)
if kwargs.get("debug"):
sys.stderr.write(
"child: stdin.isatty(): %s\n" % sys.stdin.isatty())
# tell the parent that we're really running
if kwargs.get("debug"):
sys.stderr.write("child: ready!\n")
ready.value = True
try:
child_function(**kwargs)
except BaseException:
traceback.print_exc()
@staticmethod
def _set_up_and_run_master_function(
master_function, child_function, controller_timeout, sleep_time,
**kwargs):
"""Set up a pty, spawn a child process, and execute master_function.
Handles the mechanics of setting up a PTY, then calls
``master_function``.
"""
os.setsid() # new session; this process is the controller
master_fd, child_fd = os.openpty()
pty_name = os.ttyname(child_fd)
# take controlling terminal
pty_fd = os.open(pty_name, os.O_RDWR)
os.close(pty_fd)
ready = multiprocessing.Value('i', False)
child_process = multiprocessing.Process(
target=PseudoShell._set_up_and_run_child_function,
args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(),
ready, child_function),
kwargs=kwargs,
)
child_process.start()
# wait for subprocess to be running and connected.
while not ready.value:
time.sleep(1e-5)
if kwargs.get("debug"):
sys.stderr.write("pid: %d\n" % os.getpid())
sys.stderr.write("pgid: %d\n" % os.getpgrp())
sys.stderr.write("sid: %d\n" % os.getsid(0))
sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(master_fd))
sys.stderr.write("\n")
child_pgid = os.getpgid(child_process.pid)
sys.stderr.write("child pid: %d\n" % child_process.pid)
sys.stderr.write("child pgid: %d\n" % child_pgid)
sys.stderr.write("child sid: %d\n" % os.getsid(child_process.pid))
sys.stderr.write("\n")
sys.stderr.flush()
# set up master to ignore SIGTSTP, like a shell
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
# call the master function once the child is ready
try:
controller = ProcessController(
child_process.pid, master_fd, debug=kwargs.get("debug"))
controller.timeout = controller_timeout
controller.sleep_time = sleep_time
error = master_function(child_process, controller, **kwargs)
except BaseException:
error = 1
traceback.print_exc()
child_process.join()
# return whether either the parent or child failed
return error or child_process.exitcode
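A sketch of driving this harness, following the signatures in the
docstring above (both functions and their behavior are illustrative)::

    def child_function(**kwargs):
        import sys
        sys.stdout.write(sys.stdin.readline())  # echo one line back

    def master_function(proc, ctl, **kwargs):
        ctl.status()           # prints canon/echo/bg state when debug=True
        ctl.write(b'hello\n')  # drive the child through the pty
        return 0

    shell = PseudoShell(master_function, child_function)
    shell.start(debug=True)
    exit_code = shell.join()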

View File

@@ -5,7 +5,7 @@
#: major, minor, patch version for Spack, in a tuple
spack_version_info = (0, 15, 1)
spack_version_info = (0, 13, 4)
#: String containing Spack version joined with .'s
spack_version = '.'.join(str(v) for v in spack_version_info)

View File

@@ -209,15 +209,14 @@ def optimization_flags(self, compiler):
compiler_version = compiler.version
version_number, suffix = cpu.version_components(compiler.version)
if not version_number or suffix not in ('', 'apple'):
# Try to deduce the underlying version of the compiler, regardless
# of its name in compilers.yaml. Depending on where this function
# is called we might get either a CompilerSpec or a fully fledged
# compiler object.
# Try to deduce the correct version. Depending on where this
# function is called we might get either a CompilerSpec or a
# fully fledged compiler object
import spack.spec
if isinstance(compiler, spack.spec.CompilerSpec):
compiler = spack.compilers.compilers_for_spec(compiler).pop()
try:
compiler_version = compiler.get_real_version()
compiler_version = compiler.cc_version(compiler.cc)
except spack.util.executable.ProcessError as e:
# log this and just return compiler.version instead
tty.debug(str(e))
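The fallback above in miniature, using only names visible in this hunk
(the compiler spec string is hypothetical)::

    import spack.compilers
    import spack.spec

    cspec = spack.spec.CompilerSpec('gcc@9.2.0')
    compiler = spack.compilers.compilers_for_spec(cspec).pop()
    try:
        version = compiler.cc_version(compiler.cc)  # ask the binary itself
    except Exception:
        version = compiler.version                  # fall back to the spec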

View File

@@ -10,9 +10,6 @@
import shutil
import tempfile
import hashlib
import glob
import platform
from contextlib import closing
import ruamel.yaml as yaml
@@ -21,11 +18,10 @@
from six.moves.urllib.error import URLError
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp
from llnl.util.filesystem import mkdirp, install_tree
import spack.cmd
import spack.config as config
import spack.database as spack_db
import spack.fetch_strategy as fs
import spack.util.gpg
import spack.relocate as relocate
@@ -33,6 +29,7 @@
import spack.mirror
import spack.util.url as url_util
import spack.util.web as web_util
from spack.spec import Spec
from spack.stage import Stage
from spack.util.gpg import Gpg
@@ -56,7 +53,7 @@
BUILD_CACHE_INDEX_ENTRY_TEMPLATE = ' <li><a href="{path}">{path}</a></li>'
class NoOverwriteException(spack.error.SpackError):
class NoOverwriteException(Exception):
"""
Raised when a file exists and must be overwritten.
"""
@@ -71,18 +68,14 @@ class NoGpgException(spack.error.SpackError):
"""
Raised when gpg2 is not in PATH
"""
def __init__(self, msg):
super(NoGpgException, self).__init__(msg)
pass
class NoKeyException(spack.error.SpackError):
"""
Raised when gpg has no default key added.
"""
def __init__(self, msg):
super(NoKeyException, self).__init__(msg)
pass
class PickKeyException(spack.error.SpackError):
@@ -91,7 +84,7 @@ class PickKeyException(spack.error.SpackError):
"""
def __init__(self, keys):
err_msg = "Multiple keys available for signing\n%s\n" % keys
err_msg = "Multi keys available for signing\n%s\n" % keys
err_msg += "Use spack buildcache create -k <key hash> to pick a key."
super(PickKeyException, self).__init__(err_msg)
@@ -114,9 +107,7 @@ class NewLayoutException(spack.error.SpackError):
"""
Raised if directory layout is different from buildcache.
"""
def __init__(self, msg):
super(NewLayoutException, self).__init__(msg)
pass
def build_cache_relative_path():
@@ -146,21 +137,15 @@ def read_buildinfo_file(prefix):
return buildinfo
def write_buildinfo_file(spec, workdir, rel=False):
def write_buildinfo_file(prefix, workdir, rel=False):
"""
Create a cache file containing information
required for the relocation
"""
prefix = spec.prefix
text_to_relocate = []
binary_to_relocate = []
link_to_relocate = []
blacklist = (".spack", "man")
prefix_to_hash = dict()
prefix_to_hash[str(spec.package.prefix)] = spec.dag_hash()
deps = spack.build_environment.get_rpath_deps(spec.package)
for d in deps:
prefix_to_hash[str(d.prefix)] = d.dag_hash()
# Do this during tarball creation to save time when the tarball is unpacked.
# Used by make_package_relative to determine binaries to change.
for root, dirs, files in os.walk(prefix, topdown=True):
@@ -177,8 +162,8 @@ def write_buildinfo_file(spec, workdir, rel=False):
link_to_relocate.append(rel_path_name)
else:
msg = 'Absolute link %s to %s ' % (path_name, link)
msg += 'outside of prefix %s ' % prefix
msg += 'should not be relocated.'
msg += 'outside of stage %s ' % prefix
msg += 'cannot be relocated.'
tty.warn(msg)
if relocate.needs_binary_relocation(m_type, m_subtype):
@@ -199,7 +184,6 @@ def write_buildinfo_file(spec, workdir, rel=False):
buildinfo['relocate_textfiles'] = text_to_relocate
buildinfo['relocate_binaries'] = binary_to_relocate
buildinfo['relocate_links'] = link_to_relocate
buildinfo['prefix_to_hash'] = prefix_to_hash
filename = buildinfo_file_name(workdir)
with open(filename, 'w') as outfile:
outfile.write(syaml.dump(buildinfo, default_flow_style=True))
@@ -282,47 +266,31 @@ def sign_tarball(key, force, specfile_path):
def generate_package_index(cache_prefix):
"""Create the build cache index page.
Creates (or replaces) the "index.json" page at the location given in
Creates (or replaces) the "index.html" page at the location given in
cache_prefix. This page contains a link for each binary package (*.yaml)
and public key (*.key) under cache_prefix.
"""
tmpdir = tempfile.mkdtemp()
db_root_dir = os.path.join(tmpdir, 'db_root')
db = spack_db.Database(None, db_dir=db_root_dir,
enable_transaction_locking=False,
record_fields=['spec', 'ref_count'])
file_list = (
entry
for entry in web_util.list_url(cache_prefix)
if entry.endswith('.yaml'))
tty.debug('Retrieving spec.yaml files from {0} to build index'.format(
cache_prefix))
for file_path in file_list:
try:
yaml_url = url_util.join(cache_prefix, file_path)
tty.debug('fetching {0}'.format(yaml_url))
_, _, yaml_file = web_util.read_from_url(yaml_url)
yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
# yaml_obj = syaml.load(yaml_contents)
# s = Spec.from_yaml(yaml_obj)
s = Spec.from_yaml(yaml_contents)
db.add(s, None)
except (URLError, web_util.SpackWebError) as url_err:
tty.error('Error reading spec.yaml: {0}'.format(file_path))
tty.error(url_err)
try:
index_json_path = os.path.join(db_root_dir, 'index.json')
with open(index_json_path, 'w') as f:
db._write_to_file(f)
index_html_path = os.path.join(tmpdir, 'index.html')
file_list = (
entry
for entry in web_util.list_url(cache_prefix)
if (entry.endswith('.yaml')
or entry.endswith('.key')))
with open(index_html_path, 'w') as f:
f.write(BUILD_CACHE_INDEX_TEMPLATE.format(
title='Spack Package Index',
path_list='\n'.join(
BUILD_CACHE_INDEX_ENTRY_TEMPLATE.format(path=path)
for path in file_list)))
web_util.push_to_url(
index_json_path,
url_util.join(cache_prefix, 'index.json'),
index_html_path,
url_util.join(cache_prefix, 'index.html'),
keep_original=False,
extra_args={'ContentType': 'application/json'})
extra_args={'ContentType': 'text/html'})
finally:
shutil.rmtree(tmpdir)
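The index generation above only needs a title and a list of entry links. A minimal stand-in for the two templates it formats (assumed shapes, not the module's actual definitions) behaves like this:
BUILD_CACHE_INDEX_TEMPLATE = (
    '<html><head><title>{title}</title></head>\n'
    '<body><ul>\n{path_list}\n</ul></body></html>')
BUILD_CACHE_INDEX_ENTRY_TEMPLATE = '<li><a href="{path}">{path}</a></li>'

paths = ['linux-ubuntu18.04-x86_64-gcc-7.4.0-zlib-1.2.11.spec.yaml',
         'build_cache.key']
page = BUILD_CACHE_INDEX_TEMPLATE.format(
    title='Spack Package Index',
    path_list='\n'.join(
        BUILD_CACHE_INDEX_ENTRY_TEMPLATE.format(path=p) for p in paths))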
@@ -374,21 +342,11 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
raise NoOverwriteException(url_util.format(remote_specfile_path))
# make a copy of the install directory to work with
workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
# install_tree copies hardlinks
# create a temporary tarfile from prefix and extract it to workdir
# tarfile preserves hardlinks
temp_tarfile_name = tarball_name(spec, '.tar')
temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name)
with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
tar.add(name='%s' % spec.prefix,
arcname='.')
with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
tar.extractall(workdir)
os.remove(temp_tarfile_path)
workdir = os.path.join(tempfile.mkdtemp(), os.path.basename(spec.prefix))
install_tree(spec.prefix, workdir, symlinks=True)
# create info for later relocation and create tar
write_buildinfo_file(spec, workdir, rel)
write_buildinfo_file(spec.prefix, workdir, rel=rel)
# optionally make the paths in the binaries relative to each other
# in the spack install tree before creating tarball
@@ -402,14 +360,14 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
tty.die(e)
else:
try:
check_package_relocatable(workdir, spec, allow_root)
make_package_placeholder(workdir, spec, allow_root)
except Exception as e:
shutil.rmtree(workdir)
shutil.rmtree(tarfile_dir)
shutil.rmtree(tmpdir)
tty.die(e)
# create gzip compressed tarball of the install prefix
# create compressed tarball of the install prefix
with closing(tarfile.open(tarfile_path, 'w:gz')) as tar:
tar.add(name='%s' % workdir,
arcname='%s' % os.path.basename(spec.prefix))
@@ -432,7 +390,6 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
buildinfo = {}
buildinfo['relative_prefix'] = os.path.relpath(
spec.prefix, spack.store.layout.root)
buildinfo['relative_rpaths'] = rel
spec_dict['buildinfo'] = buildinfo
spec_dict['full_hash'] = spec.full_hash()
@@ -450,8 +407,8 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
sign_tarball(key, force, specfile_path)
# put tarball, spec and signature files in .spack archive
with closing(tarfile.open(spackfile_path, 'w')) as tar:
tar.add(name=tarfile_path, arcname='%s' % tarfile_name)
tar.add(name=specfile_path, arcname='%s' % specfile_name)
tar.add(name='%s' % tarfile_path, arcname='%s' % tarfile_name)
tar.add(name='%s' % specfile_path, arcname='%s' % specfile_name)
if not unsigned:
tar.add(name='%s.asc' % specfile_path,
arcname='%s.asc' % specfile_name)
@@ -466,9 +423,6 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
web_util.push_to_url(
specfile_path, remote_specfile_path, keep_original=False)
tty.msg('Buildcache for "%s" written to \n %s' %
(spec, remote_spackfile_path))
try:
# create an index.html for the build_cache directory so specs can be
# found
@@ -514,144 +468,100 @@ def make_package_relative(workdir, spec, allow_root):
"""
prefix = spec.prefix
buildinfo = read_buildinfo_file(workdir)
old_layout_root = buildinfo['buildpath']
old_path = buildinfo['buildpath']
orig_path_names = list()
cur_path_names = list()
for filename in buildinfo['relocate_binaries']:
orig_path_names.append(os.path.join(prefix, filename))
cur_path_names.append(os.path.join(workdir, filename))
if (spec.architecture.platform == 'darwin' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'darwin'):
if spec.architecture.platform == 'darwin':
relocate.make_macho_binaries_relative(cur_path_names, orig_path_names,
old_layout_root)
if (spec.architecture.platform == 'linux' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'linux'):
old_path, allow_root)
else:
relocate.make_elf_binaries_relative(cur_path_names, orig_path_names,
old_layout_root)
relocate.raise_if_not_relocatable(cur_path_names, allow_root)
old_path, allow_root)
orig_path_names = list()
cur_path_names = list()
for linkname in buildinfo.get('relocate_links', []):
orig_path_names.append(os.path.join(prefix, linkname))
cur_path_names.append(os.path.join(workdir, linkname))
for filename in buildinfo.get('relocate_links', []):
orig_path_names.append(os.path.join(prefix, filename))
cur_path_names.append(os.path.join(workdir, filename))
relocate.make_link_relative(cur_path_names, orig_path_names)
def check_package_relocatable(workdir, spec, allow_root):
def make_package_placeholder(workdir, spec, allow_root):
"""
Check if package binaries are relocatable.
Change links to placeholder links.
"""
prefix = spec.prefix
buildinfo = read_buildinfo_file(workdir)
cur_path_names = list()
for filename in buildinfo['relocate_binaries']:
cur_path_names.append(os.path.join(workdir, filename))
relocate.raise_if_not_relocatable(cur_path_names, allow_root)
relocate.check_files_relocatable(cur_path_names, allow_root)
cur_path_names = list()
for filename in buildinfo.get('relocate_links', []):
cur_path_names.append(os.path.join(workdir, filename))
relocate.make_link_placeholder(cur_path_names, workdir, prefix)
def relocate_package(spec, allow_root):
def relocate_package(workdir, spec, allow_root):
"""
Relocate the given package
"""
workdir = str(spec.prefix)
buildinfo = read_buildinfo_file(workdir)
new_layout_root = str(spack.store.layout.root)
new_prefix = str(spec.prefix)
new_rel_prefix = str(os.path.relpath(new_prefix, new_layout_root))
new_spack_prefix = str(spack.paths.prefix)
old_layout_root = str(buildinfo['buildpath'])
old_spack_prefix = str(buildinfo.get('spackprefix'))
old_rel_prefix = buildinfo.get('relative_prefix')
old_prefix = os.path.join(old_layout_root, old_rel_prefix)
rel = buildinfo.get('relative_rpaths')
prefix_to_hash = buildinfo.get('prefix_to_hash', None)
if (old_rel_prefix != new_rel_prefix and not prefix_to_hash):
msg = "Package tarball was created from an install "
msg += "prefix with a different directory layout and an older "
msg += "buildcache create implementation. It cannot be relocated."
raise NewLayoutException(msg)
# older buildcaches do not have the prefix_to_hash dictionary;
# we need to set an empty dictionary and add one entry to
# prefix_to_prefix to reproduce the old behavior
if not prefix_to_hash:
prefix_to_hash = dict()
hash_to_prefix = dict()
hash_to_prefix[spec.format('{hash}')] = str(spec.package.prefix)
new_deps = spack.build_environment.get_rpath_deps(spec.package)
for d in new_deps:
hash_to_prefix[d.format('{hash}')] = str(d.prefix)
prefix_to_prefix = dict()
for orig_prefix, hash in prefix_to_hash.items():
prefix_to_prefix[orig_prefix] = hash_to_prefix.get(hash, None)
prefix_to_prefix[old_prefix] = new_prefix
prefix_to_prefix[old_layout_root] = new_layout_root
new_path = str(spack.store.layout.root)
new_prefix = str(spack.paths.prefix)
old_path = str(buildinfo['buildpath'])
old_prefix = str(buildinfo.get('spackprefix',
'/not/in/buildinfo/dictionary'))
rel = buildinfo.get('relative_rpaths', False)
tty.debug("Relocating package from",
"%s to %s." % (old_layout_root, new_layout_root))
def is_backup_file(file):
return file.endswith('~')
# Text files containing the prefix text
text_names = list()
tty.msg("Relocating package from",
"%s to %s." % (old_path, new_path))
path_names = set()
for filename in buildinfo['relocate_textfiles']:
text_name = os.path.join(workdir, filename)
path_name = os.path.join(workdir, filename)
# Don't add backup files generated by filter_file during the install step.
if not is_backup_file(text_name):
text_names.append(text_name)
if not path_name.endswith('~'):
path_names.add(path_name)
relocate.relocate_text(path_names, oldpath=old_path,
newpath=new_path, oldprefix=old_prefix,
newprefix=new_prefix)
# If the binary files in the package were not edited to use
# relative RPATHs, then the RPATHs need to be relocated
if rel:
if old_path != new_path:
files_to_relocate = list(filter(
lambda pathname: not relocate.file_is_relocatable(
pathname, paths_to_relocate=[old_path, old_prefix]),
map(lambda filename: os.path.join(workdir, filename),
buildinfo['relocate_binaries'])))
# If we are installing back to the same location, don't replace anything
if old_layout_root != new_layout_root:
paths_to_relocate = [old_spack_prefix, old_layout_root]
paths_to_relocate.extend(prefix_to_hash.keys())
files_to_relocate = list(filter(
lambda pathname: not relocate.file_is_relocatable(
pathname, paths_to_relocate=paths_to_relocate),
map(lambda filename: os.path.join(workdir, filename),
buildinfo['relocate_binaries'])))
# If the buildcache was not created with relativized rpaths,
# do the relocation of paths in binaries
if (spec.architecture.platform == 'darwin' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'darwin'):
relocate.relocate_macho_binaries(files_to_relocate,
old_layout_root,
new_layout_root,
prefix_to_prefix, rel,
old_prefix,
new_prefix)
if (spec.architecture.platform == 'linux' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'linux'):
relocate.relocate_elf_binaries(files_to_relocate,
old_layout_root,
new_layout_root,
prefix_to_prefix, rel,
old_prefix,
new_prefix)
# Relocate links to the new install prefix
links = [link for link in buildinfo.get('relocate_links', [])]
relocate.relocate_links(
links, old_layout_root, old_prefix, new_prefix
)
# For all buildcaches,
# relocate the install prefixes in text files, including dependencies
relocate.relocate_text(text_names,
old_layout_root, new_layout_root,
old_prefix, new_prefix,
old_spack_prefix,
new_spack_prefix,
prefix_to_prefix)
# relocate the install prefixes in binary files including dependencies
relocate.relocate_text_bin(files_to_relocate,
old_prefix, new_prefix,
old_spack_prefix,
new_spack_prefix,
prefix_to_prefix)
if len(old_path) < len(new_path) and files_to_relocate:
tty.debug('Cannot do a binary string replacement with padding '
'for package because %s is longer than %s.' %
(new_path, old_path))
else:
for path_name in files_to_relocate:
relocate.replace_prefix_bin(path_name, old_path, new_path)
else:
path_names = set()
for filename in buildinfo['relocate_binaries']:
path_name = os.path.join(workdir, filename)
path_names.add(path_name)
if spec.architecture.platform == 'darwin':
relocate.relocate_macho_binaries(path_names, old_path,
new_path, allow_root)
else:
relocate.relocate_elf_binaries(path_names, old_path,
new_path, allow_root)
path_names = set()
for filename in buildinfo.get('relocate_links', []):
path_name = os.path.join(workdir, filename)
path_names.add(path_name)
relocate.relocate_links(path_names, old_path, new_path)
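At its core, the text-file side of relocation is a search-and-replace of the old install root with the new one. A stripped-down sketch of that step (not relocate's actual implementation, which also handles multiple prefixes and binary padding):
def replace_prefix_text(path, old_prefix, new_prefix):
    # Read the file, swap every occurrence of the old prefix for the
    # new one, and write the result back in place.
    with open(path, 'rb') as f:
        data = f.read()
    data = data.replace(old_prefix.encode('utf-8'),
                        new_prefix.encode('utf-8'))
    with open(path, 'wb') as f:
        f.write(data)
For binaries the same trick only works when the new path is no longer than the old one, which is exactly the length check guarding replace_prefix_bin above.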
def extract_tarball(spec, filename, allow_root=False, unsigned=False,
@@ -676,10 +586,6 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
with closing(tarfile.open(spackfile_path, 'r')) as tar:
tar.extractall(tmpdir)
# some buildcache tarfiles use bzip2 compression
if not os.path.exists(tarfile_path):
tarfile_name = tarball_name(spec, '.tar.bz2')
tarfile_path = os.path.join(tmpdir, tarfile_name)
if not unsigned:
if os.path.exists('%s.asc' % specfile_path):
try:
@@ -687,7 +593,7 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
Gpg.verify('%s.asc' % specfile_path, specfile_path, suppress)
except Exception as e:
shutil.rmtree(tmpdir)
raise e
tty.die(e)
else:
shutil.rmtree(tmpdir)
raise NoVerifyException(
@@ -716,51 +622,33 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
# if the original relative prefix is in the spec file, use it
buildinfo = spec_dict.get('buildinfo', {})
old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix)
rel = buildinfo.get('relative_rpaths')
# if the original relative prefix and new relative prefix differ, the
# directory layout has changed and the buildcache cannot be installed
# if it was created with relative rpaths
info = 'old relative prefix %s\nnew relative prefix %s\nrelative rpaths %s'
tty.debug(info %
(old_relative_prefix, new_relative_prefix, rel))
# if (old_relative_prefix != new_relative_prefix and (rel)):
# shutil.rmtree(tmpdir)
# msg = "Package tarball was created from an install "
# msg += "prefix with a different directory layout. "
# msg += "It cannot be relocated because it "
# msg += "uses relative rpaths."
# raise NewLayoutException(msg)
if old_relative_prefix != new_relative_prefix:
shutil.rmtree(tmpdir)
msg = "Package tarball was created from an install "
msg += "prefix with a different directory layout.\n"
msg += "It cannot be relocated."
raise NewLayoutException(msg)
# extract the tarball in a temp directory
with closing(tarfile.open(tarfile_path, 'r')) as tar:
tar.extractall(path=tmpdir)
# get the parent directory of the file .spack/binary_distribution;
# this should be the directory unpacked from the tarball, whose
# name is unknown because the prefix naming is unknown
bindist_file = glob.glob('%s/*/.spack/binary_distribution' % tmpdir)[0]
workdir = re.sub('/.spack/binary_distribution$', '', bindist_file)
tty.debug('workdir %s' % workdir)
# install_tree copies hardlinks
# create a temporary tarfile from prefix and extract it to workdir
# tarfile preserves hardlinks
temp_tarfile_name = tarball_name(spec, '.tar')
temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name)
with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
tar.add(name='%s' % workdir,
arcname='.')
with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
tar.extractall(spec.prefix)
os.remove(temp_tarfile_path)
# the base of the install prefix is used when creating the tarball,
# so the pathname should be the same now that the directory layout
# is confirmed
workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
install_tree(workdir, spec.prefix, symlinks=True)
# cleanup
os.remove(tarfile_path)
os.remove(specfile_path)
try:
relocate_package(spec, allow_root)
relocate_package(spec.prefix, spec, allow_root)
except Exception as e:
shutil.rmtree(spec.prefix)
raise e
tty.die(e)
else:
manifest_file = os.path.join(spec.prefix,
spack.store.layout.metadata_dir,
@@ -770,8 +658,6 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
tty.warn('No manifest file in tarball for spec %s' % spec_id)
finally:
shutil.rmtree(tmpdir)
if os.path.exists(filename):
os.remove(filename)
# Internal cache for downloaded specs
@@ -819,7 +705,7 @@ def get_spec(spec=None, force=False):
tty.debug("No Spack mirrors are currently configured")
return {}
if _cached_specs and spec in _cached_specs:
if spec in _cached_specs:
return _cached_specs
for mirror in spack.mirror.MirrorCollection().values():
@@ -841,61 +727,54 @@ def get_spec(spec=None, force=False):
return try_download_specs(urls=urls, force=force)
def get_specs(allarch=False):
def get_specs(force=False, allarch=False):
"""
Get spec.yaml files for the build caches available on the mirror
"""
global _cached_specs
arch = architecture.Arch(architecture.platform(),
'default_os', 'default_target')
arch_pattern = ('([^-]*-[^-]*-[^-]*)')
if not allarch:
arch_pattern = '(%s-%s-[^-]*)' % (arch.platform, arch.os)
regex_pattern = '%s(.*)(spec.yaml$)' % (arch_pattern)
arch_re = re.compile(regex_pattern)
if not spack.mirror.MirrorCollection():
tty.debug("No Spack mirrors are currently configured")
return {}
urls = set()
for mirror in spack.mirror.MirrorCollection().values():
fetch_url_build_cache = url_util.join(
mirror.fetch_url, _build_cache_relative_path)
tty.msg("Finding buildcaches at %s" %
url_util.format(fetch_url_build_cache))
mirror_dir = url_util.local_file_path(fetch_url_build_cache)
if mirror_dir:
tty.msg("Finding buildcaches in %s" % mirror_dir)
if os.path.exists(mirror_dir):
files = os.listdir(mirror_dir)
for file in files:
m = arch_re.search(file)
if m:
link = url_util.join(fetch_url_build_cache, file)
urls.add(link)
else:
tty.msg("Finding buildcaches at %s" %
url_util.format(fetch_url_build_cache))
p, links = web_util.spider(
url_util.join(fetch_url_build_cache, 'index.html'))
for link in links:
m = arch_re.search(link)
if m:
urls.add(link)
index_url = url_util.join(fetch_url_build_cache, 'index.json')
try:
_, _, file_stream = web_util.read_from_url(
index_url, 'application/json')
index_object = codecs.getreader('utf-8')(file_stream).read()
except (URLError, web_util.SpackWebError) as url_err:
tty.error('Failed to read index {0}'.format(index_url))
tty.debug(url_err)
# Just return whatever specs we may already have cached
return _cached_specs
tmpdir = tempfile.mkdtemp()
index_file_path = os.path.join(tmpdir, 'index.json')
with open(index_file_path, 'w') as fd:
fd.write(index_object)
db_root_dir = os.path.join(tmpdir, 'db_root')
db = spack_db.Database(None, db_dir=db_root_dir,
enable_transaction_locking=False)
db._read_from_file(index_file_path)
spec_list = db.query_local(installed=False)
for indexed_spec in spec_list:
spec_arch = architecture.arch_for_spec(indexed_spec.architecture)
if (allarch is True or spec_arch == arch):
_cached_specs.add(indexed_spec)
return _cached_specs
return try_download_specs(urls=urls, force=force)
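The arch_pattern assembled at the top of get_specs anchors on the platform-os-target triple at the front of a buildcache file name; for example (platform, os, and file name invented):
import re

arch_pattern = '(%s-%s-[^-]*)' % ('linux', 'ubuntu18.04')  # allarch=False case
arch_re = re.compile('%s(.*)(spec.yaml$)' % arch_pattern)

m = arch_re.search('linux-ubuntu18.04-x86_64-gcc-7.4.0-zlib-1.2.11.spec.yaml')
if m:
    print(m.group(1))  # -> linux-ubuntu18.04-x86_64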
def get_keys(install=False, trust=False, force=False):
"""
Get PGP public keys available on the mirror,
with suffix .key or .pub
"""
if not spack.mirror.MirrorCollection():
tty.die("Please add a spack mirror to allow " +
@@ -910,20 +789,18 @@ def get_keys(install=False, trust=False, force=False):
mirror_dir = url_util.local_file_path(fetch_url_build_cache)
if mirror_dir:
tty.msg("Finding public keys in %s" % mirror_dir)
files = os.listdir(str(mirror_dir))
files = os.listdir(mirror_dir)
for file in files:
if re.search(r'\.key', file) or re.search(r'\.pub', file):
if re.search(r'\.key', file):
link = url_util.join(fetch_url_build_cache, file)
keys.add(link)
else:
tty.msg("Finding public keys at %s" %
url_util.format(fetch_url_build_cache))
# For s3 mirror need to request index.html directly
p, links = web_util.spider(
url_util.join(fetch_url_build_cache, 'index.html'))
p, links = web_util.spider(fetch_url_build_cache, depth=1)
for link in links:
if re.search(r'\.key', link) or re.search(r'\.pub', link):
if re.search(r'\.key', link):
keys.add(link)
for link in keys:

View File

@@ -32,7 +32,6 @@
Skimming this module is a nice way to get acquainted with the types of
calls you can make from within the install() function.
"""
import re
import inspect
import multiprocessing
import os
@@ -54,7 +53,6 @@
import spack.paths
import spack.schema.environment
import spack.store
import spack.architecture as arch
from spack.util.string import plural
from spack.util.environment import (
env_flag, filter_system_paths, get_path, is_system_path,
@@ -62,7 +60,7 @@
from spack.util.environment import system_dirs
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, get_path_from_module, module
from spack.util.module_cmd import load_module, get_path_from_module
from spack.util.log_parse import parse_log_events, make_log_context
@@ -147,32 +145,6 @@ def clean_environment():
env.unset('CPATH')
env.unset('LD_RUN_PATH')
env.unset('DYLD_LIBRARY_PATH')
env.unset('DYLD_FALLBACK_LIBRARY_PATH')
# On Cray "cluster" systems, unset CRAY_LD_LIBRARY_PATH to avoid
# interference with Spack dependencies.
# CNL requires these variables to be set (or at least some of them,
# depending on the CNL version).
hostarch = arch.Arch(arch.platform(), 'default_os', 'default_target')
on_cray = str(hostarch.platform) == 'cray'
using_cnl = re.match(r'cnl\d+', str(hostarch.os))
if on_cray and not using_cnl:
env.unset('CRAY_LD_LIBRARY_PATH')
for varname in os.environ.keys():
if 'PKGCONF' in varname:
env.unset(varname)
# Unset the following variables because they can affect installation of
# Autotools and CMake packages.
build_system_vars = [
'CC', 'CFLAGS', 'CPP', 'CPPFLAGS', # C variables
'CXX', 'CCC', 'CXXFLAGS', 'CXXCPP', # C++ variables
'F77', 'FFLAGS', 'FLIBS', # Fortran77 variables
'FC', 'FCFLAGS', 'FCLIBS', # Fortran variables
'LDFLAGS', 'LIBS' # linker variables
]
for v in build_system_vars:
env.unset(v)
build_lang = spack.config.get('config:build_language')
if build_lang:
@@ -198,9 +170,6 @@ def set_compiler_environment_variables(pkg, env):
compiler = pkg.compiler
spec = pkg.spec
# Make sure the executables for this compiler exist
compiler.verify_executables()
# Set compiler variables used by CMake and autotools
assert all(key in compiler.link_paths for key in (
'cc', 'cxx', 'f77', 'fc'))
@@ -379,6 +348,10 @@ def set_build_environment_variables(pkg, env, dirty):
extra_rpaths = ':'.join(compiler.extra_rpaths)
env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
implicit_rpaths = compiler.implicit_rpaths()
if implicit_rpaths:
env.set('SPACK_COMPILER_IMPLICIT_RPATHS', ':'.join(implicit_rpaths))
# Add bin directories from dependencies to the PATH for the build.
for prefix in build_prefixes:
for dirname in ['bin', 'bin64']:
@@ -441,7 +414,7 @@ def _set_variables_for_single_module(pkg, module):
if getattr(module, marker, False):
return
jobs = spack.config.get('config:build_jobs', 16) if pkg.parallel else 1
jobs = spack.config.get('config:build_jobs') if pkg.parallel else 1
jobs = min(jobs, multiprocessing.cpu_count())
assert jobs is not None, "no default set for config:build_jobs"
@@ -558,7 +531,7 @@ def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
# TODO: Compiler arguments should not be hardcoded but provided by
# the different compiler classes.
if 'linux' in arch or 'cray' in arch:
if 'linux' in arch:
soname = os.path.basename(shared_lib)
if compat_version:
@@ -635,7 +608,7 @@ def get_rpaths(pkg):
# module show output.
if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
rpaths.append(get_path_from_module(pkg.compiler.modules[1]))
return list(dedupe(filter_system_paths(rpaths)))
return rpaths
def get_std_cmake_args(pkg):
@@ -743,21 +716,11 @@ def setup_package(pkg, dirty):
load_module("cce")
load_module(mod)
# kludge to handle cray libsci being automatically loaded by PrgEnv
# modules on the Cray platform. Unloading the module does no
# damage when it is unnecessary
module('unload', 'cray-libsci')
if pkg.architecture.target.module_name:
load_module(pkg.architecture.target.module_name)
load_external_modules(pkg)
implicit_rpaths = pkg.compiler.implicit_rpaths()
if implicit_rpaths:
build_env.set('SPACK_COMPILER_IMPLICIT_RPATHS',
':'.join(implicit_rpaths))
# Make sure nothing's strange about the Spack environment.
validate(build_env, tty.warn)
build_env.apply_modifications()
@@ -837,11 +800,12 @@ def child_process(child_pipe, input_stream):
setup_package(pkg, dirty=dirty)
return_value = function()
child_pipe.send(return_value)
except StopIteration as e:
# StopIteration is used to stop installations
# before the final stage, mainly for debug purposes
tty.msg(e)
child_pipe.send(None)
except StopPhase as e:
# Do not create a full ChildError from this, it's not an error
# it's a control statement.
child_pipe.send(e)
except BaseException:
# catch ANYTHING that goes wrong in the child process
exc_type, exc, tb = sys.exc_info()
@@ -893,20 +857,15 @@ def child_process(child_pipe, input_stream):
child_result = parent_pipe.recv()
p.join()
# If the child returned a StopPhase, raise it
if isinstance(child_result, StopPhase):
# do not print
raise child_result
# let the caller know which package went wrong.
if isinstance(child_result, InstallError):
child_result.pkg = pkg
# If the child process raised an error, print its output here rather
# than waiting until the call to SpackError.die() in main(). This
# allows exception handling output to be logged from within Spack.
# see spack.main.SpackCommand.
if isinstance(child_result, ChildError):
# If the child process raised an error, print its output here rather
# than waiting until the call to SpackError.die() in main(). This
# allows exception handling output to be logged from within Spack.
# see spack.main.SpackCommand.
child_result.print_context()
raise child_result
@@ -1095,13 +1054,3 @@ def __reduce__(self):
def _make_child_error(msg, module, name, traceback, build_log, context):
"""Used by __reduce__ in ChildError to reconstruct pickled errors."""
return ChildError(msg, module, name, traceback, build_log, context)
class StopPhase(spack.error.SpackError):
"""Pickle-able exception to control stopped builds."""
def __reduce__(self):
return _make_stop_phase, (self.message, self.long_message)
def _make_stop_phase(msg, long_msg):
return StopPhase(msg, long_msg)
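Since __reduce__ hands back a plain function plus the message tuple, a StopPhase survives the pickling that multiprocessing uses to ship it back through the pipe. A quick check of that property (a sketch assuming spack is importable and that SpackError exposes .message; the message strings are invented):
import pickle
from spack.build_environment import StopPhase

err = StopPhase('Stopped at configure', 'stop requested before install')
clone = pickle.loads(pickle.dumps(err))
assert clone.message == err.message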

View File

@@ -11,7 +11,6 @@
import shutil
import stat
import sys
import re
from subprocess import PIPE
from subprocess import check_call
@@ -57,9 +56,8 @@ class AutotoolsPackage(PackageBase):
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = 'AutotoolsPackage'
#: Whether or not to update ``config.guess`` and ``config.sub`` on old
#: architectures
patch_config_files = True
#: Whether or not to update ``config.guess`` on old architectures
patch_config_guess = True
#: Whether or not to update ``libtool``
#: (currently only for Arm/Clang/Fujitsu compilers)
patch_libtool = True
@@ -88,107 +86,72 @@ def archive_files(self):
return [os.path.join(self.build_directory, 'config.log')]
@run_after('autoreconf')
def _do_patch_config_files(self):
"""Some packages ship with older config.guess/config.sub files and
need to have these updated when installed on a newer architecture.
In particular, config.guess fails for PPC64LE for versions prior
to a 2013-06-10 build date (automake 1.13.4) and for ARM (aarch64)."""
def _do_patch_config_guess(self):
"""Some packages ship with an older config.guess and need to have
this updated when installed on a newer architecture. In particular,
config.guess fails for PPC64LE for versions prior to a 2013-06-10
build date (automake 1.13.4) and for ARM (aarch64)."""
if not self.patch_config_files or (
if not self.patch_config_guess or (
not self.spec.satisfies('target=ppc64le:') and
not self.spec.satisfies('target=aarch64:')
):
return
# TODO: Expand this to select the 'config.sub'-compatible architecture
# for each platform (e.g. 'config.sub' doesn't accept 'power9le', but
# does accept 'ppc64le').
if self.spec.satisfies('target=ppc64le:'):
config_arch = 'ppc64le'
elif self.spec.satisfies('target=aarch64:'):
config_arch = 'aarch64'
my_config_guess = None
config_guess = None
if os.path.exists('config.guess'):
# First search the top-level source directory
my_config_guess = 'config.guess'
else:
config_arch = 'local'
my_config_files = {'guess': None, 'sub': None}
config_files = {'guess': None, 'sub': None}
config_args = {'guess': [], 'sub': [config_arch]}
for config_name in config_files.keys():
config_file = 'config.{0}'.format(config_name)
if os.path.exists(config_file):
# First search the top-level source directory
my_config_files[config_name] = os.path.abspath(config_file)
else:
# Then search in all sub directories recursively.
# We would like to use AC_CONFIG_AUX_DIR, but not all packages
# ship with their configure.in or configure.ac.
config_path = next((os.path.abspath(os.path.join(r, f))
for r, ds, fs in os.walk('.') for f in fs
if f == config_file), None)
my_config_files[config_name] = config_path
if my_config_files[config_name] is not None:
try:
config_path = my_config_files[config_name]
check_call([config_path] + config_args[config_name],
stdout=PIPE, stderr=PIPE)
# The package's config file already runs OK, so just use it
continue
except Exception as e:
tty.debug(e)
else:
continue
# Look for a spack-installed automake package
if 'automake' in self.spec:
automake_dir = 'automake-' + str(self.spec['automake'].version)
automake_path = os.path.join(self.spec['automake'].prefix,
'share', automake_dir)
path = os.path.join(automake_path, config_file)
# Then search in all sub directories.
# We would like to use AC_CONFIG_AUX_DIR, but not all packages
# ship with their configure.in or configure.ac.
d = '.'
dirs = [os.path.join(d, o) for o in os.listdir(d)
if os.path.isdir(os.path.join(d, o))]
for dirname in dirs:
path = os.path.join(dirname, 'config.guess')
if os.path.exists(path):
config_files[config_name] = path
# Look for the system's config.guess
if (config_files[config_name] is None and
os.path.exists('/usr/share')):
automake_dir = [s for s in os.listdir('/usr/share') if
"automake" in s]
if automake_dir:
automake_path = os.path.join('/usr/share', automake_dir[0])
path = os.path.join(automake_path, config_file)
if os.path.exists(path):
config_files[config_name] = path
if config_files[config_name] is not None:
try:
config_path = config_files[config_name]
my_config_path = my_config_files[config_name]
my_config_guess = path
check_call([config_path] + config_args[config_name],
stdout=PIPE, stderr=PIPE)
if my_config_guess is not None:
try:
check_call([my_config_guess], stdout=PIPE, stderr=PIPE)
# The package's config.guess already runs OK, so just use it
return
except Exception as e:
tty.debug(e)
else:
return
m = os.stat(my_config_path).st_mode & 0o777 | stat.S_IWUSR
os.chmod(my_config_path, m)
shutil.copyfile(config_path, my_config_path)
continue
except Exception as e:
tty.debug(e)
# Look for a spack-installed automake package
if 'automake' in self.spec:
automake_path = os.path.join(self.spec['automake'].prefix, 'share',
'automake-' +
str(self.spec['automake'].version))
path = os.path.join(automake_path, 'config.guess')
if os.path.exists(path):
config_guess = path
# Look for the system's config.guess
if config_guess is None and os.path.exists('/usr/share'):
automake_dir = [s for s in os.listdir('/usr/share') if
"automake" in s]
if automake_dir:
automake_path = os.path.join('/usr/share', automake_dir[0])
path = os.path.join(automake_path, 'config.guess')
if os.path.exists(path):
config_guess = path
if config_guess is not None:
try:
check_call([config_guess], stdout=PIPE, stderr=PIPE)
mod = os.stat(my_config_guess).st_mode & 0o777 | stat.S_IWUSR
os.chmod(my_config_guess, mod)
shutil.copyfile(config_guess, my_config_guess)
return
except Exception as e:
tty.debug(e)
raise RuntimeError('Failed to find suitable ' + config_file)
@run_before('configure')
def _set_autotools_environment_variables(self):
"""Many autotools builds use a version of mknod.m4 that fails when
running as root unless FORCE_UNSAFE_CONFIGURE is set to 1.
We set this to 1 and expect the user to take responsibility if
they are running as root. They have to anyway, as this variable
doesn't actually prevent configure from doing bad things as root.
Without it, configure just fails halfway through, but it can
still run things *before* this check. Forcing this just removes a
nuisance -- this is not circumventing any real protection.
"""
os.environ["FORCE_UNSAFE_CONFIGURE"] = "1"
raise RuntimeError('Failed to find suitable config.guess')
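The patching loop above follows one recipe: if the package's own config script runs on this machine, keep it; otherwise find a newer copy (from a Spack-installed automake, or from /usr/share) that does run, make the package's copy writable, and overwrite it. Boiled down to a sketch (the helper name is invented):
import os
import shutil
import stat
from subprocess import PIPE, check_call

def substitute_config_file(my_config, candidates, extra_args=()):
    try:
        # The package's own script already works; nothing to do.
        check_call([my_config] + list(extra_args), stdout=PIPE, stderr=PIPE)
        return
    except Exception:
        pass
    for candidate in candidates:
        try:
            # Only substitute a candidate that itself runs here.
            check_call([candidate] + list(extra_args), stdout=PIPE, stderr=PIPE)
            # Make the target writable before copying over it.
            mode = os.stat(my_config).st_mode & 0o777 | stat.S_IWUSR
            os.chmod(my_config, mode)
            shutil.copyfile(candidate, my_config)
            return
        except Exception:
            continue
    raise RuntimeError('Failed to find suitable ' + os.path.basename(my_config))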
@run_after('configure')
def _do_patch_libtool(self):
@@ -206,9 +169,7 @@ def _do_patch_libtool(self):
line = 'wl="-Wl,"\n'
if line == 'pic_flag=""\n':
line = 'pic_flag="{0}"\n'\
.format(self.compiler.cc_pic_flag)
if self.spec.satisfies('%fj') and 'fjhpctag.o' in line:
line = re.sub(r'/\S*/fjhpctag.o', '', line)
.format(self.compiler.pic_flag)
sys.stdout.write(line)
@property
@@ -258,11 +219,11 @@ def autoreconf(self, spec, prefix):
# This line is what is needed most of the time
# --install, --verbose, --force
autoreconf_args = ['-ivf']
for dep in spec.dependencies(deptype='build'):
if os.path.exists(dep.prefix.share.aclocal):
autoreconf_args.extend([
'-I', dep.prefix.share.aclocal
])
if 'pkgconfig' in spec:
autoreconf_args += [
'-I',
os.path.join(spec['pkgconfig'].prefix, 'share', 'aclocal'),
]
autoreconf_args += self.autoreconf_extra_args
m.autoreconf(*autoreconf_args)
@@ -302,12 +263,6 @@ def flags_to_build_system_args(self, flags):
if values:
values_str = '{0}={1}'.format(flag.upper(), ' '.join(values))
self.configure_flag_args.append(values_str)
# Spack's fflags are meant for both F77 and FC, therefore we
# additionaly set FCFLAGS if required.
values = flags.get('fflags', None)
if values:
values_str = 'FCFLAGS={0}'.format(' '.join(values))
self.configure_flag_args.append(values_str)
def configure(self, spec, prefix):
"""Runs configure with the arguments specified in

View File

@@ -147,129 +147,33 @@ def _std_args(pkg):
except KeyError:
build_type = 'RelWithDebInfo'
define = CMakePackage.define
args = [
'-G', generator,
define('CMAKE_INSTALL_PREFIX', pkg.prefix),
define('CMAKE_BUILD_TYPE', build_type),
'-DCMAKE_INSTALL_PREFIX:PATH={0}'.format(pkg.prefix),
'-DCMAKE_BUILD_TYPE:STRING={0}'.format(build_type),
]
if primary_generator == 'Unix Makefiles':
args.append(define('CMAKE_VERBOSE_MAKEFILE', True))
args.append('-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON')
if platform.mac_ver()[0]:
args.extend([
define('CMAKE_FIND_FRAMEWORK', "LAST"),
define('CMAKE_FIND_APPBUNDLE', "LAST"),
'-DCMAKE_FIND_FRAMEWORK:STRING=LAST',
'-DCMAKE_FIND_APPBUNDLE:STRING=LAST'
])
# Set up CMake rpath
args.extend([
define('CMAKE_INSTALL_RPATH_USE_LINK_PATH', False),
define('CMAKE_INSTALL_RPATH',
spack.build_environment.get_rpaths(pkg)),
])
args.append('-DCMAKE_INSTALL_RPATH_USE_LINK_PATH:BOOL=FALSE')
rpaths = ';'.join(spack.build_environment.get_rpaths(pkg))
args.append('-DCMAKE_INSTALL_RPATH:STRING={0}'.format(rpaths))
# CMake's find_package() looks in CMAKE_PREFIX_PATH first, help CMake
# to find immediate link dependencies in right places:
deps = [d.prefix for d in
pkg.spec.dependencies(deptype=('build', 'link'))]
deps = filter_system_paths(deps)
args.append(define('CMAKE_PREFIX_PATH', deps))
args.append('-DCMAKE_PREFIX_PATH:STRING={0}'.format(';'.join(deps)))
return args
@staticmethod
def define(cmake_var, value):
"""Return a CMake command line argument that defines a variable.
The resulting argument will convert boolean values to OFF/ON
and lists/tuples to CMake semicolon-separated string lists. All other
values will be interpreted as strings.
Examples:
.. code-block:: python
[define('BUILD_SHARED_LIBS', True),
define('CMAKE_CXX_STANDARD', 14),
define('swr', ['avx', 'avx2'])]
will generate the following configuration options:
.. code-block:: console
["-DBUILD_SHARED_LIBS:BOOL=ON",
"-DCMAKE_CXX_STANDARD:STRING=14",
"-DSWR:STRING=avx;avx2]
"""
# Normalize the value: booleans map to BOOL ON/OFF, lists and
# tuples become semicolon-separated STRING lists, everything
# else is converted with str()
if isinstance(value, bool):
kind = 'BOOL'
value = "ON" if value else "OFF"
else:
kind = 'STRING'
if isinstance(value, (list, tuple)):
value = ";".join(str(v) for v in value)
else:
value = str(value)
return "".join(["-D", cmake_var, ":", kind, "=", value])
def define_from_variant(self, cmake_var, variant=None):
"""Return a CMake command line argument from the given variant's value.
The optional ``variant`` argument defaults to the lower-case transform
of ``cmake_var``.
This utility function is similar to
:py:meth:`~.AutotoolsPackage.with_or_without`.
Examples:
Given a package with:
.. code-block:: python
variant('cxxstd', default='11', values=('11', '14'),
multi=False, description='')
variant('shared', default=True, description='')
variant('swr', values=any_combination_of('avx', 'avx2'),
description='')
calling this function like:
.. code-block:: python
[define_from_variant('BUILD_SHARED_LIBS', 'shared'),
define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'),
define_from_variant('SWR')]
will generate the following configuration options:
.. code-block:: console
["-DBUILD_SHARED_LIBS:BOOL=ON",
"-DCMAKE_CXX_STANDARD:STRING=14",
"-DSWR:STRING=avx;avx2]
for ``<spec-name> cxxstd=14 +shared swr=avx,avx2``
"""
if variant is None:
variant = cmake_var.lower()
if variant not in self.variants:
raise KeyError(
'"{0}" is not a variant of "{1}"'.format(variant, self.name))
value = self.spec.variants[variant].value
if isinstance(value, (tuple, list)):
# Sort multi-valued variants for reproducibility
value = sorted(value)
return self.define(cmake_var, value)
def flags_to_build_system_args(self, flags):
"""Produces a list of all command line arguments to pass the specified
compiler flags to cmake. Note CMake does not have a cppflags option,
@@ -331,9 +235,9 @@ def cmake_args(self):
def cmake(self, spec, prefix):
"""Runs ``cmake`` in the build directory"""
options = self.std_cmake_args
options = [os.path.abspath(self.root_cmakelists_dir)]
options += self.std_cmake_args
options += self.cmake_args()
options.append(os.path.abspath(self.root_cmakelists_dir))
with working_dir(self.build_directory, create=True):
inspect.getmodule(self).cmake(*options)

View File

@@ -13,65 +13,39 @@ class CudaPackage(PackageBase):
"""Auxiliary class which contains CUDA variant, dependencies and conflicts
and is meant to unify and facilitate its usage.
"""
maintainers = ['ax3l', 'svenevs']
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list
# https://developer.nvidia.com/cuda-gpus
# https://en.wikipedia.org/wiki/CUDA#GPUs_supported
cuda_arch_values = [
'10', '11', '12', '13',
'20', '21',
'30', '32', '35', '37',
'50', '52', '53',
'60', '61', '62',
'70', '72', '75',
]
# FIXME: keep cuda and cuda_arch separate to make usage easier until
# FIXME: keep cuda and cuda_arch separate to make usage easier until
# Spack has depends_on(cuda, when='cuda_arch!=None') or alike
variant('cuda', default=False,
description='Build with CUDA')
# see http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list
# https://developer.nvidia.com/cuda-gpus
variant('cuda_arch',
description='CUDA architecture',
values=spack.variant.any_combination_of(*cuda_arch_values))
values=spack.variant.any_combination_of(
'20', '30', '32', '35', '50', '52', '53', '60', '61',
'62', '70', '72', '75'
))
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#nvcc-examples
# https://llvm.org/docs/CompileCudaWithLLVM.html#compiling-cuda-code
# see http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#nvcc-examples
# and http://llvm.org/docs/CompileCudaWithLLVM.html#compiling-cuda-code
@staticmethod
def cuda_flags(arch_list):
return [('--generate-code arch=compute_{0},code=sm_{0} '
'--generate-code arch=compute_{0},code=compute_{0}').format(s)
for s in arch_list]
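For one architecture value the helper yields a single flag pair, e.g.:
flags = CudaPackage.cuda_flags(['70'])
# -> ['--generate-code arch=compute_70,code=sm_70 '
#     '--generate-code arch=compute_70,code=compute_70']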
depends_on('cuda', when='+cuda')
depends_on("cuda@7:", when='+cuda')
# CUDA version vs Architecture
# https://en.wikipedia.org/wiki/CUDA#GPUs_supported
depends_on('cuda@:6.0', when='cuda_arch=10')
depends_on('cuda@:6.5', when='cuda_arch=11')
depends_on('cuda@2.1:6.5', when='cuda_arch=12')
depends_on('cuda@2.1:6.5', when='cuda_arch=13')
depends_on("cuda@8:", when='cuda_arch=60')
depends_on("cuda@8:", when='cuda_arch=61')
depends_on("cuda@8:", when='cuda_arch=62')
depends_on("cuda@9:", when='cuda_arch=70')
depends_on("cuda@9:", when='cuda_arch=72')
depends_on("cuda@10:", when='cuda_arch=75')
depends_on('cuda@3.0:8.0', when='cuda_arch=20')
depends_on('cuda@3.2:8.0', when='cuda_arch=21')
depends_on('cuda@5.0:10.2', when='cuda_arch=30')
depends_on('cuda@5.0:10.2', when='cuda_arch=32')
depends_on('cuda@5.0:10.2', when='cuda_arch=35')
depends_on('cuda@6.5:10.2', when='cuda_arch=37')
depends_on('cuda@6.0:', when='cuda_arch=50')
depends_on('cuda@6.5:', when='cuda_arch=52')
depends_on('cuda@6.5:', when='cuda_arch=53')
depends_on('cuda@8.0:', when='cuda_arch=60')
depends_on('cuda@8.0:', when='cuda_arch=61')
depends_on('cuda@8.0:', when='cuda_arch=62')
depends_on('cuda@9.0:', when='cuda_arch=70')
depends_on('cuda@9.0:', when='cuda_arch=72')
depends_on('cuda@10.0:', when='cuda_arch=75')
depends_on('cuda@:8', when='cuda_arch=20')
# There are at least three cases to be aware of for compiler conflicts
# 1. Linux x86_64
@@ -140,12 +114,12 @@ def cuda_flags(arch_list):
conflicts('%intel@16.0:', when='+cuda ^cuda@:8.0.43')
conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60')
conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9')
conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0')
conflicts('%intel@19.0:', when='+cuda ^cuda@:10.2.89')
# XL is mostly relevant for ppc64le Linux
conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1')
conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2')
conflicts('%xl@17:', when='+cuda ^cuda@:10.2.89')
conflicts('%xl@17:', when='+cuda ^cuda@10.0.130:10.2.89')
# Mac OS X
# platform = ' platform=darwin'
@@ -156,8 +130,18 @@ def cuda_flags(arch_list):
# `clang-apple@x.y.z` as a possible fix.
# Compiler conflicts will be eventually taken from here:
# https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#abstract
conflicts('platform=darwin', when='+cuda ^cuda@11.0:')
# Make sure cuda_arch can not be used without +cuda
for value in cuda_arch_values:
conflicts('~cuda', when='cuda_arch=' + value)
conflicts('~cuda', when='cuda_arch=20')
conflicts('~cuda', when='cuda_arch=30')
conflicts('~cuda', when='cuda_arch=32')
conflicts('~cuda', when='cuda_arch=35')
conflicts('~cuda', when='cuda_arch=50')
conflicts('~cuda', when='cuda_arch=52')
conflicts('~cuda', when='cuda_arch=53')
conflicts('~cuda', when='cuda_arch=60')
conflicts('~cuda', when='cuda_arch=61')
conflicts('~cuda', when='cuda_arch=62')
conflicts('~cuda', when='cuda_arch=70')
conflicts('~cuda', when='cuda_arch=72')
conflicts('~cuda', when='cuda_arch=75')

View File

@@ -1073,15 +1073,6 @@ def _setup_dependent_env_callback(
# which performs dizzyingly similar but necessarily different
# actions, and (b) function code leaves a bit more breathing
# room within the suffocating corset of flake8 line length.
# Intel MPI since 2019 depends on libfabric, which is not in the
# lib directory but in a directory of its own that should be
# included in the rpath
if self.version >= ver('2019'):
d = ancestor(self.component_lib_dir('mpi'))
libfabrics_path = os.path.join(d, 'libfabric', 'lib')
env.append_path('SPACK_COMPILER_EXTRA_RPATHS',
libfabrics_path)
else:
raise InstallError('compilers_of_client arg required for MPI')

View File

@@ -5,10 +5,9 @@
import inspect
from llnl.util.filesystem import working_dir, join_path
from spack.directives import depends_on, extends
from spack.package import PackageBase, run_after
import os
from llnl.util.filesystem import working_dir
from spack.directives import depends_on, extends, resource
from spack.package import PackageBase, run_before, run_after
class SIPPackage(PackageBase):
@@ -41,12 +40,33 @@ class SIPPackage(PackageBase):
extends('python')
depends_on('qt')
depends_on('py-sip')
resource(name='sip',
url='https://www.riverbankcomputing.com/static/Downloads/sip/4.19.18/sip-4.19.18.tar.gz',
sha256='c0bd863800ed9b15dcad477c4017cdb73fa805c25908b0240564add74d697e1e',
destination='.')
def python(self, *args, **kwargs):
"""The python ``Executable``."""
inspect.getmodule(self).python(*args, **kwargs)
@run_before('configure')
def install_sip(self):
args = [
'--sip-module={0}'.format(self.sip_module),
'--bindir={0}'.format(self.prefix.bin),
'--destdir={0}'.format(inspect.getmodule(self).site_packages_dir),
'--incdir={0}'.format(inspect.getmodule(self).python_include_dir),
'--sipdir={0}'.format(self.prefix.share.sip),
'--stubsdir={0}'.format(inspect.getmodule(self).site_packages_dir),
]
with working_dir('sip-4.19.18'):
self.python('configure.py', *args)
inspect.getmodule(self).make()
inspect.getmodule(self).make('install')
def configure_file(self):
"""Returns the name of the configure file to use."""
return 'configure.py'
@@ -57,15 +77,12 @@ def configure(self, spec, prefix):
args = self.configure_args()
python_include_dir = 'python' + str(spec['python'].version.up_to(2))
args.extend([
'--verbose',
'--confirm-license',
'--qmake', spec['qt'].prefix.bin.qmake,
'--sip', spec['py-sip'].prefix.bin.sip,
'--sip-incdir', join_path(spec['py-sip'].prefix.include,
python_include_dir),
'--sip', prefix.bin.sip,
'--sip-incdir', inspect.getmodule(self).python_include_dir,
'--bindir', prefix.bin,
'--destdir', inspect.getmodule(self).site_packages_dir,
])
@@ -114,14 +131,3 @@ def import_module_test(self):
# Check that self.prefix is there after installation
run_after('install')(PackageBase.sanity_check_prefix)
@run_after('install')
def extend_path_setup(self):
# See github issue #14121 and PR #15297
module = self.spec['py-sip'].variants['module'].value
if module != 'sip':
module = module.split('.')[0]
with working_dir(inspect.getmodule(self).site_packages_dir):
with open(os.path.join(module, '__init__.py'), 'a') as f:
f.write('from pkgutil import extend_path\n')
f.write('__path__ = extend_path(__path__, __name__)\n')

View File

@@ -1,40 +0,0 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import spack.util.url
import spack.package
class SourceforgePackage(spack.package.PackageBase):
"""Mixin that takes care of setting url and mirrors for Sourceforge
packages."""
#: Path of the package in a Sourceforge mirror
sourceforge_mirror_path = None
#: List of Sourceforge mirrors used by Spack
base_mirrors = [
'https://prdownloads.sourceforge.net/',
'https://freefr.dl.sourceforge.net/',
'https://netcologne.dl.sourceforge.net/',
'https://pilotfiber.dl.sourceforge.net/',
'https://downloads.sourceforge.net/',
'http://kent.dl.sourceforge.net/sourceforge/'
]
@property
def urls(self):
self._ensure_sourceforge_mirror_path_is_set_or_raise()
return [
spack.util.url.join(m, self.sourceforge_mirror_path,
resolve_href=True)
for m in self.base_mirrors
]
def _ensure_sourceforge_mirror_path_is_set_or_raise(self):
if self.sourceforge_mirror_path is None:
cls_name = type(self).__name__
msg = ('{0} must define a `sourceforge_mirror_path` attribute'
' [none defined]')
raise AttributeError(msg.format(cls_name))
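A package opts in by mixing the class into its bases and setting the one required attribute; a hypothetical example (package name and mirror path invented):
from spack import *

class Libfoo(AutotoolsPackage, SourceforgePackage):
    """Hypothetical SourceForge-hosted package."""

    # The mixin derives the full url list from base_mirrors + this path.
    sourceforge_mirror_path = 'libfoo/libfoo-1.0.tar.gz'
The SourcewarePackage and XorgPackage mixins below work the same way with their respective *_mirror_path attributes.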

View File

@@ -1,37 +0,0 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import spack.util.url
import spack.package
class SourcewarePackage(spack.package.PackageBase):
"""Mixin that takes care of setting url and mirrors for Sourceware.org
packages."""
#: Path of the package in a Sourceware mirror
sourceware_mirror_path = None
#: List of Sourceware mirrors used by Spack
base_mirrors = [
'https://sourceware.org/pub/',
'https://mirrors.kernel.org/sourceware/',
'https://ftp.gwdg.de/pub/linux/sources.redhat.com/'
]
@property
def urls(self):
self._ensure_sourceware_mirror_path_is_set_or_raise()
return [
spack.util.url.join(m, self.sourceware_mirror_path,
resolve_href=True)
for m in self.base_mirrors
]
def _ensure_sourceware_mirror_path_is_set_or_raise(self):
if self.sourceware_mirror_path is None:
cls_name = type(self).__name__
msg = ('{0} must define a `sourceware_mirror_path` attribute'
' [none defined]')
raise AttributeError(msg.format(cls_name))

View File

@@ -75,14 +75,13 @@ def waf(self, *args, **kwargs):
def configure(self, spec, prefix):
"""Configures the project."""
args = ['--prefix={0}'.format(self.prefix)]
args += self.configure_args()
args = self.configure_args()
self.waf('configure', *args)
def configure_args(self):
"""Arguments to pass to configure."""
return []
return ['--prefix={0}'.format(self.prefix)]
def build(self, spec, prefix):
"""Executes the build."""

View File

@@ -1,42 +0,0 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import spack.util.url
import spack.package
class XorgPackage(spack.package.PackageBase):
"""Mixin that takes care of setting url and mirrors for x.org
packages."""
#: Path of the package in a x.org mirror
xorg_mirror_path = None
#: List of x.org mirrors used by Spack
# Note: x.org mirrors are a bit tricky, since many are out-of-sync or off.
# A good package to test with is `util-macros`, which had a "recent"
# release.
base_mirrors = [
'https://www.x.org/archive/individual/',
'https://mirrors.ircam.fr/pub/x.org/individual/',
'https://mirror.transip.net/xorg/individual/',
'ftp://ftp.freedesktop.org/pub/xorg/individual/',
'http://xorg.mirrors.pair.com/individual/'
]
@property
def urls(self):
self._ensure_xorg_mirror_path_is_set_or_raise()
return [
spack.util.url.join(m, self.xorg_mirror_path,
resolve_href=True)
for m in self.base_mirrors
]
def _ensure_xorg_mirror_path_is_set_or_raise(self):
if self.xorg_mirror_path is None:
cls_name = type(self).__name__
msg = ('{0} must define a `xorg_mirror_path` attribute'
' [none defined]')
raise AttributeError(msg.format(cls_name))

View File

@@ -42,6 +42,7 @@ def _fetch_cache():
building the same package in different ways or multiple times.
"""
path = spack.config.get('config:source_cache')
if not path:
path = os.path.join(spack.paths.var_path, "cache")
path = spack.util.path.canonicalize_path(path)
@@ -50,9 +51,8 @@ def _fetch_cache():
class MirrorCache(object):
def __init__(self, root, skip_unstable_versions):
def __init__(self, root):
self.root = os.path.abspath(root)
self.skip_unstable_versions = skip_unstable_versions
def store(self, fetcher, relative_dest):
"""Fetch and relocate the fetcher's target into our mirror cache."""
@@ -85,3 +85,5 @@ def symlink(self, mirror_ref):
#: Spack's local cache for downloaded source archives
fetch_cache = llnl.util.lang.Singleton(_fetch_cache)
mirror_cache = None

View File

@@ -24,6 +24,7 @@
import spack.compilers as compilers
import spack.config as cfg
import spack.environment as ev
from spack.dependency import all_deptypes
from spack.error import SpackError
import spack.hash_types as ht
from spack.main import SpackCommand
@@ -33,10 +34,6 @@
import spack.util.web as web_util
JOB_RETRY_CONDITIONS = [
'always',
]
spack_gpg = SpackCommand('gpg')
spack_compiler = SpackCommand('compiler')
@@ -345,24 +342,25 @@ def compute_spec_deps(spec_list):
],
"specs": [
{
"root_spec": "readline@7.0%apple-clang@9.1.0 arch=darwin-...",
"spec": "readline@7.0%apple-clang@9.1.0 arch=darwin-highs...",
"root_spec": "readline@7.0%clang@9.1.0-apple arch=darwin-...",
"spec": "readline@7.0%clang@9.1.0-apple arch=darwin-highs...",
"label": "readline/ip6aiun"
},
{
"root_spec": "readline@7.0%apple-clang@9.1.0 arch=darwin-...",
"spec": "ncurses@6.1%apple-clang@9.1.0 arch=darwin-highsi...",
"root_spec": "readline@7.0%clang@9.1.0-apple arch=darwin-...",
"spec": "ncurses@6.1%clang@9.1.0-apple arch=darwin-highsi...",
"label": "ncurses/y43rifz"
},
{
"root_spec": "readline@7.0%apple-clang@9.1.0 arch=darwin-...",
"spec": "pkgconf@1.5.4%apple-clang@9.1.0 arch=darwin-high...",
"root_spec": "readline@7.0%clang@9.1.0-apple arch=darwin-...",
"spec": "pkgconf@1.5.4%clang@9.1.0-apple arch=darwin-high...",
"label": "pkgconf/eg355zb"
}
]
}
"""
deptype = all_deptypes
spec_labels = {}
specs = []
@@ -382,7 +380,7 @@ def append_dep(s, d):
rkey, rlabel = spec_deps_key_label(spec)
for s in spec.traverse(deptype=all):
for s in spec.traverse(deptype=deptype):
if s.external:
tty.msg('Will not stage external pkg: {0}'.format(s))
continue
@@ -394,7 +392,7 @@ def append_dep(s, d):
}
append_dep(rlabel, slabel)
for d in s.dependencies(deptype=all):
for d in s.dependencies(deptype=deptype):
dkey, dlabel = spec_deps_key_label(d)
if d.external:
tty.msg('Will not stage external dep: {0}'.format(d))
@@ -402,11 +400,11 @@ def append_dep(s, d):
append_dep(slabel, dlabel)
for spec_label, spec_holder in spec_labels.items():
for l, d in spec_labels.items():
specs.append({
'label': spec_label,
'spec': spec_holder['spec'],
'root_spec': spec_holder['root'],
'label': l,
'spec': d['spec'],
'root_spec': d['root'],
})
deps_json_obj = {
@@ -433,24 +431,8 @@ def pkg_name_from_spec_label(spec_label):
return spec_label[:spec_label.index('/')]
def format_job_needs(phase_name, strip_compilers, dep_jobs,
osname, build_group, enable_artifacts_buildcache):
needs_list = []
for dep_job in dep_jobs:
needs_list.append({
'job': get_job_name(phase_name,
strip_compilers,
dep_job,
osname,
build_group),
'artifacts': enable_artifacts_buildcache,
})
return needs_list
def generate_gitlab_ci_yaml(env, print_summary, output_file,
custom_spack_repo=None, custom_spack_ref=None,
run_optimizer=False, use_dependencies=False):
custom_spack_repo=None, custom_spack_ref=None):
# FIXME: What's the difference between one that opens with 'spack'
# and one that opens with 'env'? This will only handle the former.
with spack.concretize.disable_compiler_existence_check():
@@ -484,10 +466,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
tty.verbose("Using CDash auth token from environment")
cdash_auth_token = os.environ.get('SPACK_CDASH_AUTH_TOKEN')
is_pr_pipeline = (
os.environ.get('SPACK_IS_PR_PIPELINE', '').lower() == 'true'
)
# Make sure we use a custom spack if necessary
before_script = None
after_script = None
@@ -495,9 +473,10 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
if not custom_spack_ref:
custom_spack_ref = 'master'
before_script = [
('git clone "{0}"'.format(custom_spack_repo)),
'pushd ./spack && git checkout "{0}" && popd'.format(
custom_spack_ref),
('git clone "{0}" --branch "{1}" --depth 1 '
'--single-branch'.format(custom_spack_repo, custom_spack_ref)),
# Next line just shows spack version in pipeline output
'pushd ./spack && git rev-parse HEAD && popd',
'. "./spack/share/spack/setup-env.sh"',
]
after_script = [
@@ -559,9 +538,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
stage_names = []
max_length_needs = 0
max_needs_job = ''
for phase in phases:
phase_name = phase['name']
strip_compilers = phase['strip-compilers']
@@ -612,10 +588,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
if 'enable-debug-messages' in gitlab_ci:
debug_flag = '-d '
job_scripts = [
'spack env activate .',
'spack {0}ci rebuild'.format(debug_flag),
]
job_scripts = ['spack {0}ci rebuild'.format(debug_flag)]
compiler_action = 'NONE'
if len(phases) > 1:
@@ -628,35 +601,25 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
root_spec, main_phase, strip_compilers),
'SPACK_JOB_SPEC_PKG_NAME': release_spec.name,
'SPACK_COMPILER_ACTION': compiler_action,
'SPACK_IS_PR_PIPELINE': str(is_pr_pipeline),
}
job_dependencies = []
if spec_label in dependencies:
if enable_artifacts_buildcache:
dep_jobs = [
d for d in release_spec.traverse(deptype=all,
root=False)
]
else:
dep_jobs = []
for dep_label in dependencies[spec_label]:
dep_pkg = pkg_name_from_spec_label(dep_label)
dep_root = spec_labels[dep_label]['rootSpec']
dep_jobs.append(dep_root[dep_pkg])
job_dependencies.extend(
format_job_needs(phase_name, strip_compilers, dep_jobs,
osname, build_group,
enable_artifacts_buildcache))
for dep_label in dependencies[spec_label]:
dep_pkg = pkg_name_from_spec_label(dep_label)
dep_spec = spec_labels[dep_label]['rootSpec'][dep_pkg]
dep_job_name = get_job_name(
phase_name, strip_compilers, dep_spec, osname,
build_group)
job_dependencies.append(dep_job_name)
# This next section helps gitlab make sure the right
# bootstrapped compiler exists in the artifacts buildcache by
# creating an artificial dependency between this spec and its
# compiler. So, if we are in the main phase, and if the
# compiler we are supposed to use is listed in any of the
# bootstrap spec lists, then we will add more dependencies to
# the job (that compiler and maybe its dependencies as well).
# bootstrap spec lists, then we will add one more dependency to
# "job_dependencies" (that compiler).
if is_main_phase(phase_name):
compiler_pkg_spec = compilers.pkg_spec_for_compiler(
release_spec.compiler)
@@ -664,25 +627,12 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
bs_arch = bs['spec'].architecture
if (bs['spec'].satisfies(compiler_pkg_spec) and
bs_arch == release_spec.architecture):
# We found the bootstrap compiler this release spec
# should be built with, so for DAG scheduling
# purposes, we will at least add the compiler spec
# to the jobs "needs". But if artifact buildcache
# is enabled, we'll have to add all transtive deps
# of the compiler as well.
dep_jobs = [bs['spec']]
if enable_artifacts_buildcache:
dep_jobs = [
d for d in bs['spec'].traverse(deptype=all)
]
job_dependencies.extend(
format_job_needs(bs['phase-name'],
bs['strip-compilers'],
dep_jobs,
str(bs_arch),
build_group,
enable_artifacts_buildcache))
c_job_name = get_job_name(bs['phase-name'],
bs['strip-compilers'],
bs['spec'],
str(bs_arch),
build_group)
job_dependencies.append(c_job_name)
if enable_cdash_reporting:
cdash_build_name = get_cdash_build_name(
@@ -697,7 +647,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
job_vars['SPACK_CDASH_BUILD_NAME'] = cdash_build_name
job_vars['SPACK_RELATED_BUILDS_CDASH'] = ';'.join(
sorted(related_builds))
related_builds)
variables.update(job_vars)
@@ -707,12 +657,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
]
if enable_artifacts_buildcache:
bc_root = 'local_mirror/build_cache'
artifact_paths.extend([os.path.join(bc_root, p) for p in [
bindist.tarball_name(release_spec, '.spec.yaml'),
bindist.tarball_name(release_spec, '.cdashid'),
bindist.tarball_directory_name(release_spec),
]])
artifact_paths.append('local_mirror/build_cache')
job_object = {
'stage': stage_name,
@@ -723,18 +668,9 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
'paths': artifact_paths,
'when': 'always',
},
'needs': sorted(job_dependencies, key=lambda d: d['job']),
'retry': {
'max': 2,
'when': JOB_RETRY_CONDITIONS,
}
'dependencies': job_dependencies,
}
length_needs = len(job_dependencies)
if length_needs > max_length_needs:
max_length_needs = length_needs
max_needs_job = job_name
if before_script:
job_object['before_script'] = before_script
@@ -755,9 +691,6 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
tty.debug('{0} build jobs generated in {1} stages'.format(
job_id, stage_id))
tty.debug('The max_needs_job is {0}, with {1} needs'.format(
max_needs_job, max_length_needs))
# Use "all_job_names" to populate the build group for this set
if enable_cdash_reporting and cdash_auth_token:
try:
@@ -768,7 +701,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
else:
tty.warn('Unable to populate buildgroup without CDash credentials')
if final_job_config and not is_pr_pipeline:
if final_job_config:
# Add an extra, final job to regenerate the index
final_stage = 'stage-rebuild-index'
final_job = {
@@ -788,22 +721,8 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
output_object['stages'] = stage_names
sorted_output = {}
for output_key, output_value in sorted(output_object.items()):
sorted_output[output_key] = output_value
# TODO(opadron): remove this or refactor
if run_optimizer:
import spack.ci_optimization as ci_opt
sorted_output = ci_opt.optimizer(sorted_output)
# TODO(opadron): remove this or refactor
if use_dependencies:
import spack.ci_needs_workaround as cinw
sorted_output = cinw.needs_to_dependencies(sorted_output)
with open(output_file, 'w') as outf:
outf.write(syaml.dump_config(sorted_output, default_flow_style=True))
outf.write(syaml.dump_config(output_object, default_flow_style=True))
def url_encode_string(input_string):
@@ -1028,9 +947,8 @@ def read_cdashid_from_mirror(spec, mirror_url):
def push_mirror_contents(env, spec, yaml_path, mirror_url, build_id):
if mirror_url:
tty.debug('Creating buildcache')
buildcache._createtarball(env, spec_yaml=yaml_path, add_deps=False,
output_location=mirror_url, force=True,
allow_root=True)
buildcache._createtarball(env, yaml_path, None, mirror_url, None,
True, True, False, False, True, False)
if build_id:
tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
build_id, mirror_url))

View File

@@ -1,47 +0,0 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
try:
# dynamically import to keep vermin from complaining
collections_abc = __import__('collections.abc')
except ImportError:
collections_abc = collections
get_job_name = lambda needs_entry: (
needs_entry.get('job') if (
isinstance(needs_entry, collections_abc.Mapping) and
needs_entry.get('artifacts', True))
else
needs_entry if isinstance(needs_entry, str)
else None)
def convert_job(job_entry):
if not isinstance(job_entry, collections_abc.Mapping):
return job_entry
needs = job_entry.get('needs')
if needs is None:
return job_entry
new_job = {}
new_job.update(job_entry)
del new_job['needs']
new_job['dependencies'] = list(filter(
(lambda x: x is not None),
(get_job_name(needs_entry) for needs_entry in needs)))
return new_job
def needs_to_dependencies(yaml):
return dict((k, convert_job(v)) for k, v in yaml.items())
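# A minimal sketch of the conversion (hypothetical job entry, not from a
# real pipeline): a "needs" list of job names or {'job': ..., 'artifacts': ...}
# mappings is flattened into a plain "dependencies" list of job names.
#
#   needs_to_dependencies({
#       'build-zlib': {'script': ['spack ci rebuild'],
#                      'needs': [{'job': 'build-cmake', 'artifacts': True}]},
#   })
#   # -> {'build-zlib': {'script': ['spack ci rebuild'],
#   #                    'dependencies': ['build-cmake']}}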

View File

@@ -1,394 +0,0 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
try:
# dynamically import to keep vermin from complaining
collections_abc = __import__('collections.abc')
except ImportError:
collections_abc = collections
import copy
import hashlib
import spack.util.spack_yaml as syaml
def sort_yaml_obj(obj):
if isinstance(obj, collections_abc.Mapping):
return syaml.syaml_dict(
(k, sort_yaml_obj(v))
for k, v in
sorted(obj.items(), key=(lambda item: str(item[0]))))
if isinstance(obj, collections_abc.Sequence) and not isinstance(obj, str):
return syaml.syaml_list(sort_yaml_obj(x) for x in obj)
return obj
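# For illustration (hypothetical input): sorting is recursive and key-order
# stable, so two structurally equal objects always serialize identically,
# which is what makes the size comparisons and hashing below reliable.
#
#   sort_yaml_obj({'b': [2, 1], 'a': {'d': 0, 'c': 0}})
#   # -> {'a': {'c': 0, 'd': 0}, 'b': [2, 1]}  (list order is preserved)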
def matches(obj, proto):
"""Returns True if the test object "obj" matches the prototype object
"proto".
If obj and proto are mappings, obj matches proto if (key in obj) and
(obj[key] matches proto[key]) for every key in proto.
If obj and proto are sequences, obj matches proto if they are of the same
length and (a matches b) for every (a,b) in zip(obj, proto).
Otherwise, obj matches proto if obj == proto.
Precondition: proto must not have any reference cycles
"""
if isinstance(obj, collections_abc.Mapping):
if not isinstance(proto, collections_abc.Mapping):
return False
return all(
(key in obj and matches(obj[key], val))
for key, val in proto.items()
)
if (isinstance(obj, collections_abc.Sequence) and
not isinstance(obj, str)):
if not (isinstance(proto, collections_abc.Sequence) and
not isinstance(proto, str)):
return False
if len(obj) != len(proto):
return False
return all(
matches(obj[index], val)
for index, val in enumerate(proto)
)
return obj == proto
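# Worked examples (hypothetical values): proto acts as a structural subset
# test for mappings, and an exact element-wise test for sequences.
#
#   matches({'tags': ['x86'], 'stage': 'build'}, {'tags': ['x86']})  # True
#   matches({'tags': ['x86', 'gpu']}, {'tags': ['x86']})             # False
#   matches('build', 'build')                                        # True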
def subkeys(obj, proto):
"""Returns the test mapping "obj" after factoring out the items it has in
common with the prototype mapping "proto".
Consider a recursive merge operation, merge(a, b) on mappings a and b, that
returns a mapping, m, whose keys are the union of the keys of a and b, and
for every such key, "k", its corresponding value is:
- merge(a[key], b[key]) if a[key] and b[key] are mappings, or
- b[key] if (key in b) and not matches(a[key], b[key]),
or
- a[key] otherwise
If obj and proto are mappings, the returned object is the smallest object,
"a", such that merge(a, proto) matches obj.
Otherwise, obj is returned.
"""
if not (isinstance(obj, collections_abc.Mapping) and
isinstance(proto, collections_abc.Mapping)):
return obj
new_obj = {}
for key, value in obj.items():
if key not in proto:
new_obj[key] = value
continue
if (matches(value, proto[key]) and
matches(proto[key], value)):
continue
if isinstance(value, collections_abc.Mapping):
new_obj[key] = subkeys(value, proto[key])
continue
new_obj[key] = value
return new_obj
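# Worked example (hypothetical values): the returned mapping keeps only what
# "proto" does not already supply, so merge(result, proto) matches obj.
#
#   subkeys({'stage': 'build', 'tags': ['x86'], 'retry': 2},
#           {'tags': ['x86'], 'retry': 1})
#   # -> {'stage': 'build', 'retry': 2}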
def add_extends(yaml, key):
"""Modifies the given object "yaml" so that it includes an "extends" key
whose value features "key".
If "extends" is not in yaml, then yaml is modified such that
yaml["extends"] == key.
If yaml["extends"] is a str, then yaml is modified such that
yaml["extends"] == [yaml["extends"], key]
If yaml["extends"] is a list that does not include key, then key is
appended to the list.
Otherwise, yaml is left unchanged.
"""
has_key = ('extends' in yaml)
extends = yaml.get('extends')
if has_key and not isinstance(extends, (str, collections_abc.Sequence)):
return
if extends is None:
yaml['extends'] = key
return
if isinstance(extends, str):
if extends != key:
yaml['extends'] = [extends, key]
return
if key not in extends:
extends.append(key)
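# Worked examples (hypothetical values) of the three mutation cases:
#
#   y = {};                add_extends(y, '.c0')  # y == {'extends': '.c0'}
#   y = {'extends': '.a'}; add_extends(y, '.c0')  # extends -> ['.a', '.c0']
#   y = {'extends': ['.a', '.c0']}; add_extends(y, '.c0')  # unchanged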
def common_subobject(yaml, sub):
"""Factor prototype object "sub" out of the values of mapping "yaml".
Consider a modified copy of yaml, "new", where for each key, "key" in yaml:
- If yaml[key] matches sub, then new[key] = subkeys(yaml[key], sub).
- Otherwise, new[key] = yaml[key].
If the above match criterion is not satisfied for any such key, then (yaml,
None) is returned and the yaml object is left unchanged.
Otherwise, each matching value in new is modified as in
add_extends(new[key], common_key), and then new[common_key] is set to sub.
The common_key value is chosen such that it does not match any preexisting
key in new. In this case, (new, common_key) is returned.
"""
match_list = set(k for k, v in yaml.items() if matches(v, sub))
if not match_list:
return yaml, None
common_prefix = '.c'
common_index = 0
while True:
common_key = ''.join((common_prefix, str(common_index)))
if common_key not in yaml:
break
common_index += 1
new_yaml = {}
for key, val in yaml.items():
new_yaml[key] = copy.deepcopy(val)
if not matches(val, sub):
continue
new_yaml[key] = subkeys(new_yaml[key], sub)
add_extends(new_yaml[key], common_key)
new_yaml[common_key] = sub
return new_yaml, common_key
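# Worked example (hypothetical jobs): the shared fragment is hoisted into a
# fresh '.c0' key and each matching job gains an 'extends' reference to it.
#
#   common_subobject({'job1': {'tags': ['x86'], 'stage': 'a'},
#                     'job2': {'tags': ['x86'], 'stage': 'b'}},
#                    {'tags': ['x86']})
#   # -> ({'job1': {'stage': 'a', 'extends': '.c0'},
#   #      'job2': {'stage': 'b', 'extends': '.c0'},
#   #      '.c0': {'tags': ['x86']}}, '.c0')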
def print_delta(name, old, new, applied=None):
delta = new - old
reldelta = (1000 * delta) // old
reldelta = (reldelta // 10, reldelta % 10)
if applied is None:
applied = (new <= old)
print('\n'.join((
'{0} {1}:',
' before: {2: 10d}',
' after : {3: 10d}',
' delta : {4:+10d} ({5:=+3d}.{6}%)',
)).format(
name,
('+' if applied else 'x'),
old,
new,
delta,
reldelta[0],
reldelta[1]
))
def try_optimization_pass(name, yaml, optimization_pass, *args, **kwargs):
"""Try applying an optimization pass and return information about the
result
"name" is a string describing the nature of the pass. If it is a non-empty
string, summary statistics are also printed to stdout.
"yaml" is the object to apply the pass to.
"optimization_pass" is the function implementing the pass to be applied.
"args" and "kwargs" are the additional arguments to pass to optimization
pass. The pass is applied as
>>> (new_yaml, *other_results) = optimization_pass(yaml, *args, **kwargs)
The pass's results are greedily rejected if it does not modify the original
yaml document, or if it produces a yaml document that serializes to a
larger string.
Returns (new_yaml, yaml, applied, other_results) if applied, or
(yaml, new_yaml, applied, other_results) otherwise.
"""
result = optimization_pass(yaml, *args, **kwargs)
new_yaml, other_results = result[0], result[1:]
if new_yaml is yaml:
# pass was not applied
return (yaml, new_yaml, False, other_results)
pre_size = len(syaml.dump_config(
sort_yaml_obj(yaml), default_flow_style=True))
post_size = len(syaml.dump_config(
sort_yaml_obj(new_yaml), default_flow_style=True))
# pass makes the size worse: not applying
applied = (post_size <= pre_size)
if applied:
yaml, new_yaml = new_yaml, yaml
if name:
print_delta(name, pre_size, post_size, applied)
return (yaml, new_yaml, applied, other_results)
def build_histogram(iterator, key):
"""Builds a histogram of values given an iterable of mappings and a key.
For each mapping "m" with key "key" in iterator, the value m[key] is
considered.
Returns a list of tuples (hash, count, proportion, value), where
- "hash" is a sha1sum hash of the value.
- "count" is the number of occurences of values that hash to "hash".
- "proportion" is the proportion of all values considered above that
hash to "hash".
- "value" is one of the values considered above that hash to "hash".
Which value is chosen when multiple values hash to the same "hash" is
undefined.
The list is sorted in descending order by count, yielding the most
frequently occurring hashes first.
"""
buckets = collections.defaultdict(int)
values = {}
num_objects = 0
for obj in iterator:
num_objects += 1
try:
val = obj[key]
except (KeyError, TypeError):
continue
value_hash = hashlib.sha1()
value_hash.update(syaml.dump_config(sort_yaml_obj(val)).encode())
value_hash = value_hash.hexdigest()
buckets[value_hash] += 1
values[value_hash] = val
return [(h, buckets[h], float(buckets[h]) / num_objects, values[h])
for h in sorted(buckets.keys(), key=lambda k: -buckets[k])]
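# Worked example (hypothetical jobs): two of three jobs share a tag list, so
# its bucket sorts first with count 2 and proportion 2/3.
#
#   build_histogram([{'tags': ['x86']}, {'tags': ['x86']},
#                    {'tags': ['gpu']}], 'tags')
#   # -> [(<sha1>, 2, 0.666..., ['x86']), (<sha1>, 1, 0.333..., ['gpu'])]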
def optimizer(yaml):
original_size = len(syaml.dump_config(
sort_yaml_obj(yaml), default_flow_style=True))
# try factoring out commonly repeated portions
common_job = {
'variables': {
'SPACK_COMPILER_ACTION': 'NONE',
'SPACK_RELATED_BUILDS_CDASH': ''
},
'after_script': ['rm -rf "./spack"'],
'artifacts': {
'paths': ['jobs_scratch_dir', 'cdash_report'],
'when': 'always'
},
}
# look for a list of tags that appear frequently
_, count, proportion, tags = next(iter(
build_histogram(yaml.values(), 'tags')),
(None,) * 4)
# If a list of tags is found, and more than one job uses it,
# *and* the jobs that do use it represent at least 70% of all jobs, then
# add the list to the prototype object.
if tags and count > 1 and proportion >= 0.70:
common_job['tags'] = tags
# apply common object factorization
yaml, other, applied, rest = try_optimization_pass(
'general common object factorization',
yaml, common_subobject, common_job)
# look for a common script, and try factoring that out
_, count, proportion, script = next(iter(
build_histogram(yaml.values(), 'script')),
(None,) * 4)
if script and count > 1 and proportion >= 0.70:
yaml, other, applied, rest = try_optimization_pass(
'script factorization',
yaml, common_subobject, {'script': script})
# look for a common before_script, and try factoring that out
_, count, proportion, script = next(iter(
build_histogram(yaml.values(), 'before_script')),
(None,) * 4)
if script and count > 1 and proportion >= 0.70:
yaml, other, applied, rest = try_optimization_pass(
'before_script factorization',
yaml, common_subobject, {'before_script': script})
# Look specifically for the SPACK_ROOT_SPEC environment variables.
# Try to factor them out.
h = build_histogram((
getattr(val, 'get', lambda *args: {})('variables')
for val in yaml.values()), 'SPACK_ROOT_SPEC')
# In this case, we try to factor out *all* instances of the SPACK_ROOT_SPEC
# environment variable; not just the one that appears with the greatest
# frequency. We only require that more than 1 job uses a given instance's
# value, because we expect the value to be very large, and so expect even
# few-to-one factorizations to yield large space savings.
counter = 0
for _, count, proportion, spec in h:
if count <= 1:
continue
counter += 1
yaml, other, applied, rest = try_optimization_pass(
'SPACK_ROOT_SPEC factorization ({count})'.format(count=counter),
yaml,
common_subobject,
{'variables': {'SPACK_ROOT_SPEC': spec}})
new_size = len(syaml.dump_config(
sort_yaml_obj(yaml), default_flow_style=True))
print('\n')
print_delta('overall summary', original_size, new_size)
print('\n')
return yaml
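# Typical use (hypothetical driver code): the optimizer is applied to the
# fully generated jobs mapping just before it is dumped to .gitlab-ci.yml,
# as generate_gitlab_ci_yaml() does above when --optimize is passed:
#
#   import spack.ci_optimization as ci_opt
#   sorted_output = ci_opt.optimizer(sorted_output)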

View File

@@ -9,7 +9,6 @@
import re
import sys
import argparse
import ruamel.yaml as yaml
import six
@@ -17,7 +16,7 @@
from llnl.util.lang import attr_setdefault, index_by
from llnl.util.tty.colify import colify
from llnl.util.tty.color import colorize
from llnl.util.filesystem import join_path
from llnl.util.filesystem import working_dir
import spack.config
import spack.error
@@ -27,7 +26,6 @@
import spack.store
import spack.util.spack_json as sjson
import spack.util.string
from ruamel.yaml.error import MarkedYAMLError
# cmd has a submodule called "list" so preserve the python list module
@@ -45,28 +43,11 @@ def python_name(cmd_name):
return cmd_name.replace("-", "_")
def require_python_name(pname):
"""Require that the provided name is a valid python name (per
python_name()). Useful for checking parameters for function
prerequisites."""
if python_name(pname) != pname:
raise PythonNameError(pname)
def cmd_name(python_name):
"""Convert module name (with ``_``) to command name (with ``-``)."""
return python_name.replace('_', '-')
def require_cmd_name(cname):
"""Require that the provided name is a valid command name (per
cmd_name()). Useful for checking parameters for function
prerequisites.
"""
if cmd_name(cname) != cname:
raise CommandNameError(cname)
#: global, cached list of all commands -- access through all_commands()
_all_commands = None
@@ -110,7 +91,6 @@ def get_module(cmd_name):
cmd_name (str): name of the command for which to get a module
(contains ``-``, not ``_``).
"""
require_cmd_name(cmd_name)
pname = python_name(cmd_name)
try:
@@ -122,6 +102,8 @@ def get_module(cmd_name):
tty.debug('Imported {0} from built-in commands'.format(pname))
except ImportError:
module = spack.extensions.get_module(cmd_name)
if not module:
raise
attr_setdefault(module, SETUP_PARSER, lambda *args: None) # null-op
attr_setdefault(module, DESCRIPTION, "")
@@ -134,16 +116,14 @@ def get_module(cmd_name):
def get_command(cmd_name):
"""Imports the command function associated with cmd_name.
The function's name is derived from cmd_name using python_name().
"""Imports the command's function from a module and returns it.
Args:
cmd_name (str): name of the command (contains ``-``, not ``_``).
cmd_name (str): name of the command for which to get a module
(contains ``-``, not ``_``).
"""
require_cmd_name(cmd_name)
pname = python_name(cmd_name)
return getattr(get_module(cmd_name), pname)
return getattr(get_module(pname), pname)
def parse_specs(args, **kwargs):
@@ -197,7 +177,7 @@ def elide_list(line_list, max_num=10):
return line_list
def disambiguate_spec(spec, env, local=False, installed=True, first=False):
def disambiguate_spec(spec, env, local=False, installed=True):
"""Given a spec, figure out which installed package it refers to.
Arguments:
@@ -210,11 +190,10 @@ def disambiguate_spec(spec, env, local=False, installed=True, first=False):
database query. See ``spack.database.Database._query`` for details.
"""
hashes = env.all_hashes() if env else None
return disambiguate_spec_from_hashes(spec, hashes, local, installed, first)
return disambiguate_spec_from_hashes(spec, hashes, local, installed)
def disambiguate_spec_from_hashes(spec, hashes, local=False,
installed=True, first=False):
def disambiguate_spec_from_hashes(spec, hashes, local=False, installed=True):
"""Given a spec and a list of hashes, get concrete spec the spec refers to.
Arguments:
@@ -234,9 +213,6 @@ def disambiguate_spec_from_hashes(spec, hashes, local=False,
if not matching_specs:
tty.die("Spec '%s' matches no installed packages." % spec)
elif first:
return matching_specs[0]
elif len(matching_specs) > 1:
format_string = '{name}{@version}{%compiler}{arch=architecture}'
args = ["%s matches multiple packages." % spec,
@@ -435,39 +411,8 @@ def format_list(specs):
def spack_is_git_repo():
"""Ensure that this instance of Spack is a git clone."""
return is_git_repo(spack.paths.prefix)
def is_git_repo(path):
dotgit_path = join_path(path, '.git')
if os.path.isdir(dotgit_path):
# we are in a regular git repo
return True
if os.path.isfile(dotgit_path):
# we might be in a git worktree
try:
with open(dotgit_path, "rb") as f:
dotgit_content = yaml.load(f)
return os.path.isdir(dotgit_content.get("gitdir", dotgit_path))
except MarkedYAMLError:
pass
return False
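# For illustration (hypothetical paths): a regular clone has a .git
# directory, while a worktree has a .git *file* pointing at the real gitdir,
# which is why both cases are probed above.
#
#   is_git_repo('/path/to/spack/clone')   # True
#   is_git_repo('/tmp/not-a-repo')        # False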
class PythonNameError(spack.error.SpackError):
"""Exception class thrown for impermissible python names"""
def __init__(self, name):
self.name = name
super(PythonNameError, self).__init__(
'{0} is not a permissible Python name.'.format(name))
class CommandNameError(spack.error.SpackError):
"""Exception class thrown for impermissible command names"""
def __init__(self, name):
self.name = name
super(CommandNameError, self).__init__(
'{0} is not a permissible Spack command name.'.format(name))
with working_dir(spack.paths.prefix):
return os.path.isdir('.git')
########################################

View File

@@ -0,0 +1,80 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.cpu
import llnl.util.tty as tty
import spack.repo
import spack.spec
import spack.cmd.common.arguments as arguments
description = "Bootstrap packages needed for spack to run smoothly"
section = "admin"
level = "long"
def setup_parser(subparser):
arguments.add_common_arguments(subparser, ['jobs'])
subparser.add_argument(
'--keep-prefix', action='store_true', dest='keep_prefix',
help="don't remove the install prefix if installation fails")
subparser.add_argument(
'--keep-stage', action='store_true', dest='keep_stage',
help="don't remove the build stage if installation succeeds")
arguments.add_common_arguments(subparser, ['no_checksum'])
subparser.add_argument(
'-v', '--verbose', action='store_true', dest='verbose',
help="display verbose build output while installing")
cache_group = subparser.add_mutually_exclusive_group()
cache_group.add_argument(
'--use-cache', action='store_true', dest='use_cache', default=True,
help="check for pre-built Spack packages in mirrors (default)")
cache_group.add_argument(
'--no-cache', action='store_false', dest='use_cache', default=True,
help="do not check for pre-built Spack packages in mirrors")
cache_group.add_argument(
'--cache-only', action='store_true', dest='cache_only', default=False,
help="only install package from binary mirrors")
cd_group = subparser.add_mutually_exclusive_group()
arguments.add_common_arguments(cd_group, ['clean', 'dirty'])
def bootstrap(parser, args, **kwargs):
kwargs.update({
'keep_prefix': args.keep_prefix,
'keep_stage': args.keep_stage,
'install_deps': 'dependencies',
'verbose': args.verbose,
'dirty': args.dirty,
'use_cache': args.use_cache,
'cache_only': args.cache_only
})
# Define requirement dictionary defining general specs which need
# to be satisfied, and the specs to install when the general spec
# isn't satisfied.
requirement_dict = {
# Install environment-modules with generic optimizations
'environment-modules': 'environment-modules~X target={0}'.format(
llnl.util.cpu.host().family
)
}
for requirement in requirement_dict:
installed_specs = spack.store.db.query(requirement)
if len(installed_specs) > 0:
tty.msg("Requirement %s is satisfied with installed "
"package %s" % (requirement, installed_specs[0]))
else:
# Install requirement
spec_to_install = spack.spec.Spec(requirement_dict[requirement])
spec_to_install.concretize()
tty.msg("Installing %s to satisfy requirement for %s" %
(spec_to_install, requirement))
kwargs['explicit'] = True
package = spack.repo.get(spec_to_install)
package.do_install(**kwargs)
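# Hypothetical session illustrating the flow above: with no matching install
# in the store, `spack bootstrap` concretizes and installs the generic spec
# (target family shown here is an assumption for the example).
#
#   $ spack bootstrap
#   ==> Installing environment-modules~X target=x86_64
#       to satisfy requirement for environment-modules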

View File

@@ -0,0 +1,45 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import spack.cmd.configure as cfg
import llnl.util.tty as tty
from spack.build_systems.autotools import AutotoolsPackage
from spack.build_systems.cmake import CMakePackage
from spack.build_systems.qmake import QMakePackage
from spack.build_systems.scons import SConsPackage
from spack.build_systems.waf import WafPackage
from spack.build_systems.python import PythonPackage
from spack.build_systems.perl import PerlPackage
from spack.build_systems.meson import MesonPackage
from spack.build_systems.sip import SIPPackage
description = 'DEPRECATED: stops at build stage when installing a package'
section = "build"
level = "long"
build_system_to_phase = {
AutotoolsPackage: 'build',
CMakePackage: 'build',
QMakePackage: 'build',
SConsPackage: 'build',
WafPackage: 'build',
PythonPackage: 'build',
PerlPackage: 'build',
MesonPackage: 'build',
SIPPackage: 'build',
}
def setup_parser(subparser):
cfg.setup_parser(subparser)
def build(parser, args):
tty.warn("This command is deprecated. Use `spack install --until` to"
" select an end phase instead. The `spack build` command will be"
" removed in a future version of Spack")
cfg._stop_at_phase_during_install(args, build, build_system_to_phase)

View File

@@ -52,35 +52,16 @@ def setup_parser(subparser):
create.add_argument('-k', '--key', metavar='key',
type=str, default=None,
help="Key for signing.")
output = create.add_mutually_exclusive_group(required=True)
output.add_argument('-d', '--directory',
metavar='directory',
type=str,
help="local directory where " +
"buildcaches will be written.")
output.add_argument('-m', '--mirror-name',
metavar='mirror-name',
type=str,
help="name of the mirror where " +
"buildcaches will be written.")
output.add_argument('--mirror-url',
metavar='mirror-url',
type=str,
help="URL of the mirror where " +
"buildcaches will be written.")
create.add_argument('--rebuild-index', action='store_true',
default=False, help="Regenerate buildcache index " +
"after building package(s)")
create.add_argument('-d', '--directory', metavar='directory',
type=str, default='.',
help="directory in which to save the tarballs.")
create.add_argument('--no-rebuild-index', action='store_true',
default=False, help="skip rebuilding index after " +
"building package(s)")
create.add_argument('-y', '--spec-yaml', default=None,
help='Create buildcache entry for spec from yaml file')
create.add_argument('--only', default='package,dependencies',
dest='things_to_install',
choices=['package', 'dependencies'],
help=('Select the buildcache mode. The default is to'
' build a cache for the package along with all'
' its dependencies. Alternatively, one can'
' decide to build a cache for only the package'
' or only the dependencies'))
create.add_argument('--no-deps', action='store_true', default='false',
help='Create buildcache entry wo/ dependencies')
arguments.add_common_arguments(create, ['specs'])
create.set_defaults(func=createtarball)
@@ -95,10 +76,6 @@ def setup_parser(subparser):
install.add_argument('-u', '--unsigned', action='store_true',
help="install unsigned buildcache" +
" tarballs for testing")
install.add_argument('-o', '--otherarch', action='store_true',
help="install specs from other architectures" +
" instead of default platform and OS")
arguments.add_common_arguments(install, ['specs'])
install.set_defaults(func=installtarball)
@@ -108,6 +85,8 @@ def setup_parser(subparser):
action='store_true',
dest='variants',
help='show variants in output (can be long)')
listcache.add_argument('-f', '--force', action='store_true',
help="force new download of specs")
listcache.add_argument('-a', '--allarch', action='store_true',
help="list specs for all available architectures" +
" instead of default platform and OS")
@@ -273,8 +252,7 @@ def find_matching_specs(pkgs, allow_multiple_matches=False, env=None):
return specs_from_cli
def match_downloaded_specs(pkgs, allow_multiple_matches=False, force=False,
other_arch=False):
def match_downloaded_specs(pkgs, allow_multiple_matches=False, force=False):
"""Returns a list of specs matching the not necessarily
concretized specs given from cli
@@ -288,8 +266,8 @@ def match_downloaded_specs(pkgs, allow_multiple_matches=False, force=False,
# List of specs that match expressions given via command line
specs_from_cli = []
has_errors = False
allarch = other_arch
specs = bindist.get_specs(allarch)
allarch = False
specs = bindist.get_specs(force, allarch)
for pkg in pkgs:
matches = []
tty.msg("buildcache spec(s) matching %s \n" % pkg)
@@ -321,10 +299,8 @@ def match_downloaded_specs(pkgs, allow_multiple_matches=False, force=False,
return specs_from_cli
def _createtarball(env, spec_yaml=None, packages=None, add_spec=True,
add_deps=True, output_location=os.getcwd(),
signing_key=None, force=False, make_relative=False,
unsigned=False, allow_root=False, rebuild_index=False):
def _createtarball(env, spec_yaml, packages, directory, key, no_deps, force,
rel, unsigned, allow_root, no_rebuild_index):
if spec_yaml:
packages = set()
with open(spec_yaml, 'r') as fd:
@@ -337,22 +313,23 @@ def _createtarball(env, spec_yaml=None, packages=None, add_spec=True,
elif packages:
packages = packages
elif env:
packages = env.concretized_user_specs
else:
tty.die("build cache file creation requires at least one" +
" installed package spec, an activate environment," +
" or else a path to a yaml file containing a spec" +
" to install")
" installed package argument or else path to a" +
" yaml file containing a spec to install")
pkgs = set(packages)
specs = set()
mirror = spack.mirror.MirrorCollection().lookup(output_location)
outdir = '.'
if directory:
outdir = directory
mirror = spack.mirror.MirrorCollection().lookup(outdir)
outdir = url_util.format(mirror.push_url)
msg = 'Buildcache files will be output to %s/build_cache' % outdir
tty.msg(msg)
signkey = None
if key:
signkey = key
matches = find_matching_specs(pkgs, env=env)
@@ -365,23 +342,14 @@ def _createtarball(env, spec_yaml=None, packages=None, add_spec=True,
tty.debug('skipping external or virtual spec %s' %
match.format())
else:
if add_spec:
tty.debug('adding matching spec %s' % match.format())
specs.add(match)
else:
tty.debug('skipping matching spec %s' % match.format())
if not add_deps:
tty.debug('adding matching spec %s' % match.format())
specs.add(match)
if no_deps is True:
continue
tty.debug('recursing dependencies')
for d, node in match.traverse(order='post',
depth=True,
deptype=('link', 'run')):
# skip root, since it's handled above
if d == 0:
continue
if node.external or node.virtual:
tty.debug('skipping external or virtual dependency %s' %
node.format())
@@ -392,10 +360,14 @@ def _createtarball(env, spec_yaml=None, packages=None, add_spec=True,
tty.debug('writing tarballs to %s/build_cache' % outdir)
for spec in specs:
tty.debug('creating binary cache file for package %s ' % spec.format())
bindist.build_tarball(spec, outdir, force, make_relative,
unsigned, allow_root, signing_key,
rebuild_index)
tty.msg('creating binary cache file for package %s ' % spec.format())
try:
bindist.build_tarball(spec, outdir, force, rel,
unsigned, allow_root, signkey,
not no_rebuild_index)
except Exception as e:
tty.warn('%s' % e)
pass
def createtarball(args):
@@ -404,50 +376,9 @@ def createtarball(args):
# restrict matching to current environment if one is active
env = ev.get_env(args, 'buildcache create')
output_location = None
if args.directory:
output_location = args.directory
# User meant to provide a path to a local directory.
# Ensure that they did not accidentally pass a URL.
scheme = url_util.parse(output_location, scheme='<missing>').scheme
if scheme != '<missing>':
raise ValueError(
'"--directory" expected a local path; got a URL, instead')
# User meant to provide a path to a local directory.
# Ensure that the mirror lookup does not mistake it for a named mirror.
output_location = 'file://' + output_location
elif args.mirror_name:
output_location = args.mirror_name
# User meant to provide the name of a preconfigured mirror.
# Ensure that the mirror lookup actually returns a named mirror.
result = spack.mirror.MirrorCollection().lookup(output_location)
if result.name == "<unnamed>":
raise ValueError(
'no configured mirror named "{name}"'.format(
name=output_location))
elif args.mirror_url:
output_location = args.mirror_url
# User meant to provide a URL for an anonymous mirror.
# Ensure that they actually provided a URL.
scheme = url_util.parse(output_location, scheme='<missing>').scheme
if scheme == '<missing>':
raise ValueError(
'"{url}" is not a valid URL'.format(url=output_location))
add_spec = ('package' in args.things_to_install)
add_deps = ('dependencies' in args.things_to_install)
_createtarball(env, spec_yaml=args.spec_yaml, packages=args.specs,
add_spec=add_spec, add_deps=add_deps,
output_location=output_location, signing_key=args.key,
force=args.force, make_relative=args.rel,
unsigned=args.unsigned, allow_root=args.allow_root,
rebuild_index=args.rebuild_index)
_createtarball(env, args.spec_yaml, args.specs, args.directory,
args.key, args.no_deps, args.force, args.rel, args.unsigned,
args.allow_root, args.no_rebuild_index)
def installtarball(args):
@@ -456,7 +387,7 @@ def installtarball(args):
tty.die("build cache file installation requires" +
" at least one package spec argument")
pkgs = set(args.specs)
matches = match_downloaded_specs(pkgs, args.multiple, args.otherarch)
matches = match_downloaded_specs(pkgs, args.multiple, args.force)
for match in matches:
install_tarball(match, args)
@@ -488,7 +419,7 @@ def install_tarball(spec, args):
def listspecs(args):
"""list binary packages available from mirrors"""
specs = bindist.get_specs(args.allarch)
specs = bindist.get_specs(args.force, args.allarch)
if args.specs:
constraints = set(args.specs)
specs = [s for s in specs if any(s.satisfies(c) for c in constraints)]
@@ -610,7 +541,7 @@ def get_concrete_spec(args):
if spec_str:
try:
spec = find_matching_specs(spec_str)[0]
spec = Spec(spec_str)
spec.concretize()
except SpecError as spec_error:
tty.error('Unable to concretize spec {0}'.format(args.spec))

View File

@@ -26,9 +26,6 @@ def setup_parser(subparser):
subparser.add_argument(
'--keep-stage', action='store_true',
help="don't clean up staging area when command completes")
subparser.add_argument(
'-b', '--batch', action='store_true',
help="don't ask which versions to checksum")
arguments.add_common_arguments(subparser, ['package'])
subparser.add_argument(
'versions', nargs=argparse.REMAINDER,
@@ -36,11 +33,6 @@ def setup_parser(subparser):
def checksum(parser, args):
# Did the user pass 'package@version' string?
if len(args.versions) == 0 and '@' in args.package:
args.versions = [args.package.split('@')[1]]
args.package = args.package.split('@')[0]
# Make sure the user provided a package and not a URL
if not valid_fully_qualified_module_name(args.package):
tty.die("`spack checksum` accepts package names, not URLs.")
@@ -64,9 +56,7 @@ def checksum(parser, args):
tty.die("Could not find any versions for {0}".format(pkg.name))
version_lines = spack.stage.get_checksums_for_versions(
url_dict, pkg.name, keep_stage=args.keep_stage,
batch=(args.batch or len(args.versions) > 0),
fetch_options=pkg.fetch_options)
url_dict, pkg.name, keep_stage=args.keep_stage)
print()
print(version_lines)

View File

@@ -34,11 +34,42 @@ def setup_parser(subparser):
setup_parser.parser = subparser
subparsers = subparser.add_subparsers(help='CI sub-commands')
start = subparsers.add_parser('start', help=ci_start.__doc__)
start.add_argument(
'--output-file', default=None,
help="Absolute path to file where generated jobs file should be " +
"written. The default is .gitlab-ci.yml in the root of the " +
"repository.")
start.add_argument(
'--copy-to', default=None,
help="Absolute path of additional location where generated jobs " +
"yaml file should be copied. Default is not to copy.")
start.add_argument(
'--spack-repo', default=None,
help="Provide a url for this argument if a custom spack repo " +
"should be cloned as a step in each generated job.")
start.add_argument(
'--spack-ref', default=None,
help="Provide a git branch or tag if a custom spack branch " +
"should be checked out as a step in each generated job. " +
"This argument is ignored if no --spack-repo is provided.")
start.add_argument(
'--downstream-repo', default=None,
help="Url to repository where commit containing jobs yaml file " +
"should be pushed.")
start.add_argument(
'--branch-name', default='default-branch',
help="Name of current branch, used in generation of pushed commit.")
start.add_argument(
'--commit-sha', default='none',
help="SHA of current commit, used in generation of pushed commit.")
start.set_defaults(func=ci_start)
# Dynamic generation of the jobs yaml from a spack environment
generate = subparsers.add_parser('generate', help=ci_generate.__doc__)
generate.add_argument(
'--output-file', default=None,
help="Path to file where generated jobs file should be " +
help="Absolute path to file where generated jobs file should be " +
"written. The default is .gitlab-ci.yml in the root of the " +
"repository.")
generate.add_argument(
@@ -54,17 +85,22 @@ def setup_parser(subparser):
help="Provide a git branch or tag if a custom spack branch " +
"should be checked out as a step in each generated job. " +
"This argument is ignored if no --spack-repo is provided.")
generate.add_argument(
'--optimize', action='store_true', default=False,
help="(Experimental) run the generated document through a series of "
"optimization passes designed to reduce the size of the "
"generated file.")
generate.add_argument(
'--dependencies', action='store_true', default=False,
help="(Experimental) disable DAG scheduling; use "
' "plain" dependencies.')
generate.set_defaults(func=ci_generate)
# Commit and push jobs yaml to a downstream CI repo
pushyaml = subparsers.add_parser('pushyaml', help=ci_pushyaml.__doc__)
pushyaml.add_argument(
'--downstream-repo', default=None,
help="Url to repository where commit containing jobs yaml file " +
"should be pushed.")
pushyaml.add_argument(
'--branch-name', default='default-branch',
help="Name of current branch, used in generation of pushed commit.")
pushyaml.add_argument(
'--commit-sha', default='none',
help="SHA of current commit, used in generation of pushed commit.")
pushyaml.set_defaults(func=ci_pushyaml)
# Check a spec against mirror. Rebuild, create buildcache and push to
# mirror (if necessary).
rebuild = subparsers.add_parser('rebuild', help=ci_rebuild.__doc__)
@@ -84,22 +120,18 @@ def ci_generate(args):
copy_yaml_to = args.copy_to
spack_repo = args.spack_repo
spack_ref = args.spack_ref
run_optimizer = args.optimize
use_dependencies = args.dependencies
if not output_file:
output_file = os.path.abspath(".gitlab-ci.yml")
gen_ci_dir = os.getcwd()
output_file = os.path.join(gen_ci_dir, '.gitlab-ci.yml')
else:
output_file_path = os.path.abspath(output_file)
gen_ci_dir = os.path.dirname(output_file_path)
gen_ci_dir = os.path.dirname(output_file)
if not os.path.exists(gen_ci_dir):
os.makedirs(gen_ci_dir)
# Generate the jobs
spack_ci.generate_gitlab_ci_yaml(
env, True, output_file, spack_repo, spack_ref,
run_optimizer=run_optimizer,
use_dependencies=use_dependencies)
env, True, output_file, spack_repo, spack_ref)
if copy_yaml_to:
copy_to_dir = os.path.dirname(copy_yaml_to)
@@ -108,6 +140,64 @@ def ci_generate(args):
shutil.copyfile(output_file, copy_yaml_to)
def ci_pushyaml(args):
"""Push the generated jobs yaml file to a remote repository. The file
(.gitlab-ci.yml) is expected to be in the current directory, which
should be the root of the repository."""
downstream_repo = args.downstream_repo
branch_name = args.branch_name
commit_sha = args.commit_sha
if not downstream_repo:
tty.die('No downstream repo to push to, exiting')
working_dir = os.getcwd()
jobs_yaml = os.path.join(working_dir, '.gitlab-ci.yml')
git_dir = os.path.join(working_dir, '.git')
if not os.path.exists(jobs_yaml):
tty.die('.gitlab-ci.yml must exist in current directory')
if not os.path.exists(git_dir):
tty.die('.git directory must exist in current directory')
# Create a temporary working directory
with spack_ci.TemporaryDirectory() as temp_dir:
git = exe.which('git', required=True)
# Push a commit with the generated file to the downstream ci repo
saved_git_dir = os.path.join(temp_dir, 'original-git-dir')
shutil.move('.git', saved_git_dir)
git('init', '.')
git('config', 'user.email', 'robot@spack.io')
git('config', 'user.name', 'Spack Build Bot')
git('add', '.')
# If the environment contains a spack directory, do not commit
# or push it with any other generated products
if os.path.exists('./spack') and os.path.isdir('./spack'):
git('rm', '-rf', '--cached', 'spack')
tty.msg('git commit')
commit_message = '{0} {1} ({2})'.format(
'Auto-generated commit testing', branch_name, commit_sha)
git('commit', '-m', '{0}'.format(commit_message))
tty.msg('git push')
git('remote', 'add', 'downstream', downstream_repo)
push_to_branch = 'master:multi-ci-{0}'.format(branch_name)
git('push', '--force', 'downstream', push_to_branch)
shutil.rmtree('.git')
shutil.move(saved_git_dir, '.git')
git('reset', '--hard', 'HEAD')
def ci_rebuild(args):
"""This command represents a gitlab-ci job, corresponding to a single
release spec. As such it must first decide whether or not the spec it
@@ -149,7 +239,6 @@ def ci_rebuild(args):
compiler_action = get_env_var('SPACK_COMPILER_ACTION')
cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
related_builds = get_env_var('SPACK_RELATED_BUILDS_CDASH')
pr_env_var = get_env_var('SPACK_IS_PR_PIPELINE')
gitlab_ci = None
if 'gitlab-ci' in yaml_root:
@@ -194,6 +283,8 @@ def ci_rebuild(args):
spack_cmd = exe.which('spack')
os.environ['FORCE_UNSAFE_CONFIGURE'] = '1'
cdash_report_dir = os.path.join(ci_artifact_dir, 'cdash_report')
temp_dir = os.path.join(ci_artifact_dir, 'jobs_scratch_dir')
job_log_dir = os.path.join(temp_dir, 'logs')
@@ -202,18 +293,11 @@ def ci_rebuild(args):
local_mirror_dir = os.path.join(ci_artifact_dir, 'local_mirror')
build_cache_dir = os.path.join(local_mirror_dir, 'build_cache')
spack_is_pr_pipeline = True if pr_env_var == 'True' else False
enable_artifacts_mirror = False
artifact_mirror_url = None
if 'enable-artifacts-buildcache' in gitlab_ci:
enable_artifacts_mirror = gitlab_ci['enable-artifacts-buildcache']
if enable_artifacts_mirror or spack_is_pr_pipeline:
# If this is a PR pipeline, we will override the setting to
# make sure that artifacts buildcache is enabled. Otherwise
# jobs will not have binary deps available since we do not
# allow pushing binaries to remote mirror during PR pipelines
enable_artifacts_mirror = True
if enable_artifacts_mirror:
artifact_mirror_url = 'file://' + local_mirror_dir
mirror_msg = 'artifact buildcache enabled, mirror url: {0}'.format(
artifact_mirror_url)
@@ -359,12 +443,9 @@ def ci_rebuild(args):
spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)
# 4) create buildcache on remote mirror, but not if this is
# running to test a spack PR
if not spack_is_pr_pipeline:
spack_ci.push_mirror_contents(
env, job_spec, job_spec_yaml_path, remote_mirror_url,
cdash_build_id)
# 4) create buildcache on remote mirror
spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
remote_mirror_url, cdash_build_id)
# 5) create another copy of that buildcache on "local artifact
# mirror" (only done if cash reporting is enabled)
@@ -389,6 +470,13 @@ def ci_rebuild(args):
job_spec, build_cache_dir, True, remote_mirror_url)
def ci_start(args):
"""Kicks of the CI process (currently just calls ci_generate() then
ci_push())"""
ci_generate(args)
ci_pushyaml(args)
def ci(parser, args):
if args.func:
args.func(args)

View File

@@ -23,9 +23,9 @@
class AllClean(argparse.Action):
"""Activates flags -s -d -f -m and -p simultaneously"""
"""Activates flags -s -d -m and -p simultaneously"""
def __call__(self, parser, namespace, values, option_string=None):
parser.parse_args(['-sdfmp'], namespace=namespace)
parser.parse_args(['-sdmp'], namespace=namespace)
def setup_parser(subparser):
@@ -35,9 +35,6 @@ def setup_parser(subparser):
subparser.add_argument(
'-d', '--downloads', action='store_true',
help="remove cached downloads")
subparser.add_argument(
'-f', '--failures', action='store_true',
help="force removal of all install failure tracking markers")
subparser.add_argument(
'-m', '--misc-cache', action='store_true',
help="remove long-lived caches, like the virtual package index")
@@ -45,15 +42,15 @@ def setup_parser(subparser):
'-p', '--python-cache', action='store_true',
help="remove .pyc, .pyo files and __pycache__ folders")
subparser.add_argument(
'-a', '--all', action=AllClean, help="equivalent to -sdfmp", nargs=0
'-a', '--all', action=AllClean, help="equivalent to -sdmp", nargs=0
)
arguments.add_common_arguments(subparser, ['specs'])
def clean(parser, args):
# If nothing was set, activate the default
if not any([args.specs, args.stage, args.downloads, args.failures,
args.misc_cache, args.python_cache]):
if not any([args.specs, args.stage, args.downloads, args.misc_cache,
args.python_cache]):
args.stage = True
# Then do the cleaning falling through the cases
@@ -73,10 +70,6 @@ def clean(parser, args):
tty.msg('Removing cached downloads')
spack.caches.fetch_cache.destroy()
if args.failures:
tty.msg('Removing install failure marks')
spack.installer.clear_failures()
if args.misc_cache:
tty.msg('Removing cached information on repositories')
spack.caches.misc_cache.destroy()

View File

@@ -78,10 +78,9 @@ def setup_parser(subparser):
class SpackArgparseRstWriter(ArgparseRstWriter):
"""RST writer tailored for spack documentation."""
def __init__(self, prog, out=None, aliases=False,
def __init__(self, prog, out=sys.stdout, aliases=False,
documented_commands=[],
rst_levels=['-', '-', '^', '~', ':', '`']):
out = sys.stdout if out is None else out
super(SpackArgparseRstWriter, self).__init__(
prog, out, aliases, rst_levels)
self.documented = documented_commands

View File

@@ -111,7 +111,7 @@ def __call__(self, parser, namespace, jobs, option_string):
def default(self):
# This default is coded as a property so that look-up
# of this value is done only on demand
return min(spack.config.get('config:build_jobs', 16),
return min(spack.config.get('config:build_jobs'),
multiprocessing.cpu_count())
@default.setter

View File

@@ -37,7 +37,7 @@ def setup_parser(subparser):
find_parser.add_argument('add_paths', nargs=argparse.REMAINDER)
find_parser.add_argument(
'--scope', choices=scopes, metavar=scopes_metavar,
default=spack.config.default_modify_scope('compilers'),
default=spack.config.default_modify_scope(),
help="configuration scope to modify")
# Remove
@@ -49,7 +49,7 @@ def setup_parser(subparser):
remove_parser.add_argument('compiler_spec')
remove_parser.add_argument(
'--scope', choices=scopes, metavar=scopes_metavar,
default=spack.config.default_modify_scope('compilers'),
default=spack.config.default_modify_scope(),
help="configuration scope to modify")
# List
@@ -142,7 +142,7 @@ def compiler_info(args):
for flag, flag_value in iteritems(c.flags):
print("\t\t%s = %s" % (flag, flag_value))
if len(c.environment) != 0:
if len(c.environment.get('set', {})) != 0:
if len(c.environment['set']) != 0:
print("\tenvironment:")
print("\t set:")
for key, value in iteritems(c.environment['set']):
@@ -159,19 +159,7 @@ def compiler_list(args):
tty.msg("Available compilers")
index = index_by(spack.compilers.all_compilers(scope=args.scope),
lambda c: (c.spec.name, c.operating_system, c.target))
# For a container, take each element which does not evaluate to false and
# convert it to a string. For elements which evaluate to False (e.g. None)
# convert them to '' (in which case it still evaluates to False but is a
# string type). Tuples produced by this are guaranteed to be comparable in
# Python 3
convert_str = (
lambda tuple_container:
tuple(str(x) if x else '' for x in tuple_container))
index_str_keys = list(
(convert_str(x), y) for x, y in index.items())
ordered_sections = sorted(index_str_keys, key=lambda item: item[0])
ordered_sections = sorted(index.items(), key=lambda item: item[0])
for i, (key, compilers) in enumerate(ordered_sections):
if i >= 1:
print()

View File

@@ -5,14 +5,12 @@
from __future__ import print_function
import os
import re
import llnl.util.tty as tty
import spack.config
import spack.schema.env
import spack.environment as ev
import spack.util.spack_yaml as syaml
from spack.util.editor import editor
description = "get and set configuration options"
@@ -60,48 +58,24 @@ def setup_parser(subparser):
sp.add_parser('list', help='list configuration sections')
add_parser = sp.add_parser('add', help='add configuration parameters')
add_parser.add_argument(
'path', nargs='?',
help="colon-separated path to config that should be added,"
" e.g. 'config:default:true'")
add_parser.add_argument(
'-f', '--file',
help="file from which to set all config values"
)
remove_parser = sp.add_parser('remove', aliases=['rm'],
help='remove configuration parameters')
remove_parser.add_argument(
'path',
help="colon-separated path to config that should be removed,"
" e.g. 'config:default:true'")
# Make the add parser available later
setup_parser.add_parser = add_parser
def _get_scope_and_section(args):
"""Extract config scope and section from arguments."""
scope = args.scope
section = getattr(args, 'section', None)
path = getattr(args, 'path', None)
section = args.section
# w/no args and an active environment, point to env manifest
if not section:
if not args.section:
env = ev.get_env(args, 'config edit')
if env:
scope = env.env_file_config_scope_name()
# set scope defaults
elif not scope:
scope = spack.config.default_modify_scope(section)
# special handling for commands that take value instead of section
if path:
section = path[:path.find(':')] if ':' in path else path
if not scope:
scope = spack.config.default_modify_scope(section)
elif not args.scope:
if section == 'compilers':
scope = spack.config.default_modify_scope()
else:
scope = 'user'
return scope, section
@@ -161,126 +135,11 @@ def config_list(args):
print(' '.join(list(spack.config.section_schemas)))
def set_config(args, section, new, scope):
if re.match(r'env.*', scope):
e = ev.get_env(args, 'config add')
e.set_config(section, new)
else:
spack.config.set(section, new, scope=scope)
def config_add(args):
"""Add the given configuration to the specified config scope
This is a stateful operation that edits the config files."""
if not (args.file or args.path):
tty.error("No changes requested. Specify a file or value.")
setup_parser.add_parser.print_help()
exit(1)
scope, section = _get_scope_and_section(args)
# Updates from file
if args.file:
# Get file as config dict
data = spack.config.read_config_file(args.file)
if any(k in data for k in spack.schema.env.keys):
data = ev.config_dict(data)
# update all sections from config dict
# We have to iterate on keys to keep overrides from the file
for section in data.keys():
if section in spack.config.section_schemas.keys():
# Special handling for compiler scope difference
# Has to be handled after we choose a section
if scope is None:
scope = spack.config.default_modify_scope(section)
value = data[section]
existing = spack.config.get(section, scope=scope)
new = spack.config.merge_yaml(existing, value)
set_config(args, section, new, scope)
if args.path:
components = spack.config.process_config_path(args.path)
has_existing_value = True
path = ''
override = False
for idx, name in enumerate(components[:-1]):
# First handle double colons in constructing path
colon = '::' if override else ':' if path else ''
path += colon + name
if getattr(name, 'override', False):
override = True
else:
override = False
# Test whether there is an existing value at this level
existing = spack.config.get(path, scope=scope)
if existing is None:
has_existing_value = False
# We've nested further than existing config, so we need the
# type information for validation to know how to handle bare
# values appended to lists.
existing = spack.config.get_valid_type(path)
# construct value from this point down
value = syaml.load_config(components[-1])
for component in reversed(components[idx + 1:-1]):
value = {component: value}
break
if has_existing_value:
path, _, value = args.path.rpartition(':')
value = syaml.load_config(value)
existing = spack.config.get(path, scope=scope)
# append values to lists
if isinstance(existing, list) and not isinstance(value, list):
value = [value]
# merge value into existing
new = spack.config.merge_yaml(existing, value)
set_config(args, path, new, scope)
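# Hypothetical invocations illustrating both paths above:
#
#   spack config add -f extra.yaml        # merge every section in the file
#   spack config add config:debug:true    # walk the colon path, then merge
#
# In the second form the components are ['config', 'debug', 'true']; the
# final component is YAML-parsed as the value, nested back under 'debug',
# and merged into the 'config' section of the chosen scope.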
def config_remove(args):
"""Remove the given configuration from the specified config scope
This is a stateful operation that edits the config files."""
scope, _ = _get_scope_and_section(args)
path, _, value = args.path.rpartition(':')
existing = spack.config.get(path, scope=scope)
if not isinstance(existing, (list, dict)):
path, _, value = path.rpartition(':')
existing = spack.config.get(path, scope=scope)
value = syaml.load(value)
if isinstance(existing, list):
values = value if isinstance(value, list) else [value]
for v in values:
existing.remove(v)
elif isinstance(existing, dict):
existing.pop(value, None)
else:
# This should be impossible to reach
raise spack.config.ConfigError('Config has nested non-dict values')
set_config(args, path, existing, scope)
def config(parser, args):
action = {'get': config_get,
'blame': config_blame,
'edit': config_edit,
'list': config_list,
'add': config_add,
'rm': config_remove,
'remove': config_remove}
action = {
'get': config_get,
'blame': config_blame,
'edit': config_edit,
'list': config_list,
}
action[args.config_command](args)

View File

@@ -0,0 +1,85 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import argparse
import llnl.util.tty as tty
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.cmd.install as inst
from spack.build_systems.autotools import AutotoolsPackage
from spack.build_systems.cmake import CMakePackage
from spack.build_systems.qmake import QMakePackage
from spack.build_systems.waf import WafPackage
from spack.build_systems.perl import PerlPackage
from spack.build_systems.intel import IntelPackage
from spack.build_systems.meson import MesonPackage
from spack.build_systems.sip import SIPPackage
description = 'DEPRECATED: stage and configure a package but do not install'
section = "build"
level = "long"
build_system_to_phase = {
AutotoolsPackage: 'configure',
CMakePackage: 'cmake',
QMakePackage: 'qmake',
WafPackage: 'configure',
PerlPackage: 'configure',
IntelPackage: 'configure',
MesonPackage: 'meson',
SIPPackage: 'configure',
}
def setup_parser(subparser):
subparser.add_argument(
'-v', '--verbose',
action='store_true',
help="print additional output during builds"
)
arguments.add_common_arguments(subparser, ['spec'])
def _stop_at_phase_during_install(args, calling_fn, phase_mapping):
if not args.package:
tty.die("configure requires at least one package argument")
# TODO: to be refactored with code in install
specs = spack.cmd.parse_specs(args.package, concretize=True)
if len(specs) != 1:
tty.error('only one spec can be installed at a time.')
spec = specs.pop()
pkg = spec.package
try:
key = [cls for cls in phase_mapping if isinstance(pkg, cls)].pop()
phase = phase_mapping[key]
# Install package dependencies if needed
parser = argparse.ArgumentParser()
inst.setup_parser(parser)
tty.msg('Checking dependencies for {0}'.format(args.spec[0]))
cli_args = ['-v'] if args.verbose else []
install_args = parser.parse_args(cli_args + ['--only=dependencies'])
install_args.spec = args.spec
inst.install(parser, install_args)
# Install package and stop at the given phase
cli_args = ['-v'] if args.verbose else []
install_args = parser.parse_args(cli_args + ['--only=package'])
install_args.spec = args.spec
inst.install(parser, install_args, stop_at=phase)
except IndexError:
tty.error(
'Package {0} has no {1} phase, or its {1} phase is not separated from install'.format( # NOQA: ignore=E501
spec.name, calling_fn.__name__)
)
def configure(parser, args):
tty.warn("This command is deprecated. Use `spack install --until` to"
" select an end phase instead. The `spack configure` command will"
" be removed in a future version of Spack.")
_stop_at_phase_during_install(args, configure, build_system_to_phase)

View File

@@ -245,9 +245,7 @@ class PythonPackageTemplate(PackageTemplate):
base_class_name = 'PythonPackage'
dependencies = """\
# FIXME: Add dependencies if required. Only add the python dependency
# if you need specific versions. A generic python dependency is
# added implicitly by the PythonPackage class.
# FIXME: Add dependencies if required.
# depends_on('python@2.X:2.Y,3.Z:', type=('build', 'run'))
# depends_on('py-setuptools', type='build')
# depends_on('py-foo', type=('build', 'run'))"""
@@ -629,7 +627,7 @@ def get_versions(args, name):
versions = spack.stage.get_checksums_for_versions(
url_dict, name, first_stage_function=guesser,
keep_stage=args.keep_stage, batch=True)
keep_stage=args.keep_stage)
else:
versions = unhashed_versions

View File

@@ -3,10 +3,7 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os
import platform
import re
from datetime import datetime
from glob import glob
@@ -14,9 +11,7 @@
import llnl.util.tty as tty
from llnl.util.filesystem import working_dir
import spack.architecture as architecture
import spack.paths
from spack.main import get_version
from spack.util.executable import which
description = "debugging commands for troubleshooting Spack"
@@ -28,7 +23,6 @@ def setup_parser(subparser):
sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='debug_command')
sp.add_parser('create-db-tarball',
help="create a tarball of Spack's installation metadata")
sp.add_parser('report', help='print information useful for bug reports')
def _debug_tarball_suffix():
@@ -84,16 +78,6 @@ def create_db_tarball(args):
tty.msg('Created %s' % tarball_name)
def report(args):
print('* **Spack:**', get_version())
print('* **Python:**', platform.python_version())
print('* **Platform:**', architecture.Arch(
architecture.platform(), 'frontend', 'frontend'))
def debug(parser, args):
action = {
'create-db-tarball': create_db_tarball,
'report': report,
}
action = {'create-db-tarball': create_db_tarball}
action[args.debug_command](args)

View File

@@ -3,15 +3,12 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev
import spack.package
import spack.repo
import spack.store
@@ -45,9 +42,7 @@ def dependencies(parser, args):
spec = spack.cmd.disambiguate_spec(specs[0], env)
format_string = '{name}{@version}{%compiler}{/hash:7}'
if sys.stdout.isatty():
tty.msg(
"Dependencies of %s" % spec.format(format_string, color=True))
tty.msg("Dependencies of %s" % spec.format(format_string, color=True))
deps = spack.store.db.installed_relatives(
spec, 'children', args.transitive, deptype=args.deptype)
if deps:
@@ -57,15 +52,22 @@ def dependencies(parser, args):
else:
spec = specs[0]
dependencies = spack.package.possible_dependencies(
spec,
transitive=args.transitive,
expand_virtuals=args.expand_virtuals,
deptype=args.deptype
)
if not spec.virtual:
packages = [spec.package]
else:
packages = [
spack.repo.get(s.name)
for s in spack.repo.path.providers_for(spec)]
dependencies = set()
for pkg in packages:
possible = pkg.possible_dependencies(
args.transitive, args.expand_virtuals, deptype=args.deptype)
dependencies.update(possible)
if spec.name in dependencies:
del dependencies[spec.name]
dependencies.remove(spec.name)
if dependencies:
colify(sorted(dependencies))
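A standalone sketch of the set-based aggregation on one side of this hunk: union each candidate package's possible dependencies, then drop the queried package itself. The toy dependency table is invented for illustration.

    deps_by_pkg = {
        'mpileaks': {'mpileaks', 'mpi', 'callpath'},
        'callpath': {'callpath', 'mpi', 'dyninst'},
    }

    def possible_dependencies(root, packages):
        dependencies = set()
        for pkg in packages:
            dependencies.update(deps_by_pkg[pkg])
        dependencies.discard(root)  # mirrors dependencies.remove(spec.name)
        return sorted(dependencies)

    print(possible_dependencies('mpileaks', ['mpileaks']))  # ['callpath', 'mpi']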

View File

@@ -3,8 +3,6 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
@@ -32,7 +30,7 @@ def setup_parser(subparser):
def inverted_dependencies():
"""Iterate through all packages and return a dictionary mapping package
names to possible dependencies.
names to possible dependnecies.
Virtual packages are included as sources, so that you can query
dependents of, e.g., `mpi`, but virtuals are not included as
@@ -86,8 +84,7 @@ def dependents(parser, args):
spec = spack.cmd.disambiguate_spec(specs[0], env)
format_string = '{name}{@version}{%compiler}{/hash:7}'
if sys.stdout.isatty():
tty.msg("Dependents of %s" % spec.cformat(format_string))
tty.msg("Dependents of %s" % spec.cformat(format_string))
deps = spack.store.db.installed_relatives(
spec, 'parents', args.transitive)
if deps:
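For reference, the inversion that inverted_dependencies() describes can be sketched standalone: map every dependency name back to the set of packages that depend on it (the toy table is invented):

    from collections import defaultdict

    deps = {'mpileaks': ['mpi', 'callpath'], 'callpath': ['mpi', 'dyninst']}

    dependents = defaultdict(set)
    for pkg, pkg_deps in deps.items():
        for dep in pkg_deps:
            dependents[dep].add(pkg)

    print(sorted(dependents['mpi']))  # ['callpath', 'mpileaks']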

View File

@@ -38,17 +38,9 @@ def setup_parser(subparser):
'-q', '--quiet', action='store_true', dest='quiet',
help="do not display verbose build output while installing")
subparser.add_argument(
'--drop-in', type=str, dest='shell', default=None,
help="drop into a build environment in a new shell, e.g. bash, zsh")
arguments.add_common_arguments(subparser, ['spec'])
stop_group = subparser.add_mutually_exclusive_group()
stop_group.add_argument(
'-b', '--before', type=str, dest='before', default=None,
help="phase to stop before when installing (default None)")
stop_group.add_argument(
'-u', '--until', type=str, dest='until', default=None,
help="phase to stop after when installing (default None)")
arguments.add_common_arguments(subparser, ['spec'])
cd_group = subparser.add_mutually_exclusive_group()
arguments.add_common_arguments(cd_group, ['clean', 'dirty'])
@@ -99,10 +91,4 @@ def dev_build(self, args):
verbose=not args.quiet,
keep_stage=True, # don't remove source dir for dev build.
dirty=args.dirty,
stop_before=args.before,
stop_at=args.until)
# drop into the build environment of the package?
if args.shell is not None:
spack.build_environment.setup_package(package, dirty=False)
os.execvp(args.shell, [args.shell])
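The --drop-in behavior in this hunk can be illustrated in isolation: after the build environment is prepared, os.execvp replaces the current process with the requested shell. Running this sketch really does exec a shell, so it is illustration only, and the setup call is elided.

    import os

    shell = 'bash'  # what args.shell would carry, e.g. bash or zsh
    # spack.build_environment.setup_package(package, dirty=False) would run here
    os.execvp(shell, [shell])  # never returns; the shell takes over this process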

View File

@@ -0,0 +1,20 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import spack.cmd.dev_build
import llnl.util.tty as tty
description = "DEPRECATED: do-it-yourself: build from local source directory"
section = "build"
level = "long"
def setup_parser(subparser):
spack.cmd.dev_build.setup_parser(subparser)
def diy(self, args):
tty.warn("`spack diy` has been renamed to `spack dev-build`."
"The `diy` command will be removed in a future version of Spack")
spack.cmd.dev_build.dev_build(self, args)

View File

@@ -52,9 +52,6 @@ def env_activate_setup_parser(subparser):
shells.add_argument(
'--csh', action='store_const', dest='shell', const='csh',
help="print csh commands to activate the environment")
shells.add_argument(
'--fish', action='store_const', dest='shell', const='fish',
help="print fish commands to activate the environment")
view_options = subparser.add_mutually_exclusive_group()
view_options.add_argument(
@@ -130,9 +127,6 @@ def env_deactivate_setup_parser(subparser):
shells.add_argument(
'--csh', action='store_const', dest='shell', const='csh',
help="print csh commands to deactivate the environment")
shells.add_argument(
'--fish', action='store_const', dest='shell', const='fish',
help="print fish commands to activate the environment")
def env_deactivate(args):
@@ -214,14 +208,10 @@ def _env_create(name_or_path, init_file=None, dir=False, with_view=None):
env = ev.Environment(name_or_path, init_file, with_view)
env.write()
tty.msg("Created environment in %s" % env.path)
tty.msg("You can activate this environment with:")
tty.msg(" spack env activate %s" % env.path)
else:
env = ev.create(name_or_path, init_file, with_view)
env.write()
tty.msg("Created environment '%s' in %s" % (name_or_path, env.path))
tty.msg("You can activate this environment with:")
tty.msg(" spack env activate %s" % (name_or_path))
return env

View File

@@ -4,7 +4,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import argparse
import sys
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
@@ -22,8 +21,6 @@
def setup_parser(subparser):
subparser.epilog = 'If called without argument returns ' \
'the list of all valid extendable packages'
arguments.add_common_arguments(subparser, ['long', 'very_long'])
subparser.add_argument('-d', '--deps', action='store_true',
help='output dependencies along with found specs')
@@ -45,19 +42,7 @@ def setup_parser(subparser):
def extensions(parser, args):
if not args.spec:
# If called without arguments, list all the extendable packages
isatty = sys.stdout.isatty()
if isatty:
tty.info('Extendable packages:')
extendable_pkgs = []
for name in spack.repo.all_package_names():
pkg = spack.repo.get(name)
if pkg.extendable:
extendable_pkgs.append(name)
colify(extendable_pkgs, indent=4)
return
tty.die("extensions requires a package spec.")
# Checks
spec = cmd.parse_specs(args.spec)

View File

@@ -1,271 +0,0 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
from collections import defaultdict, namedtuple
import argparse
import os
import re
import six
import spack
import spack.error
import llnl.util.tty as tty
import spack.util.spack_yaml as syaml
import spack.util.environment
import llnl.util.filesystem
description = "add external packages to Spack configuration"
section = "config"
level = "short"
def setup_parser(subparser):
sp = subparser.add_subparsers(
metavar='SUBCOMMAND', dest='external_command')
find_parser = sp.add_parser('find', help=external_find.__doc__)
find_parser.add_argument(
'--not-buildable', action='store_true', default=False,
help="packages with detected externals won't be built with Spack")
find_parser.add_argument('packages', nargs=argparse.REMAINDER)
def is_executable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
def _get_system_executables():
"""Get the paths of all executables available from the current PATH.
For convenience, this is constructed as a dictionary where the keys are
the executable paths and the values are the names of the executables
(i.e. the basename of the executable path).
There may be multiple paths with the same basename. In this case it is
assumed they are distinct instances of the executable.
"""
path_hints = spack.util.environment.get_path('PATH')
search_paths = llnl.util.filesystem.search_paths_for_executables(
*path_hints)
path_to_exe = {}
# Reverse order of search directories so that an exe in the first PATH
# entry overrides later entries
for search_path in reversed(search_paths):
for exe in os.listdir(search_path):
exe_path = os.path.join(search_path, exe)
if is_executable(exe_path):
path_to_exe[exe_path] = exe
return path_to_exe
ExternalPackageEntry = namedtuple(
'ExternalPackageEntry',
['spec', 'base_dir'])
def _generate_pkg_config(external_pkg_entries):
"""Generate config according to the packages.yaml schema for a single
package.
This does not generate the entire packages.yaml. For example, given some
external entries for the CMake package, this could return::
{ 'paths': {
'cmake@3.17.1': '/opt/cmake-3.17.1/',
'cmake@3.16.5': '/opt/cmake-3.16.5/'
}
}
"""
paths_dict = syaml.syaml_dict()
for e in external_pkg_entries:
if not _spec_is_valid(e.spec):
continue
paths_dict[str(e.spec)] = e.base_dir
pkg_dict = syaml.syaml_dict()
pkg_dict['paths'] = paths_dict
return pkg_dict
def _spec_is_valid(spec):
try:
str(spec)
except spack.error.SpackError:
# It is assumed here that we can at least extract the package name from
# the spec so we can look up the implementation of
# determine_spec_details
tty.warn('Constructed spec for {0} does not have a string'
' representation'.format(spec.name))
return False
try:
spack.spec.Spec(str(spec))
except spack.error.SpackError:
tty.warn('Constructed spec has a string representation but the string'
' representation does not evaluate to a valid spec: {0}'
.format(str(spec)))
return False
return True
def external_find(args):
if args.packages:
packages_to_check = list(spack.repo.get(pkg) for pkg in args.packages)
else:
packages_to_check = spack.repo.path.all_packages()
pkg_to_entries = _get_external_packages(packages_to_check)
_update_pkg_config(pkg_to_entries, args.not_buildable)
def _group_by_prefix(paths):
groups = defaultdict(set)
for p in paths:
groups[os.path.dirname(p)].add(p)
return groups.items()
def _convert_to_iterable(single_val_or_multiple):
x = single_val_or_multiple
if x is None:
return []
elif isinstance(x, six.string_types):
return [x]
elif isinstance(x, spack.spec.Spec):
# Specs are iterable, but a single spec should be converted to a list
return [x]
try:
iter(x)
return x
except TypeError:
return [x]
def _determine_base_dir(prefix):
# Given a prefix where an executable is found, assuming that prefix ends
# with /bin/, strip off the 'bin' directory to get a Spack-compatible
# prefix
assert os.path.isdir(prefix)
if os.path.basename(prefix) == 'bin':
return os.path.dirname(prefix)
def _get_predefined_externals():
# Pull from all scopes when looking for preexisting external package
# entries
pkg_config = spack.config.get('packages')
already_defined_specs = set()
for pkg_name, per_pkg_cfg in pkg_config.items():
paths = per_pkg_cfg.get('paths', {})
already_defined_specs.update(spack.spec.Spec(k) for k in paths)
modules = per_pkg_cfg.get('modules', {})
already_defined_specs.update(spack.spec.Spec(k) for k in modules)
return already_defined_specs
def _update_pkg_config(pkg_to_entries, not_buildable):
predefined_external_specs = _get_predefined_externals()
pkg_to_cfg = {}
for pkg_name, ext_pkg_entries in pkg_to_entries.items():
new_entries = list(
e for e in ext_pkg_entries
if (e.spec not in predefined_external_specs))
pkg_config = _generate_pkg_config(new_entries)
if not_buildable:
pkg_config['buildable'] = False
pkg_to_cfg[pkg_name] = pkg_config
cfg_scope = spack.config.default_modify_scope()
pkgs_cfg = spack.config.get('packages', scope=cfg_scope)
spack.config.merge_yaml(pkgs_cfg, pkg_to_cfg)
spack.config.set('packages', pkgs_cfg, scope=cfg_scope)
def _get_external_packages(packages_to_check, system_path_to_exe=None):
if not system_path_to_exe:
system_path_to_exe = _get_system_executables()
exe_pattern_to_pkgs = defaultdict(list)
for pkg in packages_to_check:
if hasattr(pkg, 'executables'):
for exe in pkg.executables:
exe_pattern_to_pkgs[exe].append(pkg)
pkg_to_found_exes = defaultdict(set)
for exe_pattern, pkgs in exe_pattern_to_pkgs.items():
compiled_re = re.compile(exe_pattern)
for path, exe in system_path_to_exe.items():
if compiled_re.search(exe):
for pkg in pkgs:
pkg_to_found_exes[pkg].add(path)
pkg_to_entries = defaultdict(list)
resolved_specs = {} # spec -> exe found for the spec
for pkg, exes in pkg_to_found_exes.items():
if not hasattr(pkg, 'determine_spec_details'):
tty.warn("{0} must define 'determine_spec_details' in order"
" for Spack to detect externally-provided instances"
" of the package.".format(pkg.name))
continue
# TODO: iterate through this in a predetermined order (e.g. by package
# name) to get repeatable results when there are conflicts. Note that
# if we take the prefixes returned by _group_by_prefix, then consider
# them in the order that they appear in PATH, this should be sufficient
# to get repeatable results.
for prefix, exes_in_prefix in _group_by_prefix(exes):
# TODO: multiple instances of a package can live in the same
# prefix, and a package implementation can return multiple specs
# for one prefix, but without additional details (e.g. about the
# naming scheme which differentiates them), the spec won't be
# usable.
specs = _convert_to_iterable(
pkg.determine_spec_details(prefix, exes_in_prefix))
if not specs:
tty.debug(
'The following executables in {0} were decidedly not '
'part of the package {1}: {2}'
.format(prefix, pkg.name, ', '.join(exes_in_prefix))
)
for spec in specs:
pkg_prefix = _determine_base_dir(prefix)
if not pkg_prefix:
tty.debug("{0} does not end with a 'bin/' directory: it"
" cannot be added as a Spack package"
.format(prefix))
continue
if spec in resolved_specs:
prior_prefix = ', '.join(resolved_specs[spec])
tty.debug(
"Executables in {0} and {1} are both associated"
" with the same spec {2}"
.format(prefix, prior_prefix, str(spec)))
continue
else:
resolved_specs[spec] = prefix
pkg_to_entries[pkg.name].append(
ExternalPackageEntry(spec=spec, base_dir=pkg_prefix))
return pkg_to_entries
def external(parser, args):
action = {'find': external_find}
action[args.external_command](args)
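To make the detection loop in _get_external_packages easier to follow, here is a cut-down standalone version of its pattern-matching core; the PATH contents and package patterns below are invented:

    import re
    from collections import defaultdict

    # Invented stand-ins for _get_system_executables() and pkg.executables
    path_to_exe = {'/usr/bin/cmake': 'cmake', '/usr/bin/gcc-9': 'gcc-9'}
    exe_pattern_to_pkgs = {r'^cmake$': ['cmake'], r'^gcc(-\d+)?$': ['gcc']}

    pkg_to_found_exes = defaultdict(set)
    for pattern, pkgs in exe_pattern_to_pkgs.items():
        compiled_re = re.compile(pattern)
        for path, exe in path_to_exe.items():
            if compiled_re.search(exe):
                for pkg in pkgs:
                    pkg_to_found_exes[pkg].add(path)

    print(dict(pkg_to_found_exes))
    # {'cmake': {'/usr/bin/cmake'}, 'gcc': {'/usr/bin/gcc-9'}}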

View File

@@ -7,7 +7,6 @@
import copy
import os
import sys
import llnl.util.tty as tty
import llnl.util.tty.color as color
@@ -237,7 +236,7 @@ def find(parser, args):
else:
if env:
display_env(env, args, decorator)
if sys.stdout.isatty() and args.groups:
if args.groups:
tty.msg("%s" % plural(len(results), 'installed package'))
cmd.display_specs(
results, args, decorator=decorator, all_headers=True)

View File

@@ -8,7 +8,6 @@
import textwrap
from six.moves import zip_longest
import llnl.util.tty as tty
import llnl.util.tty.color as color
from llnl.util.tty.colify import colify
@@ -54,9 +53,11 @@ def variant(s):
class VariantFormatter(object):
def __init__(self, variants):
def __init__(self, variants, max_widths=(30, 20, 30)):
self.variants = variants
self.headers = ('Name [Default]', 'Allowed values', 'Description')
# Set max headers lengths
self.max_column_widths = max_widths
# Formats
fmt_name = '{0} [{1}]'
@@ -66,7 +67,7 @@ def __init__(self, variants):
# than that
self.column_widths = [len(x) for x in self.headers]
# Expand columns based on max line lengths
# Update according to line lengths
for k, v in variants.items():
candidate_max_widths = (
len(fmt_name.format(k, self.default(v))), # Name [Default]
@@ -80,18 +81,12 @@ def __init__(self, variants):
max(self.column_widths[2], candidate_max_widths[2])
)
# Don't let name or possible values be less than max widths
_, cols = tty.terminal_size()
max_name = min(self.column_widths[0], 30)
max_vals = min(self.column_widths[1], 20)
# allow the description column to extend as wide as the terminal.
max_description = min(
self.column_widths[2],
# min width 70 cols, 14 cols of margins and column spacing
max(cols, 70) - max_name - max_vals - 14,
# Reduce to at most the maximum allowed
self.column_widths = (
min(self.column_widths[0], self.max_column_widths[0]),
min(self.column_widths[1], self.max_column_widths[1]),
min(self.column_widths[2], self.max_column_widths[2])
)
self.column_widths = (max_name, max_vals, max_description)
# Compute the format
self.fmt = "%%-%ss%%-%ss%%s" % (
@@ -119,8 +114,10 @@ def lines(self):
'{0} [{1}]'.format(k, self.default(v)),
width=self.column_widths[0]
)
allowed = v.allowed_values.replace('True, False', 'on, off')
allowed = textwrap.wrap(allowed, width=self.column_widths[1])
allowed = textwrap.wrap(
v.allowed_values,
width=self.column_widths[1]
)
description = textwrap.wrap(
v.description,
width=self.column_widths[2]
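The clamping on one side of this hunk reduces each computed column width to a fixed cap; a standalone sketch with an invented header row and sample variant:

    headers = ('Name [Default]', 'Allowed values', 'Description')
    max_widths = (30, 20, 30)
    rows = [('build_type [RelWithDebInfo]',
             'Debug, Release, RelWithDebInfo',
             'CMake build type')]

    # Start from header widths, grow to fit content, then cap at max_widths
    widths = [len(h) for h in headers]
    for row in rows:
        widths = [max(w, len(cell)) for w, cell in zip(widths, row)]
    widths = tuple(min(w, m) for w, m in zip(widths, max_widths))
    print(widths)  # (27, 20, 16)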

View File

@@ -32,7 +32,6 @@ def update_kwargs_from_args(args, kwargs):
that will be passed to Package.do_install API"""
kwargs.update({
'fail_fast': args.fail_fast,
'keep_prefix': args.keep_prefix,
'keep_stage': args.keep_stage,
'restage': not args.dont_restage,
@@ -41,6 +40,8 @@ def update_kwargs_from_args(args, kwargs):
'fake': args.fake,
'dirty': args.dirty,
'use_cache': args.use_cache,
'install_global': args.install_global,
'upstream': args.upstream,
'cache_only': args.cache_only,
'explicit': True, # Always true for install command
'stop_at': args.until,
@@ -48,7 +49,7 @@ def update_kwargs_from_args(args, kwargs):
})
kwargs.update({
'install_deps': ('dependencies' in args.things_to_install),
'install_dependencies': ('dependencies' in args.things_to_install),
'install_package': ('package' in args.things_to_install)
})
@@ -79,9 +80,6 @@ def setup_parser(subparser):
subparser.add_argument(
'--overwrite', action='store_true',
help="reinstall an existing spec, even if it has dependents")
subparser.add_argument(
'--fail-fast', action='store_true',
help="stop all builds if any build fails (default is best effort)")
subparser.add_argument(
'--keep-prefix', action='store_true',
help="don't remove the install prefix if installation fails")
@@ -127,6 +125,14 @@ def setup_parser(subparser):
'-f', '--file', action='append', default=[],
dest='specfiles', metavar='SPEC_YAML_FILE',
help="install from file. Read specs to install from .yaml files")
subparser.add_argument(
'--upstream', action='store', default=None,
dest='upstream', metavar='UPSTREAM_NAME',
help='specify which upstream Spack instance to install to')
subparser.add_argument(
'-g', '--global', action='store_true', default=False,
dest='install_global',
help='install package to a globally accessible location')
cd_group = subparser.add_mutually_exclusive_group()
arguments.add_common_arguments(cd_group, ['clean', 'dirty'])
@@ -220,7 +226,10 @@ def default_log_file(spec):
"""
fmt = 'test-{x.name}-{x.version}-{hash}.xml'
basename = fmt.format(x=spec, hash=spec.dag_hash())
dirname = fs.os.path.join(spack.paths.var_path, 'junit-report')
dirname = fs.os.path.join(spack.paths.user_config_path,
'var/spack',
'junit-report')
fs.mkdirp(dirname)
return fs.os.path.join(dirname, basename)
@@ -231,6 +240,7 @@ def install_spec(cli_args, kwargs, abstract_spec, spec):
try:
# handle active environment, if any
env = ev.get_env(cli_args, 'install')
if env:
with env.write_transaction():
concrete = env.concretize_and_add(
@@ -241,6 +251,10 @@ def install_spec(cli_args, kwargs, abstract_spec, spec):
env.regenerate_views()
else:
spec.package.do_install(**kwargs)
spack.config.set('config:active_tree', '~/.spack/opt/spack',
scope='user')
spack.config.set('config:active_upstream', None,
scope='user')
except spack.build_environment.InstallError as e:
if cli_args.show_log_on_error:
@@ -255,6 +269,30 @@ def install_spec(cli_args, kwargs, abstract_spec, spec):
def install(parser, args, **kwargs):
# Install Package to Global Upstream for multi-user use
if args.install_global:
spack.config.set('config:active_upstream', 'global',
scope='user')
global_root = spack.config.get('upstreams')
global_root = global_root['global']['install_tree']
global_root = spack.util.path.canonicalize_path(global_root)
spack.config.set('config:active_tree', global_root,
scope='user')
elif args.upstream:
if args.upstream not in spack.config.get('upstreams'):
tty.die("specified upstream does not exist")
spack.config.set('config:active_upstream', args.upstream,
scope='user')
root = spack.config.get('upstreams')
root = root[args.upstream]['install_tree']
root = spack.util.path.canonicalize_path(root)
spack.config.set('config:active_tree', root, scope='user')
else:
spack.config.set('config:active_upstream', None,
scope='user')
spack.config.set('config:active_tree',
spack.config.get('config:install_tree'),
scope='user')
if args.help_cdash:
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
@@ -268,7 +306,7 @@ def install(parser, args, **kwargs):
return
if not args.spec and not args.specfiles:
# if there are no args but an active environment
# if there are no args but an active environment or spack.yaml file
# then install the packages from it.
env = ev.get_env(args, 'install')
if env:
@@ -289,18 +327,7 @@ def install(parser, args, **kwargs):
env.regenerate_views()
return
else:
msg = "install requires a package argument or active environment"
if 'spack.yaml' in os.listdir(os.getcwd()):
# There's a spack.yaml file in the working dir, the user may
# have intended to use that
msg += "\n\n"
msg += "Did you mean to install using the `spack.yaml`"
msg += " in this directory? Try: \n"
msg += " spack env activate .\n"
msg += " spack install\n"
msg += " OR\n"
msg += " spack --env . install"
tty.die(msg)
tty.die("install requires a package argument or a spack.yaml file")
if args.no_checksum:
spack.config.set('config:checksum', False, scope='command_line')
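The branching added to install() above selects an install tree from the configured upstreams; a standalone sketch of that selection, with a plain dict standing in for spack.config and an invented upstream layout:

    # Invented stand-in for the 'upstreams' section that spack.config would return
    upstreams = {'global': {'install_tree': '/usr/local/spack/opt'},
                 'lab': {'install_tree': '/shared/spack/opt'}}

    def select_install_tree(install_global, upstream):
        if install_global:  # spack install --global
            return upstreams['global']['install_tree']
        if upstream:        # spack install --upstream NAME
            if upstream not in upstreams:
                raise SystemExit('specified upstream does not exist')
            return upstreams[upstream]['install_tree']
        return '~/.spack/opt/spack'  # default per-user tree

    print(select_install_tree(False, 'lab'))  # /shared/spack/opt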

View File

@@ -32,17 +32,6 @@ def setup_parser(subparser):
shells.add_argument(
'--csh', action='store_const', dest='shell', const='csh',
help="print csh commands to load the package")
shells.add_argument(
'--fish', action='store_const', dest='shell', const='fish',
help="print fish commands to load the package")
subparser.add_argument(
'--first',
action='store_true',
default=False,
dest='load_first',
help="load the first match if multiple packages match the spec"
)
subparser.add_argument(
'--only',
@@ -58,11 +47,10 @@ def setup_parser(subparser):
def load(parser, args):
env = ev.get_env(args, 'load')
specs = [spack.cmd.disambiguate_spec(spec, env, first=args.load_first)
specs = [spack.cmd.disambiguate_spec(spec, env)
for spec in spack.cmd.parse_specs(args.specs)]
if not args.shell:
specs_string = ' '.join(args.specs)
msg = [
"This command works best with Spack's shell support",
""
@@ -70,8 +58,8 @@ def load(parser, args):
'Or, if you want to use `spack load` without initializing',
'shell support, you can run one of these:',
'',
' eval `spack load --sh %s` # for bash/sh' % specs_string,
' eval `spack load --csh %s` # for csh/tcsh' % specs_string,
' eval `spack load --sh %s` # for bash/sh' % args.specs,
' eval `spack load --csh %s` # for csh/tcsh' % args.specs,
]
tty.msg(*msg)
return 1
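One subtlety in this hunk: one side joins args.specs into a string before interpolation, while the other interpolates the list directly and would print Python list syntax. A tiny sketch of the difference:

    specs = ['zlib', 'bzip2']  # what args.specs holds
    print(' eval `spack load --sh %s`' % specs)            # ...--sh ['zlib', 'bzip2']`
    print(' eval `spack load --sh %s`' % ' '.join(specs))  # ...--sh zlib bzip2`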

View File

@@ -45,19 +45,7 @@ def setup_parser(subparser):
" (this requires significant time and space)")
create_parser.add_argument(
'-f', '--file', help="file with specs of packages to put in mirror")
create_parser.add_argument(
'--exclude-file',
help="specs which Spack should not try to add to a mirror"
" (listed in a file, one per line)")
create_parser.add_argument(
'--exclude-specs',
help="specs which Spack should not try to add to a mirror"
" (specified on command line)")
create_parser.add_argument(
'--skip-unstable-versions', action='store_true',
help="don't cache versions unless they identify a stable (unchanging)"
" source code")
create_parser.add_argument(
'-D', '--dependencies', action='store_true',
help="also fetch all dependencies")
@@ -241,7 +229,9 @@ def _read_specs_from_file(filename):
return specs
def _determine_specs_to_mirror(args):
def mirror_create(args):
"""Create a directory to be used as a spack mirror, and fill it with
package archives."""
if args.specs and args.all:
raise SpackError("Cannot specify specs on command line if you"
" chose to mirror all specs with '--all'")
@@ -271,7 +261,6 @@ def _determine_specs_to_mirror(args):
tty.die("Cannot pass specs on the command line with --file.")
specs = _read_specs_from_file(args.file)
env_specs = None
if not specs:
# If nothing is passed, use environment or all if no active env
if not args.all:
@@ -281,9 +270,12 @@ def _determine_specs_to_mirror(args):
env = ev.get_env(args, 'mirror')
if env:
env_specs = env.all_specs()
mirror_specs = env.specs_by_hash.values()
else:
specs = [Spec(n) for n in spack.repo.all_package_names()]
mirror_specs = spack.mirror.get_all_versions(specs)
mirror_specs.sort(
key=lambda s: (s.name, s.version))
else:
# If the user asked for dependencies, traverse spec DAG get them.
if args.dependencies:
@@ -302,38 +294,11 @@ def _determine_specs_to_mirror(args):
msg = 'Skipping {0} as it is an external spec.'
tty.msg(msg.format(spec.cshort_spec))
if env_specs:
if args.versions_per_spec:
tty.warn("Ignoring '--versions-per-spec' for mirroring specs"
" in environment.")
mirror_specs = env_specs
else:
if num_versions == 'all':
mirror_specs = spack.mirror.get_all_versions(specs)
else:
mirror_specs = spack.mirror.get_matching_versions(
specs, num_versions=num_versions)
mirror_specs.sort(
key=lambda s: (s.name, s.version))
exclude_specs = []
if args.exclude_file:
exclude_specs.extend(_read_specs_from_file(args.exclude_file))
if args.exclude_specs:
exclude_specs.extend(
spack.cmd.parse_specs(str(args.exclude_specs).split()))
if exclude_specs:
mirror_specs = list(
x for x in mirror_specs
if not any(x.satisfies(y, strict=True) for y in exclude_specs))
return mirror_specs
def mirror_create(args):
"""Create a directory to be used as a spack mirror, and fill it with
package archives."""
mirror_specs = _determine_specs_to_mirror(args)
mirror = spack.mirror.Mirror(
args.directory or spack.config.get('config:source_cache'))
@@ -343,8 +308,7 @@ def mirror_create(args):
existed = web_util.url_exists(directory)
# Actually do the work to create the mirror
present, mirrored, error = spack.mirror.create(
directory, mirror_specs, args.skip_unstable_versions)
present, mirrored, error = spack.mirror.create(directory, mirror_specs)
p, m, e = len(present), len(mirrored), len(error)
verb = "updated" if existed else "created"

Some files were not shown because too many files have changed in this diff.