Compare commits


179 Commits

Author SHA1 Message Date
Carson Woods
2b05d2bf5c Merge tag 'v0.15.1' into features/shared 2020-07-15 15:43:59 -04:00
Carson Woods
cc00619929 Remove unnecessary if/else logic for global upstream 2020-07-15 14:34:30 -04:00
Carson Woods
188a371595 Fix flake8 errors 2020-07-15 13:44:42 -04:00
Carson Woods
dce7be9932 Fix bug that caused packages installed upstream to install module files to user directory 2020-07-15 13:40:27 -04:00
Carson Woods
4bb26802ed Merge tag 'v0.15.0' into features/shared 2020-06-30 19:24:23 -05:00
Carson Woods
1965e1e606 Merge branch 'develop' into features/shared 2020-06-26 12:19:32 -04:00
Carson Woods
6ed3db6c14 Re-add testing code 2020-06-25 12:29:59 -04:00
Carson Woods
a8fbc96271 Add updated bash completion file 2020-06-24 16:00:19 -04:00
Carson Woods
d8956a3bbe Merge branch 'develop' into features/shared
Conflicts:
	share/spack/spack-completion.bash
2020-06-24 10:59:49 -06:00
Carson Woods
a807b95081 Merge branch 'develop' into features/shared 2020-06-23 11:37:54 -04:00
Carson Woods
1b608d6041 Merge branch 'develop' into features/shared 2020-04-20 11:02:54 -05:00
Carson Woods
be143d7dff Merge branch 'develop' into features/shared 2020-04-06 13:49:46 -04:00
Carson Woods
05fe92e086 Merge branch 'develop' into features/shared 2020-03-09 23:23:37 +00:00
Carson Woods
cd54fb95b8 Fix for infinite recursion bug that stemmed from duplicate code existing
after an improper merge with develop
2020-02-25 12:18:03 -05:00
Carson Woods
8b63c4555c Merge branch 'develop' into features/shared 2020-02-25 11:50:23 -05:00
Carson Woods
ec78160569 Update spack-completion.bash 2020-02-12 12:26:41 -05:00
Carson Woods
e1379f132d Merge branch 'develop' into features/shared 2020-02-12 12:21:19 -05:00
Carson Woods
cdcd3dcedd Merge branch 'develop' into features/shared 2020-01-27 15:28:28 -05:00
Carson Woods
7c1083916a Fix bash completion script 2020-01-24 11:36:26 -05:00
Carson Woods
c07bbe1a25 Fix flake8 error 2020-01-24 11:00:08 -05:00
Carson Woods
85032c6224 Resolve merge conflicts with develop 2020-01-24 10:41:44 -05:00
Carson Woods
7b7898a69c Merge branch 'develop' into features/shared 2020-01-21 18:55:21 -05:00
Carson Woods
84c5d76eae Merge branch 'develop' into features/shared 2020-01-18 13:57:50 -08:00
Carson Woods
bcd47f0bd6 Merge branch 'develop' into features/shared 2020-01-17 14:32:47 -08:00
Carson Woods
cb6a959cdb Merge branch 'develop' into features/shared 2020-01-15 14:41:14 -05:00
Carson Woods
32cd12bff7 Merge branch 'develop' into features/shared 2020-01-10 16:19:37 -08:00
Carson Woods
7021965159 Fix merge conflicts and repair broken unit test. 2020-01-09 20:12:39 -08:00
Carson Woods
5c5743ca33 Merge branch 'develop' into features/shared to support Spack 0.13.3 2019-12-26 21:00:09 -06:00
Carson Woods
034a7662ac Merge branch 'develop' into features/shared 2019-11-21 12:52:24 -07:00
Carson Woods
e6b6ac5898 Fixed error message to use proper --upstream rather than -u 2019-11-21 12:30:15 -07:00
Carson Woods
35037bf088 Merge branch 'develop' into features/shared 2019-11-17 16:37:47 -07:00
Carson Woods
d14c245411 Merge branch 'develop' into features/shared 2019-11-10 22:05:20 -05:00
Carson Woods
6e2ad01f20 Fix flake8 formatting 2019-11-06 13:25:15 -05:00
Carson Woods
ef9b5a8f74 Fix unit test failing 2019-11-06 13:24:10 -05:00
Carson Woods
4921ed29d5 Fix a few broken unit tests 2019-11-06 09:56:22 -05:00
Carson Woods
f4c720e902 Ensure feature supports Spack version 0.13.0-0.13.1 2019-11-05 16:38:18 -05:00
Carson Woods
0a71b1d5ac Merge branch 'develop' into features/shared 2019-10-31 21:29:33 -04:00
Carson Woods
3593a7be6a Better comment the purpose of new unit tests 2019-09-20 19:05:56 -04:00
Carson Woods
e4d2cf4441 Fix flake8 error to avoid failing CI testing 2019-09-20 15:29:46 -04:00
Carson Woods
911e51bd89 Merge branch 'develop' into features/shared
Done to resolve merge conflicts that had arisen since work on this
feature completed.
2019-09-20 15:28:44 -04:00
Carson Woods
6ec8aea6f7 Rebase branch 'features/shared' of github.com:carsonwoods/spack against develop 2019-08-07 18:57:48 -06:00
Carson Woods
5b11f7aa4c Fix bug where environments would ignore global path 2019-08-07 18:32:28 -06:00
Carson Woods
97e46981b9 Remove old doc from doc index 2019-08-07 18:32:28 -06:00
Carson Woods
873ac5e890 Remove old documentation for shared mode 2019-08-07 18:32:28 -06:00
Carson Woods
4d7dae5716 Remove old share command from tests 2019-08-07 18:32:28 -06:00
Carson Woods
b19f0fafcc Remove outdated share command 2019-08-07 18:32:28 -06:00
Carson Woods
11b1bdd119 Pep8 Compliance Fix 2019-08-07 18:32:28 -06:00
Carson Woods
f749821dc2 Pep8 Compliance 2019-08-07 18:32:28 -06:00
Carson Woods
5abb20dcab Rename test 2019-08-07 18:32:28 -06:00
Carson Woods
0c233bdd0f Add test for validating upstream database initialization 2019-08-07 18:32:28 -06:00
Carson Woods
0f171c7ded Replace space with = in command parameter 2019-08-07 18:32:28 -06:00
Carson Woods
b4c7520dd8 Flake8 Test Compliance 2019-08-07 18:32:28 -06:00
Carson Woods
9ab7d8f01d Add config parameter for active upstream to set install location for modules 2019-08-07 18:32:28 -06:00
Carson Woods
a7ad344c2a Add upstreams.yaml testing file so calls to upstreams['global'] don't cause tests to fail 2019-08-07 18:32:28 -06:00
Carson Woods
deb2d3745c Fix .spack-db/index.json not being created in global upstream if previously uninitialized 2019-08-07 18:32:28 -06:00
Carson Woods
ff96ec430b Can now specify an upstream of any name through the -u/--upstream flag 2019-08-07 18:32:28 -06:00
Carson Woods
d4a959736a Flake8 Compliance Changes 2019-08-07 18:32:28 -06:00
Carson Woods
5ba51a0be0 --global option now works for both install and uninstall 2019-08-07 18:32:28 -06:00
Carson Woods
27e1140df7 Reset active directory after each global install 2019-08-07 18:32:28 -06:00
Carson Woods
7ab6af8a3b Add scope to setting active tree to ensure that it is set at user level 2019-08-07 18:32:28 -06:00
Carson Woods
0e6e93eaac Fix unit test config.yaml 2019-08-07 18:32:28 -06:00
Carson Woods
38f8bdd2bb Home expansion was removed because it was no longer being used 2019-08-07 18:32:27 -06:00
Carson Woods
8e45a3fc2f Fix flake8 compliance 2019-08-07 18:32:27 -06:00
Carson Woods
c22af99b04 Fix how upstream db paths are canonicalized 2019-08-07 18:32:27 -06:00
Carson Woods
fc3a909fbc Set staging location to ~/.spack/var 2019-08-07 18:32:27 -06:00
Carson Woods
9665754eae Fix default install tree 2019-08-07 18:32:27 -06:00
Carson Woods
0f9f9f3a85 Revise default var path 2019-08-07 18:32:27 -06:00
Carson Woods
777a5682a6 Fix default install location 2019-08-07 18:32:27 -06:00
Carson Woods
8994b4aab6 Fix flake8 compliance 2019-08-07 18:32:27 -06:00
Carson Woods
98ec366470 Set root of store object to active tree 2019-08-07 18:32:27 -06:00
Carson Woods
c61f4d7c82 Add logic to set the active install tree 2019-08-07 18:32:27 -06:00
Carson Woods
811b304230 Remove old code 2019-08-07 18:32:27 -06:00
Carson Woods
8f0c9ad409 Change name of global parameter to install_global 2019-08-07 18:32:27 -06:00
Carson Woods
6a423a5d8a Typo fix 2019-08-07 18:32:27 -06:00
Carson Woods
23c37063bd Add default global upstream of /opt/spack 2019-08-07 18:32:27 -06:00
Carson Woods
478f3a5a99 Fix whitespace issue 2019-08-07 18:32:27 -06:00
Carson Woods
02afb30990 Remove unit testing for shared spack mode 2019-08-07 18:32:27 -06:00
Carson Woods
06e3f15e47 Remove old shared spack code 2019-08-07 18:32:27 -06:00
Carson Woods
f13ce3540d Add dest name of install_global to --global parameter 2019-08-07 18:32:27 -06:00
Carson Woods
7ae34087e3 Set remove old shared spack code 2019-08-07 18:32:27 -06:00
Carson Woods
f0fea97e88 Set source_cache to user's ~/.spack directory 2019-08-07 18:32:27 -06:00
Carson Woods
54893197ed Set staging location to be based out of users .spack directory 2019-08-07 18:32:27 -06:00
Carson Woods
80da1d50d1 Make var_path point to ~/.spack/var/spack 2019-08-07 18:32:27 -06:00
Carson Woods
944c5d75cd Add --global flag to install cmd to install to globally accessible location 2019-08-07 18:32:27 -06:00
Carson Woods
9ef4bc9d50 Add macro for expanding home directory 2019-08-07 18:32:27 -06:00
Carson Woods
a2af432833 Temporarily disable module file location override while feature is being implemented 2019-08-07 18:32:27 -06:00
Carson Woods
aefed311af Change modulefiles install location 2019-08-07 18:32:27 -06:00
Carson Woods
6ffacddcf4 Change default install tree to user's ~/.spack directory 2019-08-07 18:32:27 -06:00
Carson Woods
e17824f82f Remove shared mode set self as upstream 2019-08-07 18:32:27 -06:00
Carson Woods
57ca47f035 Remove testing for shared mode 2019-08-07 18:32:27 -06:00
Carson Woods
4532a56b4e Remove shared disable from unit testing 2019-08-07 18:32:27 -06:00
Carson Woods
86e69a48a2 Fix flake8 error 2019-08-07 18:32:27 -06:00
Carson Woods
2508295d81 Fix error caused by SPACK_PATH environment variable not existing 2019-08-07 18:32:27 -06:00
Carson Woods
1a041c051a Fix flake8 error 2019-08-07 18:32:27 -06:00
Carson Woods
2262ca2e67 Add test for install in shared mode 2019-08-07 18:32:27 -06:00
Carson Woods
2269771a91 Fix typo 2019-08-07 18:32:27 -06:00
Carson Woods
7f32574dd8 Fix shared cmd test file 2019-08-07 18:32:27 -06:00
Carson Woods
d15ac30f62 Add shared to toctree 2019-08-07 18:32:27 -06:00
Carson Woods
1f41347ab8 Share feature Unit testing 2019-08-07 18:32:27 -06:00
Carson Woods
1f4f01103b Add command interface for share feature 2019-08-07 18:32:27 -06:00
Carson Woods
8f46fcb512 When running tests, disable shared mode because it will break other tests. Custom tests must be written 2019-08-07 18:32:27 -06:00
Carson Woods
2d3b973ebc When shared mode is active store installed packages in SPACK_PATH 2019-08-07 18:32:27 -06:00
Carson Woods
7e62e0f27f When shared mode is active set stage path to SPACK_PATH 2019-08-07 18:32:27 -06:00
Carson Woods
ea0db4c0f9 Prevent packages from being installed upstream 2019-08-07 18:32:27 -06:00
Carson Woods
0afc68e60b Change module root path when shared mode is active 2019-08-07 18:32:27 -06:00
Carson Woods
8ad25d5013 Uninstall from SPACK_PATH when shared mode is active 2019-08-07 18:32:27 -06:00
Carson Woods
e90db68321 Install to SPACK_PATH when shared mode is active 2019-08-07 18:32:27 -06:00
Carson Woods
9e96b89f02 Add documentation for spack share command 2019-08-07 18:32:27 -06:00
Carson Woods
b4dae1b7fd When shared mode is active, spack treats the normal install directory as an upstream 2019-08-07 18:32:27 -06:00
Carson Woods
9e9adf1d2f When shared mode is active, set cache location to SPACK_PATH 2019-08-07 18:32:27 -06:00
Carson Woods
de9255247a Fix bug where environments would ignore global path 2019-08-06 17:49:17 -06:00
Carson Woods
de5d3e3229 Remove old doc from doc index 2019-07-26 08:54:12 -06:00
Carson Woods
e621aafc77 Remove old documentation for shared mode 2019-07-25 16:40:00 -06:00
Carson Woods
c53427c98d Remove old share command from tests 2019-07-25 14:22:43 -06:00
Carson Woods
7a75148d1b Remove outdated share command 2019-07-25 13:32:44 -06:00
Carson Woods
4210520c9d Pep8 Compliance Fix 2019-07-25 13:32:44 -06:00
Carson Woods
4f3fb50ae7 Pep8 Compliance 2019-07-25 13:32:44 -06:00
Carson Woods
7660659107 Rename test 2019-07-25 13:32:44 -06:00
Carson Woods
fcca2a518b Add test for validating upstream database initialization 2019-07-25 13:32:44 -06:00
Carson Woods
23e1cd7775 Replace space with = in command parameter 2019-07-25 13:32:44 -06:00
Carson Woods
58e794e95a Flake8 Test Compliance 2019-07-25 13:32:44 -06:00
Carson Woods
7ed59ed835 Add config parameter for active upstream to set install location for modules 2019-07-25 13:32:43 -06:00
Carson Woods
512726ae5b Add upstreams.yaml testing file so calls to upstreams['global'] don't cause tests to fail 2019-07-25 13:32:43 -06:00
Carson Woods
20851a6e6c Fix .spack-db/index.json not being created in global upstream if previously uninitialized 2019-07-25 13:32:43 -06:00
Carson Woods
92bbbb9659 Can now specify an upstream of any name through the -u/--upstream flag 2019-07-25 13:32:43 -06:00
Carson Woods
5f2f2bfb84 Flake8 Compliance Changes 2019-07-25 13:32:43 -06:00
Carson Woods
9b63f72d6b --global option now works for both install and uninstall 2019-07-25 13:32:43 -06:00
Carson Woods
4c60f01bae Reset active directory after each global install 2019-07-25 13:32:43 -06:00
Carson Woods
cd08308463 Add scope to setting active tree to ensure that it is set at user level 2019-07-25 13:32:43 -06:00
Carson Woods
fe69997043 Fix unit test config.yaml 2019-07-25 13:32:43 -06:00
Carson Woods
1584a6e3c6 Home expansion was removed because it was no longer being used 2019-07-25 13:32:43 -06:00
Carson Woods
c393880852 Fix flake8 compliance 2019-07-25 13:32:43 -06:00
Carson Woods
bbe9e6bf54 Fix how upstream db paths are canonicalized 2019-07-25 13:32:43 -06:00
Carson Woods
d7a00b71d4 Set staging location to ~/.spack/var 2019-07-25 13:32:43 -06:00
Carson Woods
6775d2546a Fix default install tree 2019-07-25 13:32:43 -06:00
Carson Woods
8a154333f2 Revise default var path 2019-07-25 13:32:43 -06:00
Carson Woods
5e637a04fd Fix default install location 2019-07-25 13:32:43 -06:00
Carson Woods
0213869439 Fix flake8 compliance 2019-07-25 13:32:43 -06:00
Carson Woods
22e9a9792a Set root of store object to active tree 2019-07-25 13:32:43 -06:00
Carson Woods
4f23da9d26 Add logic to set the active install tree 2019-07-25 13:32:43 -06:00
Carson Woods
f9430e2fd4 Remove old code 2019-07-25 13:32:43 -06:00
Carson Woods
a2f86d5d18 Change name of global parameter to install_global 2019-07-25 13:32:43 -06:00
Carson Woods
0efab6637c Typo fix 2019-07-25 13:32:43 -06:00
Carson Woods
2b11694b94 Add default global upstream of /opt/spack 2019-07-25 13:32:43 -06:00
Carson Woods
088798a727 Fix whitespace issue 2019-07-25 13:32:43 -06:00
Carson Woods
bddbb1c22e Remove unit testing for shared spack mode 2019-07-25 13:32:42 -06:00
Carson Woods
92f447cf1c Remove old shared spack code 2019-07-25 13:32:42 -06:00
Carson Woods
96f266c3e3 Add dest name of install_global to --global parameter 2019-07-25 13:32:42 -06:00
Carson Woods
d5093c20c5 Set remove old shared spack code 2019-07-25 13:32:42 -06:00
Carson Woods
2064241c37 Set source_cache to user's ~/.spack directory 2019-07-25 13:32:42 -06:00
Carson Woods
721742b764 Set staging location to be based out of users .spack directory 2019-07-25 13:32:42 -06:00
Carson Woods
c45bf153d8 Make var_path point to ~/.spack/var/spack 2019-07-25 13:32:42 -06:00
Carson Woods
b98e5e66e7 Add --global flag to install cmd to install to globally accessible location 2019-07-25 13:32:42 -06:00
Carson Woods
3d18bf345f Add macro for expanding home directory 2019-07-25 13:32:42 -06:00
Carson Woods
f8e9cf4081 Temporarily disable module file location override while feature is being implemented 2019-07-25 13:32:42 -06:00
Carson Woods
98e0f8b89b Change modulefiles install location 2019-07-25 13:32:42 -06:00
Carson Woods
263275b7ea Change default install tree to user's ~/.spack directory 2019-07-25 13:32:42 -06:00
Carson Woods
3e13002d7f Remove shared mode set self as upstream 2019-07-25 13:32:42 -06:00
Carson Woods
654e5cc924 Remove testing for shared mode 2019-07-25 13:32:42 -06:00
Carson Woods
04a72c1834 Remove shared disable from unit testing 2019-07-25 13:32:42 -06:00
Carson Woods
53cf6eb194 Fix flake8 error 2019-07-25 13:32:42 -06:00
Carson Woods
5a7f186176 Fix error caused by SPACK_PATH environment variable not existing 2019-07-25 13:32:42 -06:00
Carson Woods
987adfa9c9 Fix flake8 error 2019-07-25 13:32:42 -06:00
Carson Woods
e476bb1400 Add test for install in shared mode 2019-07-25 13:32:42 -06:00
Carson Woods
dc12233610 Fix typo 2019-07-25 13:32:42 -06:00
Carson Woods
29d21a0a5d Fix shared cmd test file 2019-07-25 13:32:42 -06:00
Carson Woods
762f505da5 Add shared to toctree 2019-07-25 13:32:42 -06:00
Carson Woods
8e1c326174 Share feature Unit testing 2019-07-25 13:32:42 -06:00
Carson Woods
0bac5d527d Add command interface for share feature 2019-07-25 13:32:42 -06:00
Carson Woods
79256eeb5c When running tests, disable shared mode because it will break other tests. Custom tests must be written 2019-07-25 13:32:42 -06:00
Carson Woods
de760942f2 When shared mode is active store installed packages in SPACK_PATH 2019-07-25 13:32:41 -06:00
Carson Woods
860641bfab When shared mode is active set stage path to SPACK_PATH 2019-07-25 13:32:41 -06:00
Carson Woods
673e55f14d Prevent packages from being installed upstream 2019-07-25 13:32:41 -06:00
Carson Woods
54777a4f3e Change module root path when shared mode is active 2019-07-25 13:32:41 -06:00
Carson Woods
db36e66592 Uninstall from SPACK_PATH when shared mode is active 2019-07-25 13:32:41 -06:00
Carson Woods
0d36e94407 Install to SPACK_PATH when shared mode is active 2019-07-25 13:32:41 -06:00
Carson Woods
92c3b5b8b2 Add documentation for spack share command 2019-07-25 13:32:41 -06:00
Carson Woods
71220a3656 When shared mode is active, spack treats the normal install directory as an upstream 2019-07-25 13:32:41 -06:00
Carson Woods
09bd29d816 When shared mode is active, set cache location to SPACK_PATH 2019-07-25 13:32:41 -06:00
2363 changed files with 13669 additions and 86587 deletions

View File

@@ -4,8 +4,7 @@ coverage:
range: 60...90
status:
project:
default:
threshold: 0.2%
default: yes
ignore:
- lib/spack/spack/test/.*

View File

@@ -4,9 +4,7 @@
parallel = True
concurrency = multiprocessing
branch = True
source =
bin
lib
source = lib
omit =
lib/spack/spack/test/*
lib/spack/docs/*

View File

@@ -29,4 +29,4 @@
#
[flake8]
ignore = E129,E221,E241,E272,E731,W503,W504,F999,N801,N813,N814
max-line-length = 88
max-line-length = 79

View File

@@ -1,20 +1,5 @@
#!/usr/bin/env sh
. share/spack/setup-env.sh
echo -e "config:\n build_jobs: 2" > etc/spack/config.yaml
spack config add "packages:all:target:[x86_64]"
# TODO: remove this explicit setting once apple-clang detection is fixed
cat <<EOF > etc/spack/compilers.yaml
compilers:
- compiler:
spec: apple-clang@11.0.3
paths:
cc: /usr/bin/clang
cxx: /usr/bin/clang++
f77: /usr/local/bin/gfortran-9
fc: /usr/local/bin/gfortran-9
modules: []
operating_system: catalina
target: x86_64
EOF
spack compiler info apple-clang
spack debug report
git clone https://github.com/spack/spack.git
echo -e "config:\n build_jobs: 2" > spack/etc/spack/config.yaml
. spack/share/spack/setup-env.sh
spack compilers

View File

@@ -3,12 +3,13 @@ name: linux builds
on:
push:
branches:
- master
- develop
- releases/**
pull_request:
branches:
- master
- develop
- releases/**
paths-ignore:
# Don't run if we only modified packages in the built-in repository
- 'var/spack/repos/builtin/**'
@@ -18,41 +19,36 @@ on:
- '!var/spack/repos/builtin/packages/py-setuptools/**'
- '!var/spack/repos/builtin/packages/openjpeg/**'
- '!var/spack/repos/builtin/packages/r-rcpp/**'
- '!var/spack/repos/builtin/packages/ruby-rake/**'
# Don't run if we only modified documentation
- 'lib/spack/docs/**'
jobs:
build:
runs-on: ubuntu-latest
strategy:
max-parallel: 4
matrix:
package:
- lz4 # MakefilePackage
- mpich~fortran # AutotoolsPackage
- tut # WafPackage
- py-setuptools # PythonPackage
- openjpeg # CMakePackage
- r-rcpp # RPackage
- ruby-rake # RubyPackage
package: [lz4, mpich, tut, py-setuptools, openjpeg, r-rcpp]
steps:
- uses: actions/checkout@v2
- uses: actions/cache@v2
- name: Cache ccache's store
uses: actions/cache@v1
with:
path: ~/.ccache
key: ccache-build-${{ matrix.package }}
restore-keys: |
ccache-build-${{ matrix.package }}
- uses: actions/setup-python@v2
- name: Setup Python
uses: actions/setup-python@v1
with:
python-version: 3.9
python-version: 3.8
- name: Install System Packages
run: |
sudo apt-get update
sudo apt-get -yqq install ccache gfortran perl perl-base r-base r-base-core r-base-dev ruby findutils openssl libssl-dev libpciaccess-dev
sudo apt-get -yqq install ccache gfortran perl perl-base r-base r-base-core r-base-dev findutils openssl libssl-dev libpciaccess-dev
R --version
perl --version
ruby --version
- name: Copy Configuration
run: |
ccache -M 300M && ccache -z

View File

@@ -3,36 +3,32 @@ name: linux tests
on:
push:
branches:
- master
- develop
- releases/**
pull_request:
branches:
- master
- develop
- releases/**
jobs:
unittests:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [2.7, 3.5, 3.6, 3.7, 3.8, 3.9]
concretizer: ['original', 'clingo']
python-version: [2.7, 3.5, 3.6, 3.7, 3.8]
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@v2
- name: Setup Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install System packages
run: |
sudo apt-get -y update
# Needed for unit tests
sudo apt-get install -y coreutils gfortran graphviz gnupg2 mercurial
sudo apt-get install -y ninja-build patchelf
sudo apt-get install -y coreutils gfortran graphviz gnupg2 mercurial ninja-build patchelf
# Needed for kcov
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev zlib1g-dev libdw-dev libiberty-dev
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools codecov coverage
@@ -40,7 +36,9 @@ jobs:
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
git config --global user.email "spack@example.com"
git config --global user.name "Test User"
git fetch -u origin develop:develop
- name: Install kcov for bash script coverage
env:
KCOV_VERSION: 34
@@ -51,100 +49,14 @@ jobs:
mkdir -p ${KCOV_ROOT}/build
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
- name: Bootstrap clingo from sources
if: ${{ matrix.concretizer == 'clingo' }}
run: |
. share/spack/setup-env.sh
spack external find --not-buildable cmake bison
spack -v solve zlib
- name: Run unit tests
env:
COVERAGE: true
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
run: |
share/spack/qa/run-unit-tests
coverage combine
coverage xml
- uses: codecov/codecov-action@v1
- name: Upload to codecov.io
uses: codecov/codecov-action@v1
with:
flags: unittests,linux,${{ matrix.concretizer }}
shell:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install System packages
run: |
sudo apt-get -y update
# Needed for shell tests
sudo apt-get install -y coreutils csh zsh tcsh fish dash bash
# Needed for kcov
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools codecov coverage
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Install kcov for bash script coverage
env:
KCOV_VERSION: 38
run: |
KCOV_ROOT=$(mktemp -d)
wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
mkdir -p ${KCOV_ROOT}/build
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
- name: Run shell tests
env:
COVERAGE: true
run: |
share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@v1
with:
flags: shelltests,linux
centos6:
# Test for Python2.6 run on Centos 6
runs-on: ubuntu-latest
container: spack/github-actions:centos6
steps:
- name: Run unit tests
env:
HOME: /home/spack-test
run: |
whoami && echo $HOME && cd $HOME
git clone https://github.com/spack/spack.git && cd spack
git fetch origin ${{ github.ref }}:test-branch
git checkout test-branch
share/spack/qa/run-unit-tests
clingo-cffi:
# Test for the clingo based solver (using clingo-cffi)
runs-on: ubuntu-latest
container: spack/github-actions:clingo-cffi
steps:
- name: Run unit tests
run: |
whoami && echo PWD=$PWD && echo HOME=$HOME && echo SPACK_TEST_SOLVER=$SPACK_TEST_SOLVER
python3 -c "import clingo; print(hasattr(clingo.Symbol, '_rep'), clingo.__version__)"
git clone https://github.com/spack/spack.git && cd spack
git fetch origin ${{ github.ref }}:test-branch
git checkout test-branch
. share/spack/setup-env.sh
spack compiler find
spack solve mpileaks%gcc
coverage run $(which spack) unit-test -v
coverage combine
coverage xml
- uses: codecov/codecov-action@v1
with:
flags: unittests,linux,clingo
flags: unittests,linux

View File

@@ -8,13 +8,6 @@ on:
schedule:
# nightly at 1 AM
- cron: '0 1 * * *'
pull_request:
branches:
- develop
paths:
# Run if we modify this yaml file
- '.github/workflows/macos_python.yml'
# TODO: run if we touch any of the recipes involved in this
# GitHub Action Limits
# https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions
@@ -25,14 +18,10 @@ jobs:
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
# 9.2.0 is the latest version on which we apply homebrew patch
spack install -v --fail-fast gcc@9.2.0 %apple-clang
spack install -v gcc
install_jupyter_clang:
name: jupyter
@@ -40,39 +29,30 @@ jobs:
timeout-minutes: 700
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v --fail-fast py-jupyterlab %apple-clang
spack install -v py-jupyter %clang
install_scipy_clang:
name: scipy, mpl, pd
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v --fail-fast py-scipy %apple-clang
spack install -v --fail-fast py-matplotlib %apple-clang
spack install -v --fail-fast py-pandas %apple-clang
spack install -v py-scipy %clang
spack install -v py-matplotlib %clang
spack install -v py-pandas %clang
install_mpi4py_clang:
name: mpi4py, petsc4py
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: spack install
run: |
. .github/workflows/install_spack.sh
spack install -v --fail-fast py-mpi4py %apple-clang
spack install -v --fail-fast py-petsc4py %apple-clang
spack install -v py-mpi4py %clang
spack install -v py-petsc4py %clang

View File

@@ -3,23 +3,25 @@ name: macos tests
on:
push:
branches:
- master
- develop
- releases/**
pull_request:
branches:
- master
- develop
- releases/**
jobs:
build:
runs-on: macos-latest
strategy:
matrix:
python-version: [3.8]
python-version: [3.7]
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@v2
- name: Setup Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Python packages
@@ -29,16 +31,19 @@ jobs:
pip install --upgrade flake8 pep8-naming
- name: Setup Homebrew packages
run: |
brew install dash fish gcc gnupg2 kcov
brew update
brew upgrade
brew install gcc gnupg2 dash kcov
- name: Run unit tests
run: |
git --version
. .github/workflows/setup_git.sh
git fetch -u origin develop:develop
. share/spack/setup-env.sh
coverage run $(which spack) unit-test
coverage run $(which spack) test
coverage combine
coverage xml
- uses: codecov/codecov-action@v1
- name: Upload to codecov.io
uses: codecov/codecov-action@v1
with:
file: ./coverage.xml
flags: unittests,macos

View File

@@ -0,0 +1,31 @@
name: python version check
on:
push:
branches:
- master
- develop
- releases/**
pull_request:
branches:
- master
- develop
jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v1
with:
python-version: 3.7
- name: Install Python Packages
run: |
pip install --upgrade pip
pip install --upgrade vermin
- name: Minimum Version (Spack's Core)
run: vermin --backport argparse -t=2.6- -t=3.5- -v lib/spack/spack/ lib/spack/llnl/ bin/
- name: Minimum Version (Repositories)
run: vermin --backport argparse -t=2.6- -t=3.5- -v var/spack/repos

View File

@@ -1,9 +0,0 @@
#!/usr/bin/env sh
git config --global user.email "spack@example.com"
git config --global user.name "Test User"
# With fetch-depth: 0 we have a remote develop
# but not a local branch. Don't do this on develop
if [ "$(git branch --show-current)" != "develop" ]
then
git branch develop origin/develop
fi

View File

@@ -1,65 +0,0 @@
name: style and docs
on:
push:
branches:
- develop
- releases/**
pull_request:
branches:
- develop
- releases/**
jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install Python Packages
run: |
pip install --upgrade pip
pip install --upgrade vermin
- name: Minimum Version (Spack's Core)
run: vermin --backport argparse -t=2.6- -t=3.5- -v lib/spack/spack/ lib/spack/llnl/ bin/
- name: Minimum Version (Repositories)
run: vermin --backport argparse -t=2.6- -t=3.5- -v var/spack/repos
flake8:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools flake8
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/setup_git.sh
- name: Run flake8 tests
run: |
share/spack/qa/run-flake8-tests
documentation:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install System packages
run: |
sudo apt-get -y update
sudo apt-get install -y coreutils ninja-build graphviz
- name: Install Python packages
run: |
pip install --upgrade pip six setuptools
pip install --upgrade -r lib/spack/docs/requirements.txt
- name: Build documentation
run: |
share/spack/qa/run-doc-tests

View File

@@ -20,8 +20,8 @@ Geoffrey Oxberry <oxberry1@llnl.gov> Geoffrey Oxberry
Glenn Johnson <glenn-johnson@uiowa.edu> Glenn Johnson <gjohnson@argon-ohpc.hpc.uiowa.edu>
Glenn Johnson <glenn-johnson@uiowa.edu> Glenn Johnson <glennpj@gmail.com>
Gregory Becker <becker33@llnl.gov> Gregory Becker <becker33.llnl.gov>
Gregory Becker <becker33@llnl.gov> Gregory Becker <becker33.llnl.gov>
Gregory Becker <becker33@llnl.gov> Gregory Becker <becker33@llnl.gov>
Gregory Becker <becker33@llnl.gov> becker33 <becker33.llnl.gov>
Gregory Becker <becker33@llnl.gov> becker33 <becker33@llnl.gov>
Gregory L. Lee <lee218@llnl.gov> Greg Lee <lee218@llnl.gov>
Gregory L. Lee <lee218@llnl.gov> Gregory L. Lee <lee218@cab687.llnl.gov>
Gregory L. Lee <lee218@llnl.gov> Gregory L. Lee <lee218@cab690.llnl.gov>

.travis.yml (new file, 152 lines)
View File

@@ -0,0 +1,152 @@
#=============================================================================
# Project settings
#=============================================================================
# Only build master and develop on push; do not build every branch.
branches:
only:
- master
- develop
- /^releases\/.*$/
#=============================================================================
# Build matrix
#=============================================================================
dist: bionic
jobs:
fast_finish: true
include:
- stage: 'style checks'
python: '3.8'
os: linux
language: python
env: TEST_SUITE=flake8
- stage: 'unit tests + documentation'
python: '2.6'
dist: trusty
os: linux
language: python
addons:
apt:
# Everything but patchelf, that is not available for trusty
packages:
- ccache
- gfortran
- graphviz
- gnupg2
- kcov
- mercurial
- ninja-build
- realpath
- zsh
- fish
env: [ TEST_SUITE=unit, COVERAGE=true ]
- python: '3.8'
os: linux
language: python
env: [ TEST_SUITE=shell, COVERAGE=true, KCOV_VERSION=38 ]
- python: '3.8'
os: linux
language: python
env: TEST_SUITE=doc
stages:
- 'style checks'
- 'unit tests + documentation'
#=============================================================================
# Environment
#=============================================================================
# Docs need graphviz to build
addons:
# for Linux builds, we use APT
apt:
packages:
- ccache
- coreutils
- gfortran
- graphviz
- gnupg2
- mercurial
- ninja-build
- patchelf
- zsh
- fish
update: true
# ~/.ccache needs to be cached directly as Travis is not taking care of it
# (possibly because we use 'language: python' and not 'language: c')
cache:
pip: true
ccache: true
directories:
- ~/.ccache
before_install:
- ccache -M 2G && ccache -z
# Install kcov manually, since it's not packaged for bionic beaver
- if [[ "$KCOV_VERSION" ]]; then
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev zlib1g-dev libdw-dev libiberty-dev;
KCOV_ROOT=$(mktemp -d);
wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz;
tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz;
mkdir -p ${KCOV_ROOT}/build;
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd - ;
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install;
fi
# Install various dependencies
install:
- pip install --upgrade pip
- pip install --upgrade six
- pip install --upgrade setuptools
- pip install --upgrade codecov coverage==4.5.4
- pip install --upgrade flake8
- pip install --upgrade pep8-naming
- if [[ "$TEST_SUITE" == "doc" ]]; then
pip install --upgrade -r lib/spack/docs/requirements.txt;
fi
before_script:
# Need this for the git tests to succeed.
- git config --global user.email "spack@example.com"
- git config --global user.name "Test User"
# Need this to be able to compute the list of changed files
- git fetch origin ${TRAVIS_BRANCH}:${TRAVIS_BRANCH}
#=============================================================================
# Building
#=============================================================================
script:
- share/spack/qa/run-$TEST_SUITE-tests
after_success:
- ccache -s
- case "$TEST_SUITE" in
unit)
if [[ "$COVERAGE" == "true" ]]; then
codecov --env PYTHON_VERSION
--required
--flags "${TEST_SUITE}${TRAVIS_OS_NAME}";
fi
;;
shell)
codecov --env PYTHON_VERSION
--required
--flags "${TEST_SUITE}${TRAVIS_OS_NAME}";
esac
#=============================================================================
# Notifications
#=============================================================================
notifications:
email:
recipients:
- tgamblin@llnl.gov
- massimiliano.culpo@gmail.com
on_success: change
on_failure: always

View File

@@ -1,229 +1,3 @@
# v0.16.3 (2021-09-21)
* clang/llvm: fix version detection (#19978)
* Fix use of quotes in Python build system (#22279)
* Cray: fix extracting paths from module files (#23472)
* Use AWS CloudFront for source mirror (#23978)
* Ensure all roots of an installed environment are marked explicit in db (#24277)
* Fix fetching for Python 3.8 and 3.9 (#24686)
* locks: only open lockfiles once instead of for every lock held (#24794)
* Remove the EOL centos:6 docker image
# v0.16.2 (2021-05-22)
* Major performance improvement for `spack load` and other commands. (#23661)
* `spack fetch` is now environment-aware. (#19166)
* Numerous fixes for the new, `clingo`-based concretizer. (#23016, #23307,
#23090, #22896, #22534, #20644, #20537, #21148)
* Support for automatically bootstrapping `clingo` from source. (#20652, #20657,
#21364, #21446, #21913, #22354, #22444, #22460, #22489, #22610, #22631)
* Python 3.10 support: `collections.abc` (#20441)
* Fix import issues by using `__import__` instead of Spack package importer.
(#23288, #23290)
* Bugfixes and `--source-dir` argument for `spack location`. (#22755, #22348,
#22321)
* Better support for externals in shared prefixes. (#22653)
* `spack build-env` now prefers specs defined in the active environment.
(#21642)
* Remove erroneous warnings about quotes in `from_sourcing_files`. (#22767)
* Fix clearing cache of `InternalConfigScope`. (#22609)
* Bugfix for active when pkg is already active error. (#22587)
* Make `SingleFileScope` able to repopulate the cache after clearing it.
(#22559)
* Channelflow: Fix the package. (#22483)
* More descriptive error message for bugs in `package.py` (#21811)
* Use package-supplied `autogen.sh`. (#20319)
* Respect `-k/verify-ssl-false` in `_existing_url` method. (#21864)
# v0.16.1 (2021-02-22)
This minor release includes a new feature and associated fixes:
* intel-oneapi support through new packages (#20411, #20686, #20693, #20717,
#20732, #20808, #21377, #21448)
This release also contains bug fixes/enhancements for:
* HIP/ROCm support (#19715, #20095)
* concretization (#19988, #20020, #20082, #20086, #20099, #20102, #20128,
#20182, #20193, #20194, #20196, #20203, #20247, #20259, #20307, #20362,
#20383, #20423, #20473, #20506, #20507, #20604, #20638, #20649, #20677,
#20680, #20790)
* environment install reporting fix (#20004)
* avoid import in ABI compatibility info (#20236)
* restore ability of dev-build to skip patches (#20351)
* spack find -d spec grouping (#20028)
* spack smoke test support (#19987, #20298)
* macOS fixes (#20038, #21662)
* abstract spec comparisons (#20341)
* continuous integration (#17563)
* performance improvements for binary relocation (#19690, #20768)
* additional sanity checks for variants in builtin packages (#20373)
* do not pollute auto-generated configuration files with empty lists or
dicts (#20526)
plus assorted documentation (#20021, #20174) and package bug fixes/enhancements
(#19617, #19933, #19986, #20006, #20097, #20198, #20794, #20906, #21411).
# v0.16.0 (2020-11-18)
`v0.16.0` is a major feature release.
## Major features in this release
1. **New concretizer (experimental)** Our new backtracking concretizer is
now in Spack as an experimental feature. You will need to install
`clingo@master+python` and set `concretizer: clingo` in `config.yaml`
to use it. The original concretizer is not exhaustive and is not
guaranteed to find a solution if one exists. We encourage you to use
the new concretizer and to report any bugs you find with it. We
anticipate making the new concretizer the default and including all
required dependencies for it in Spack `v0.17`. For more details, see
#19501.
2. **spack test (experimental)** Users can add `test()` methods to their
packages to run smoke tests on installations with the new `spack test`
command (the old `spack test` is now `spack unit-test`). `spack test`
is environment-aware, so you can `spack install` an environment and
`spack test run` smoke tests on all of its packages. Historical test
logs can be perused with `spack test results`. Generic smoke tests for
MPI implementations, C, C++, and Fortran compilers as well as specific
smoke tests for 18 packages. This is marked experimental because the
test API (`self.run_test()`) is likely to change, but we encourage
users to upstream tests, and we will maintain and refactor any that
are added to mainline packages (#15702).
3. **spack develop** New `spack develop` command allows you to develop
several packages at once within a Spack environment. Running
`spack develop foo@v1` and `spack develop bar@v2` will check
out specific versions of `foo` and `bar` into subdirectories, which you
can then build incrementally with `spack install` (#15256).
4. **More parallelism** Spack previously installed the dependencies of a
_single_ spec in parallel; that was the only way to get parallelism from
individual specs. Entire environments can now be installed in parallel,
greatly accelerating builds of large environments: Spack parallelizes
entire environment builds (#18131).
5. **Customizable base images for spack containerize**
`spack containerize` previously only output a `Dockerfile` based
on `ubuntu`. You may now specify any base image of your choosing (#15028).
6. **more external finding** `spack external find` was added in `v0.15`,
but only `cmake` had support. `spack external find` can now find
`bison`, `cuda`, `findutils`, `flex`, `git`, `lustre`, `m4`, `mpich`,
`mvapich2`, `ncurses`, `openmpi`, `perl`, `spectrum-mpi`, `tar`, and
`texinfo` on your system and add them automatically to
`packages.yaml`.
7. **Support aocc, nvhpc, and oneapi compilers** We are aggressively
pursuing support for the newest vendor compilers, especially those for
the U.S. exascale and pre-exascale systems. Compiler classes and
auto-detection for `aocc`, `nvhpc`, `oneapi` are now in Spack (#19345,
#19294, #19330).
## Additional new features of note
* New `spack mark` command can be used to designate packages as explicitly
installed, so that `spack gc` will not garbage-collect them (#16662).
* `install_tree` can be customized with Spack's projection format (#18341)
* `sbang` now lives in the `install_tree` so that all users can access it (#11598)
* `csh` and `tcsh` users no longer need to set `SPACK_ROOT` before
sourcing `setup-env.csh` (#18225)
* Spec syntax now supports `variant=*` syntax for finding any package
that has a particular variant (#19381).
* Spack respects `SPACK_GNUPGHOME` variable for custom GPG directories (#17139)
* Spack now recognizes Graviton chips
## Major refactors
* Use spawn instead of fork on Python >= 3.8 on macOS (#18205)
* Use indexes for public build caches (#19101, #19117, #19132, #19141, #19209)
* `sbang` is an external package now (https://github.com/spack/sbang, #19582)
* `archspec` is an external package now (https://github.com/archspec/archspec, #19600)
## Deprecations and Removals
* `spack bootstrap` was deprecated in v0.14.0, and has now been removed.
* `spack setup` is deprecated as of v0.16.0.
* What was `spack test` is now called `spack unit-test`. `spack test` is
now the smoke testing feature in (2) above.
## Bugfixes
Some of the most notable bugfixes in this release include:
* Better warning messages for deprecated syntax in `packages.yaml` (#18013)
* `buildcache list --allarch` now works properly (#17827)
* Many fixes and tests for buildcaches and binary relocation (#15687,
#17455, #17418, #17455, #15687, #18110)
## Package Improvements
Spack now has 5050 total packages, 720 of which were added since `v0.15`.
* ROCm packages (`hip`, `aomp`, more) added by AMD (#19957, #19832, others)
* Many improvements for ARM support
* `llvm-flang`, `flang`, and `f18` removed, as `llvm` has real `flang`
support since Flang was merged to LLVM mainline
* Emerging support for `spack external find` and `spack test` in packages.
## Infrastructure
* Major infrastructure improvements to pipelines on `gitlab.spack.io`
* Support for testing PRs from forks (#19248) is being enabled for all
forks to enable rolling, up-to-date binary builds on `develop`
# v0.15.4 (2020-08-12)
This release contains one feature addition:
* Users can set `SPACK_GNUPGHOME` to override Spack's GPG path (#17139)
Several bugfixes for CUDA, binary packaging, and `spack -V`:
* CUDA package's `.libs` method searches for `libcudart` instead of `libcuda` (#18000)
* Don't set `CUDAHOSTCXX` in environments that contain CUDA (#17826)
* `buildcache create`: `NoOverwriteException` is a warning, not an error (#17832)
* Fix `spack buildcache list --allarch` (#17884)
* `spack -V` works with `releases/latest` tag and shallow clones (#17884)
And fixes for GitHub Actions and tests to ensure that CI passes on the
release branch (#15687, #17279, #17328, #17377, #17732).
# v0.15.3 (2020-07-28)
This release contains the following bugfixes:
* Fix handling of relative view paths (#17721)
* Fixes for binary relocation (#17418, #17455)
* Fix redundant printing of error messages in build environment (#17709)
It also adds a support script for Spack tutorials:
* Add a tutorial setup script to share/spack (#17705, #17722)
# v0.15.2 (2020-07-23)
This minor release includes two new features:
* Spack install verbosity is decreased, and more debug levels are added (#17546)
* The $spack/share/spack/keys directory contains public keys that may be optionally trusted for public binary mirrors (#17684)
This release also includes several important fixes:
* MPICC and related variables are now cleaned in the build environment (#17450)
* LLVM flang only builds CUDA offload components when +cuda (#17466)
* CI pipelines no longer upload user environments that can contain secrets to the internet (#17545)
* CI pipelines add bootstrapped compilers to the compiler config (#17536)
* `spack buildcache list` does not exit on first failure and lists later mirrors (#17565)
* Apple's "gcc" executable that is an apple-clang compiler does not generate a gcc compiler config (#17589)
* Mixed compiler toolchains are merged more naturally across different compiler suffixes (#17590)
* Cray Shasta platforms detect the OS properly (#17467)
* Additional minor fixes.
# v0.15.1 (2020-07-10)
This minor release includes several important fixes:
@@ -784,4 +558,4 @@ version of all the changes since `v0.9.1`.
- Switched from `nose` to `pytest` for unit tests.
- Unit tests take 1 minute now instead of 8
- Massively expanded documentation
- Docs are now hosted on [spack.readthedocs.io](https://spack.readthedocs.io)
- Docs are now hosted on [spack.readthedocs.io](http://spack.readthedocs.io)
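
The v0.16.0 notes above spell out how to opt into the experimental concretizer (install `clingo@master+python`, set `concretizer: clingo` in `config.yaml`) and how `spack external find` registers system tools. A minimal sketch of both, assuming a Spack checkout whose etc/spack scope is writable (paths illustrative):

# Sketch: opt into the clingo concretizer, per the v0.16.0 notes above.
spack install clingo@master+python
cat <<EOF > etc/spack/config.yaml
config:
  concretizer: clingo
EOF
# Register pre-installed system tools in packages.yaml (feature 6 above).
spack external find cmake bison
spack spec zlib   # concretize something to exercise the new solver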

View File

@@ -28,11 +28,9 @@ text in the license header:
External Packages
-------------------
Spack bundles most external dependencies in lib/spack/external. It also
includes the sbang tool directly in bin/sbang. These packages are covered
by various permissive licenses. A summary listing follows. See the
license included with each package for full details.
Spack bundles its external dependencies in lib/spack/external. These
packages are covered by various permissive licenses. A summary listing
follows. See the license included with each package for full details.
PackageName: argparse
PackageHomePage: https://pypi.python.org/pypi/argparse
@@ -78,10 +76,6 @@ PackageName: ruamel.yaml
PackageHomePage: https://yaml.readthedocs.io/
PackageLicenseDeclared: MIT
PackageName: sbang
PackageHomePage: https://github.com/spack/sbang
PackageLicenseDeclared: Apache-2.0 OR MIT
PackageName: six
PackageHomePage: https://pypi.python.org/pypi/six
PackageLicenseDeclared: MIT

View File

@@ -1,21 +1,20 @@
MIT License
Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
Copyright (c) 2013-2020 LLNS, LLC and other Spack Project Developers.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@@ -4,6 +4,7 @@
[![Linux Tests](https://github.com/spack/spack/workflows/linux%20tests/badge.svg)](https://github.com/spack/spack/actions)
[![Linux Builds](https://github.com/spack/spack/workflows/linux%20builds/badge.svg)](https://github.com/spack/spack/actions)
[![macOS Builds (nightly)](https://github.com/spack/spack/workflows/macOS%20builds%20nightly/badge.svg?branch=develop)](https://github.com/spack/spack/actions?query=workflow%3A%22macOS+builds+nightly%22)
[![Build Status](https://travis-ci.com/spack/spack.svg?branch=develop)](https://travis-ci.com/spack/spack)
[![codecov](https://codecov.io/gh/spack/spack/branch/develop/graph/badge.svg)](https://codecov.io/gh/spack/spack)
[![Read the Docs](https://readthedocs.org/projects/spack/badge/?version=latest)](https://spack.readthedocs.io)
[![Slack](https://spackpm.herokuapp.com/badge.svg)](https://spackpm.herokuapp.com)
@@ -21,7 +22,7 @@ builds of the same package. With Spack, you can build your software
*all* the ways you want to.
See the
[Feature Overview](https://spack.readthedocs.io/en/latest/features.html)
[Feature Overview](http://spack.readthedocs.io/en/latest/features.html)
for examples and highlights.
To install spack and your first package, make sure you have Python.
@@ -34,14 +35,14 @@ Then:
Documentation
----------------
[**Full documentation**](https://spack.readthedocs.io/) is available, or
[**Full documentation**](http://spack.readthedocs.io/) is available, or
run `spack help` or `spack help --all`.
Tutorial
----------------
We maintain a
[**hands-on tutorial**](https://spack.readthedocs.io/en/latest/tutorial.html).
[**hands-on tutorial**](http://spack.readthedocs.io/en/latest/tutorial.html).
It covers basic to advanced usage, packaging, developer features, and large HPC
deployments. You can do all of the exercises on your own laptop using a
Docker container.
@@ -73,33 +74,15 @@ When you send your request, make ``develop`` the destination branch on the
Your PR must pass Spack's unit tests and documentation tests, and must be
[PEP 8](https://www.python.org/dev/peps/pep-0008/) compliant. We enforce
these guidelines with our CI process. To run these tests locally, and for
helpful tips on git, see our
[Contribution Guide](https://spack.readthedocs.io/en/latest/contribution_guide.html).
these guidelines with [Travis CI](https://travis-ci.org/spack/spack). To
run these tests locally, and for helpful tips on git, see our
[Contribution Guide](http://spack.readthedocs.io/en/latest/contribution_guide.html).
Spack's `develop` branch has the latest contributions. Pull requests
should target `develop`, and users who want the latest package versions,
features, etc. can use `develop`.
Releases
--------
For multi-user site deployments or other use cases that need very stable
software installations, we recommend using Spack's
[stable releases](https://github.com/spack/spack/releases).
Each Spack release series also has a corresponding branch, e.g.
`releases/v0.14` has `0.14.x` versions of Spack, and `releases/v0.13` has
`0.13.x` versions. We backport important bug fixes to these branches but
we do not advance the package versions or make other changes that would
change the way Spack concretizes dependencies within a release branch.
So, you can base your Spack deployment on a release branch and `git pull`
to get fixes, without the package churn that comes with `develop`.
The latest release is always available with the `releases/latest` tag.
See the [docs on releases](https://spack.readthedocs.io/en/latest/developer_guide.html#releases)
for more details.
Spack uses a rough approximation of the
[Git Flow](http://nvie.com/posts/a-successful-git-branching-model/)
branching model. The ``develop`` branch contains the latest
contributions, and ``master`` is always tagged and points to the latest
stable release.
Code of Conduct
------------------------
@@ -120,7 +103,7 @@ If you are referencing Spack in a publication, please cite the following paper:
* Todd Gamblin, Matthew P. LeGendre, Michael R. Collette, Gregory L. Lee,
Adam Moody, Bronis R. de Supinski, and W. Scott Futral.
[**The Spack Package Manager: Bringing Order to HPC Software Chaos**](https://www.computer.org/csdl/proceedings/sc/2015/3723/00/2807623.pdf).
[**The Spack Package Manager: Bringing Order to HPC Software Chaos**](http://www.computer.org/csdl/proceedings/sc/2015/3723/00/2807623.pdf).
In *Supercomputing 2015 (SC15)*, Austin, Texas, November 15-20 2015. LLNL-CONF-669890.
License
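
The Releases section shown above recommends basing stable deployments on a release branch and pulling backported fixes without package churn. A minimal sketch of that workflow (branch name illustrative):

# Sketch: deploy from a release branch, per the README text above.
git clone --branch releases/v0.14 https://github.com/spack/spack.git
cd spack
git pull          # pick up backported bug fixes only
git fetch --tags  # the releases/latest tag always points at the newest release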

bin/sbang (163 lines)
View File

@@ -1,103 +1,114 @@
#!/bin/sh
#!/bin/bash
#
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# sbang project developers. See the top-level COPYRIGHT file for details.
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
# `sbang`: Run scripts with long shebang lines.
#
# Many operating systems limit the length and number of possible
# arguments in shebang lines, making it hard to use interpreters that are
# deep in the directory hierarchy or require special arguments.
# Many operating systems limit the length of shebang lines, making it
# hard to use interpreters that are deep in the directory hierarchy.
# `sbang` can run such scripts, either as a shebang interpreter, or
# directly on the command line.
#
# To use, put the long shebang on the second line of your script, and
# make sbang the interpreter, like this:
# Usage
# -----------------------------
# Suppose you have a script, long-shebang.sh, like this:
#
# #!/bin/sh /path/to/sbang
# #!/long/path/to/real/interpreter with arguments
# 1 #!/very/long/path/to/some/interpreter
# 2
# 3 echo "success!"
#
# `sbang` will run the real interpreter with the script as its argument.
# Invoking this script will result in an error on some OS's. On
# Linux, you get this:
#
# See https://github.com/spack/sbang for more details.
# $ ./long-shebang.sh
# -bash: ./long: /very/long/path/to/some/interp: bad interpreter:
# No such file or directory
#
# On Mac OS X, the system simply assumes the interpreter is the shell
# and tries to run with it, which is likely not what you want.
#
#
# `sbang` on the command line
# -----------------------------
# You can use `sbang` in two ways. The first is to use it directly,
# from the command line, like this:
#
# $ sbang ./long-shebang.sh
# success!
#
#
# `sbang` as the interpreter
# -----------------------------
# You can also use `sbang` *as* the interpreter for your script. Put
# `#!/bin/bash /path/to/sbang` on line 1, and move the original
# shebang to line 2 of the script:
#
# 1 #!/bin/bash /path/to/sbang
# 2 #!/long/path/to/real/interpreter with arguments
# 3
# 4 echo "success!"
#
# $ ./long-shebang.sh
# success!
#
# On Linux, you could shorten line 1 to `#!/path/to/sbang`, but other
# operating systems like Mac OS X require the interpreter to be a
# binary, so it's best to use `sbang` as a `bash` argument.
# Obviously, for this to work, `sbang` needs to have a short enough
# path that *it* will run without hitting OS limits.
#
# For Lua scripts, the second line can't start with #!, as # is not
# the comment character in lua (even though lua ignores #! on the
# *first* line of a script). So, instrument a lua script like this,
# using -- instead of # on the second line:
#
# 1 #!/bin/bash /path/to/sbang
# 2 --!/long/path/to/lua with arguments
# 3
# 4 print "success!"
#
# How it works
# -----------------------------
# `sbang` is a very simple bash script. It looks at the first two
# lines of a script argument and runs the last line starting with
# `#!`, with the script as an argument. It also forwards arguments.
#
# Generic error handling
die() {
    echo "$@" 1>&2
    exit 1
}

# set SBANG_DEBUG to make the script print what would normally be executed.
exec="exec"
if [ -n "${SBANG_DEBUG}" ]; then
    exec="echo "
fi

# First argument is the script we want to actually run.
script="$1"

# ensure that the script actually exists
if [ -z "$script" ]; then
    die "error: sbang requires exactly one argument"
elif [ ! -f "$script" ]; then
    die "$script: no such file or directory"
fi

# Search the first two lines of script for interpreters.
lines=0
while read -r line && [ $lines -ne 2 ]; do
    if [ "${line#\#!}" != "$line" ]; then
        shebang_line="${line#\#!}"
    elif [ "${line#//!}" != "$line" ]; then     # // comments
        shebang_line="${line#//!}"
    elif [ "${line#--!}" != "$line" ]; then     # -- lua comments
        shebang_line="${line#--!}"
    elif [ "${line#<?php\ }" != "$line" ]; then # php comments
        shebang_line="${line#<?php\ \#!}"
        shebang_line="${shebang_line%\ ?>}"
    fi
    lines=$((lines+1))
done < "$script"

# Some scripts pass parameters on the sbang line, like ones in intltool:
#     #!/<spack-long-path>/perl -w
# so the parsed shebang line is kept with all of its arguments.

# error if we did not find any interpreter
if [ -z "$shebang_line" ]; then
    die "error: sbang found no interpreter in $script"
fi

# parse out the interpreter and first argument
IFS=' ' read -r interpreter arg1 rest <<EOF
$shebang_line
EOF

# Determine if the interpreter is a particular program, accounting for the
# '#!/usr/bin/env PROGRAM' convention. So:
#
#     interpreter_is perl
#
# will be true for '#!/usr/bin/perl' and '#!/usr/bin/env perl'
interpreter_is() {
    if [ "${interpreter##*/}" = "$1" ]; then
        return 0
    elif [ "$interpreter" = "/usr/bin/env" ] && [ "$arg1" = "$1" ]; then
        return 0
    else
        return 1
    fi
}

if interpreter_is "sbang"; then
    die "error: refusing to re-execute sbang to avoid infinite loop."
fi

# Finally invoke the real shebang line.
# ruby and perl need -x to ignore the first line of input (the sbang line)
#
if interpreter_is perl || interpreter_is ruby; then
    # shellcheck disable=SC2086
    $exec $shebang_line -x "$@"
else
    # shellcheck disable=SC2086
    $exec $shebang_line "$@"
fi

View File

@@ -59,8 +59,6 @@ if 'ruamel.yaml' in sys.modules:
if 'ruamel' in sys.modules:
    del sys.modules['ruamel']

# Once we've set up the system path, run the spack main method
import spack.main  # noqa
if __name__ == "__main__":
    sys.exit(spack.main.main())

View File

@@ -16,17 +16,7 @@
config:
  # This is the path to the root of the Spack install tree.
  # You can use $spack here to refer to the root of the spack instance.
  install_tree:
    root: $spack/opt/spack
    projections:
      all: "${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}"
  # install_tree can include an optional padded length (int or boolean)
  # default is False (do not pad)
  # if padded_length is True, Spack will pad as close to the system max path
  # length as possible
  # if padded_length is an integer, Spack will pad to that many characters,
  # assuming it is higher than the length of the install_tree root.
  # padded_length: 128
  install_tree: ~/.spack/opt/spack
# Locations where templates should be found
@@ -34,10 +24,14 @@ config:
    - $spack/share/spack/templates

  # Default directory layout
  install_path_scheme: "${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}"

  # Locations where different types of modules should be installed.
  module_roots:
    tcl: $spack/share/spack/modules
    lmod: $spack/share/spack/lmod
    tcl: ~/.spack/share/spack/modules
    lmod: ~/.spack/share/spack/lmod
# Temporary locations Spack can try to use for builds.
@@ -70,14 +64,10 @@ config:
    - ~/.spack/stage
  # - $spack/var/spack/stage

  # Directory in which to run tests and store test results.
  # Tests will be stored in directories named by date/time and package
  # name/hash.
  test_stage: ~/.spack/test

  # Cache directory for already downloaded source tarballs and archived
  # repositories. This can be purged with `spack clean --downloads`.
  source_cache: $spack/var/spack/cache
  source_cache: ~/.spack/var/spack/cache

  # Cache directory for miscellaneous files, like the package index.
@@ -149,20 +139,6 @@ config:
  ccache: false

  # The concretization algorithm to use in Spack. Options are:
  #
  # 'original': Spack's original greedy, fixed-point concretizer. This
  #             algorithm can make decisions too early and will not backtrack
  #             sufficiently for many specs.
  #
  # 'clingo': Uses a logic solver under the hood to solve DAGs with full
  #           backtracking and optimization for user preferences.
  #
  # 'clingo' currently requires the clingo ASP solver to be installed and
  # built with python bindings. 'original' is built in.
  concretizer: original

  # How long to wait to lock the Spack installation database. This lock is used
  # when Spack needs to manage its own package metadata and all operations are
  # expected to complete within the default time limit. The timeout should
@@ -177,13 +153,11 @@ config:
  # never succeed.
  package_lock_timeout: null

  # Control whether Spack embeds RPATH or RUNPATH attributes in ELF binaries.
  # Has no effect on macOS. DO NOT MIX these within the same install tree.
  # See the Spack documentation for details.
  shared_linking: 'rpath'

  # Set to 'false' to allow installation on filesystems that don't allow setgid bit
  # manipulation by an unprivileged user (e.g. AFS)
  allow_sgid: true

View File

@@ -21,14 +21,11 @@ packages:
    - gcc
    - intel
    providers:
      elf:
        - libelf
      unwind:
        - apple-libunwind
      elf: [libelf]
      unwind: [apple-libunwind]
  apple-libunwind:
    buildable: false
    externals:
      # Apple bundles libunwind version 35.3 with macOS 10.9 and later,
      # although the version number used here isn't critical
      - spec: apple-libunwind@35.3
        prefix: /usr
    paths:
      apple-libunwind@35.3: /usr
    buildable: False

View File

@@ -1,2 +1,2 @@
mirrors:
  spack-public: https://mirror.spack.io
  spack-public: https://spack-llnl-mirror.s3-us-west-2.amazonaws.com/

View File

@@ -15,40 +15,38 @@
# -------------------------------------------------------------------------
packages:
  all:
    compiler: [gcc, intel, pgi, clang, xl, nag, fj, aocc]
    compiler: [gcc, intel, pgi, clang, xl, nag, fj]
    providers:
      D: [ldc]
      awk: [gawk]
      blas: [openblas, amdblis]
      blas: [openblas]
      daal: [intel-daal]
      elf: [elfutils]
      fftw-api: [fftw, amdfftw]
      fftw-api: [fftw]
      gl: [mesa+opengl, mesa18+opengl, opengl]
      gl: [mesa+opengl, opengl]
      glx: [mesa+glx, mesa18+glx, opengl]
      glx: [mesa+glx, opengl]
      glu: [mesa-glu, openglu]
      golang: [gcc]
      iconv: [libiconv]
      ipp: [intel-ipp]
      java: [openjdk, jdk, ibm-java]
      jpeg: [libjpeg-turbo, libjpeg]
      lapack: [openblas, amdlibflame]
      lapack: [openblas]
      mariadb-client: [mariadb-c-client, mariadb]
      mkl: [intel-mkl]
      mpe: [mpe2]
      mpi: [openmpi, mpich]
      mysql-client: [mysql, mariadb-c-client]
      opencl: [pocl]
      osmesa: [mesa+osmesa, mesa18+osmesa]
      pil: [py-pillow]
      pkgconfig: [pkgconf, pkg-config]
      rpc: [libtirpc]
      scalapack: [netlib-scalapack, amdscalapack]
      scalapack: [netlib-scalapack]
      sycl: [hipsycl]
      szip: [libszip, libaec]
      tbb: [intel-tbb]
      unwind: [libunwind]
      yacc: [bison, byacc]
      flame: [libflame, amdlibflame]
      sycl: [hipsycl]
    permissions:
      read: world
      write: user

View File

@@ -0,0 +1,7 @@
upstreams:
  global:
    install_tree: $spack/opt/spack
    modules:
      tcl: $spack/share/spack/modules
      lmod: $spack/share/spack/lmod
      dotkit: $spack/share/spack/dotkit

View File

@@ -1,10 +1,10 @@
<html>
  <head>
    <meta http-equiv="refresh" content="0; url=https://spack.readthedocs.io/" />
    <meta http-equiv="refresh" content="0; url=http://spack.readthedocs.io/" />
  </head>
  <body>
    <p>
      This page has moved to <a href="https://spack.readthedocs.io/">https://spack.readthedocs.io/</a>
      This page has moved to <a href="http://spack.readthedocs.io/">http://spack.readthedocs.io/</a>
    </p>
  </body>
</html>

View File

@@ -31,7 +31,7 @@ colorized output.
.. code-block:: console

   $ spack --color always | less -R
--------------------------
Listing available packages
@@ -132,28 +132,32 @@ If ``mpileaks`` depends on other packages, Spack will install the
dependencies first. It then fetches the ``mpileaks`` tarball, expands
it, verifies that it was downloaded without errors, builds it, and
installs it in its own directory under ``$SPACK_ROOT/opt``. You'll see
a number of messages from Spack, a lot of build output, and a message
that the package is installed. Add one or more debug options (``-d``)
to get increasingly detailed output.
.. code-block:: console

   $ spack install mpileaks
   ... dependency build output ...
   ==> Installing mpileaks-1.0-ph7pbnhl334wuhogmugriohcwempqry2
   ==> No binary for mpileaks-1.0-ph7pbnhl334wuhogmugriohcwempqry2 found: installing from source
   ==> mpileaks: Executing phase: 'autoreconf'
   ==> mpileaks: Executing phase: 'configure'
   ==> mpileaks: Executing phase: 'build'
   ==> mpileaks: Executing phase: 'install'
   [+] ~/spack/opt/linux-rhel7-broadwell/gcc-8.1.0/mpileaks-1.0-ph7pbnhl334wuhogmugriohcwempqry2

   ==> Installing mpileaks
   ==> mpich is already installed in ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4.
   ==> callpath is already installed in ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/callpath@1.0.2-5dce4318.
   ==> adept-utils is already installed in ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/adept-utils@1.0-5adef8da.
   ==> Trying to fetch from https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz
   ######################################################################## 100.0%
   ==> Staging archive: ~/spack/var/spack/stage/mpileaks@1.0%gcc@4.4.7 arch=linux-debian7-x86_64-59f6ad23/mpileaks-1.0.tar.gz
   ==> Created stage in ~/spack/var/spack/stage/mpileaks@1.0%gcc@4.4.7 arch=linux-debian7-x86_64-59f6ad23.
   ==> No patches needed for mpileaks.
   ==> Building mpileaks.
   ... build output ...
   ==> Successfully installed mpileaks.
   Fetch: 2.16s. Build: 9.82s. Total: 11.98s.
   [+] ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpileaks@1.0-59f6ad23
The last line, with the ``[+]``, indicates where the package is
installed.
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Building a specific version
^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -280,102 +284,6 @@ and removed everything that is not either:
You can check :ref:`cmd-spack-find-metadata` to see how to query for explicitly installed packages
or :ref:`dependency-types` for a more thorough treatment of dependency types.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Marking packages explicit or implicit
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
By default, Spack will mark packages a user installs as explicitly installed,
while all of its dependencies will be marked as implicitly installed. Packages
can be marked manually as explicitly or implicitly installed by using
``spack mark``. This can be used in combination with ``spack gc`` to clean up
packages that are no longer required.
.. code-block:: console

   $ spack install m4
   ==> 29005: Installing libsigsegv
   [...]
   ==> 29005: Installing m4
   [...]

   $ spack install m4 ^libsigsegv@2.11
   ==> 39798: Installing libsigsegv
   [...]
   ==> 39798: Installing m4
   [...]

   $ spack find -d
   ==> 4 installed packages
   -- linux-fedora32-haswell / gcc@10.1.1 --------------------------
   libsigsegv@2.11

   libsigsegv@2.12

   m4@1.4.18
       libsigsegv@2.12

   m4@1.4.18
       libsigsegv@2.11

   $ spack gc
   ==> There are no unused specs. Spack's store is clean.

   $ spack mark -i m4 ^libsigsegv@2.11
   ==> m4@1.4.18 : marking the package implicit

   $ spack gc
   ==> The following packages will be uninstalled:

       -- linux-fedora32-haswell / gcc@10.1.1 --------------------------
       5fj7p2o libsigsegv@2.11  c6ensc6 m4@1.4.18

   ==> Do you want to proceed? [y/N]
In the example above, we ended up with two versions of ``m4`` since they depend
on different versions of ``libsigsegv``. ``spack gc`` will not remove any of
the packages since both versions of ``m4`` have been installed explicitly
and both versions of ``libsigsegv`` are required by the ``m4`` packages.
``spack mark`` can also be used to implement upgrade workflows. The following
example demonstrates how ``spack mark`` and ``spack gc`` can be used to
only keep the current version of a package installed.
When updating Spack via ``git pull``, new versions for either ``libsigsegv``
or ``m4`` might be introduced. This will cause Spack to install duplicates.
Since we only want to keep one version, we mark everything as implicitly
installed before updating Spack. If there is no new version for either of the
packages, ``spack install`` will simply mark them as explicitly installed and
``spack gc`` will not remove them.
.. code-block:: console

   $ spack install m4
   ==> 62843: Installing libsigsegv
   [...]
   ==> 62843: Installing m4
   [...]

   $ spack mark -i -a
   ==> m4@1.4.18 : marking the package implicit

   $ git pull
   [...]

   $ spack install m4
   [...]
   ==> m4@1.4.18 : marking the package explicit
   [...]

   $ spack gc
   ==> There are no unused specs. Spack's store is clean.
When using this workflow for installations that contain more packages, care
has to be taken to either only mark selected packages or issue ``spack install``
for all packages that should be kept.
You can check :ref:`cmd-spack-find-metadata` to see how to query for explicitly
or implicitly installed packages.
^^^^^^^^^^^^^^^^^^^^^^^^^
Non-Downloadable Tarballs
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -421,6 +329,85 @@ the tarballs in question to it (see :ref:`mirrors`):
   $ spack install galahad
-----------------------------
Deprecating insecure packages
-----------------------------
``spack deprecate`` allows for the removal of insecure packages with
minimal impact to their dependents.
.. warning::

   The ``spack deprecate`` command is designed for use only in
   extraordinary circumstances. This is a VERY big hammer to be used
   with care.
The ``spack deprecate`` command will remove one package and replace it
with another by replacing the deprecated package's prefix with a link
to the deprecator package's prefix.
.. warning::

   The ``spack deprecate`` command makes no promises about binary
   compatibility. It is up to the user to ensure the deprecator is
   suitable for the deprecated package.
Spack tracks concrete deprecated specs and ensures that no future packages
concretize to a deprecated spec.
The first spec given to the ``spack deprecate`` command is the package
to deprecate. It is an abstract spec that must describe a single
installed package. The second spec argument is the deprecator
spec. By default it must be an abstract spec that describes a single
installed package, but with the ``-i/--install-deprecator`` it can be
any abstract spec that Spack will install and then use as the
deprecator. The ``-I/--no-install-deprecator`` option will ensure
the default behavior.
By default, ``spack deprecate`` will deprecate all dependencies of the
deprecated spec, replacing each by the dependency of the same name in
the deprecator spec. The ``-d/--dependencies`` option will ensure the
default, while the ``-D/--no-dependencies`` option will deprecate only
the root of the deprecate spec in favor of the root of the deprecator
spec.
``spack deprecate`` can use symbolic links or hard links. The default
behavior is symbolic links, but the ``-l/--link-type`` flag can take
options ``hard`` or ``soft``.
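
For illustration only, a hypothetical invocation (the specs are assumed, not
taken from this document) replacing a vulnerable installation with a patched
one might look like:

.. code-block:: console

   $ spack deprecate openssl@1.0.1u openssl@1.0.2u
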
-----------------------
Verifying installations
-----------------------
The ``spack verify`` command can be used to verify the validity of
Spack-installed packages any time after installation.
At installation time, Spack creates a manifest of every file in the
installation prefix. For links, Spack tracks the mode, ownership, and
destination. For directories, Spack tracks the mode, and
ownership. For files, Spack tracks the mode, ownership, modification
time, hash, and size. The Spack verify command will check, for every
file in each package, whether any of those attributes have changed. It
will also check for newly added files or deleted files from the
installation prefix. Spack can either check all installed packages
using the ``-a,--all`` option, or accept specs listed on the command
line to verify.
The ``spack verify`` command can also verify for individual files that
they haven't been altered since installation time. If the given file
is not in a Spack installation prefix, Spack will report that it is
not owned by any package. To check individual files instead of specs,
use the ``-f,--files`` option.
Spack installation manifests are part of the tarball signed by Spack
for binary package distribution. When installed from a binary package,
Spack uses the packaged installation manifest instead of creating one
at install time.
The ``spack verify`` command also accepts the ``-l,--local`` option to
check only local packages (as opposed to those used transparently from
``upstream`` spack instances) and the ``-j,--json`` option to output
machine-readable json data for any errors.
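
As a sketch based only on the options described above, typical invocations
might look like (output omitted):

.. code-block:: console

   $ spack verify -a                    # check every installed package
   $ spack verify libelf                # check packages matching a spec
   $ spack verify -f /path/to/a/file    # check an individual file
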
-------------------------
Seeing installed packages
@@ -689,95 +676,6 @@ structured the way you want:
"hash": "zvaa4lhlhilypw5quj3akyd3apbq5gap"
}
------------------------
Using installed packages
------------------------
There are several different ways to use Spack packages once you have
installed them. As you've seen, Spack packages are installed into long
paths with hashes, and you need a way to get them into your path. The
easiest way is to use :ref:`spack load <cmd-spack-load>`, which is
described in the next section.
Some more advanced ways to use Spack packages include:
* :ref:`environments <environments>`, which you can use to bundle a
number of related packages to "activate" all at once, and
* :ref:`environment modules <modules>`, which are commonly used on
supercomputing clusters. Spack generates module files for every
installation automatically, and you can customize how this is done.
.. _cmd-spack-load:
^^^^^^^^^^^^^^^^^^^^^^^
``spack load / unload``
^^^^^^^^^^^^^^^^^^^^^^^
If you have :ref:`shell support <shell-support>` enabled you can use the
``spack load`` command to quickly get a package on your ``PATH``.
For example this will add the ``mpich`` package built with ``gcc`` to
your path:
.. code-block:: console

   $ spack install mpich %gcc@4.4.7
   # ... wait for install ...
   $ spack load mpich %gcc@4.4.7
   $ which mpicc
   ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4/bin/mpicc
These commands will add appropriate directories to your ``PATH``,
``MANPATH``, ``CPATH``, and ``LD_LIBRARY_PATH`` according to the
:ref:`prefix inspections <customize-env-modifications>` defined in your
modules configuration. When you no longer want to use a package, you
can type unload or unuse similarly:
.. code-block:: console

   $ spack unload mpich %gcc@4.4.7
"""""""""""""""
Ambiguous specs
"""""""""""""""
If a spec used with load/unload is ambiguous (i.e. more than one
installed package matches it), then Spack will warn you:
.. code-block:: console

   $ spack load libelf
   ==> Error: libelf matches multiple packages.
     Matching packages:
       qmm4kso libelf@0.8.13%gcc@4.4.7 arch=linux-debian7-x86_64
       cd2u6jt libelf@0.8.13%intel@15.0.0 arch=linux-debian7-x86_64
     Use a more specific spec
You can either type the ``spack load`` command again with a fully
qualified argument, or you can add just enough extra constraints to
identify one package. For example, above, the key differentiator is
that one ``libelf`` is built with the Intel compiler, while the other
used ``gcc``. You could therefore just type:
.. code-block:: console

   $ spack load libelf %intel
To identify just the one built with the Intel compiler. If you want to be
*very* specific, you can load it by its hash. For example, to load the
first ``libelf`` above, you would run:
.. code-block:: console

   $ spack load /qmm4kso
We'll learn more about Spack's spec syntax in the next section.
.. _sec-specs:
--------------------
@@ -797,11 +695,11 @@ Here is an example of a much longer spec than we've seen thus far:
.. code-block:: none

   mpileaks @1.2:1.4 %gcc@4.7.5 +debug -qt target=x86_64 ^callpath @1.1 %gcc@4.7.2
   mpileaks @1.2:1.4 %gcc@4.7.5 +debug -qt arch=bgq_os ^callpath @1.1 %gcc@4.7.2
If provided to ``spack install``, this will install the ``mpileaks``
library at some version between ``1.2`` and ``1.4`` (inclusive),
built using ``gcc`` at version 4.7.5 for a generic ``x86_64`` architecture,
built using ``gcc`` at version 4.7.5 for the Blue Gene/Q architecture,
with debug options enabled, and without Qt support. Additionally, it
says to link it with the ``callpath`` library (which it depends on),
and to build callpath with ``gcc`` 4.7.2. Most specs will not be as
@@ -1336,88 +1234,6 @@ add a version specifier to the spec:
Notice that the package versions that provide insufficient MPI
versions are now filtered out.
.. _extensions:
---------------------------

View File

@@ -57,13 +57,10 @@ directory. Here's an example of an external configuration:
   packages:
     openmpi:
       externals:
       - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
         prefix: /opt/openmpi-1.4.3
       - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
         prefix: /opt/openmpi-1.4.3-debug
       - spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
         prefix: /opt/openmpi-1.6.5-intel
       paths:
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
         openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
This example lists three installations of OpenMPI, one built with GCC,
one built with GCC and debug information, and another built with Intel.
@@ -79,15 +76,13 @@ of the installation prefixes. The following example says that module
.. code-block:: yaml
   cmake:
     externals:
     - spec: cmake@3.7.2
       modules:
       - CMake/3.7.2
     modules:
       cmake@3.7.2: CMake/3.7.2
Each ``packages.yaml`` begins with a ``packages:`` attribute, followed
by a list of package names. To specify externals, add an ``externals:``
attribute under the package name, which lists externals.
Each external should specify a ``spec:`` string that should be as
well-defined as reasonably possible.

Each ``packages.yaml`` begins with a ``packages:`` token, followed
by a list of package names. To specify externals, add a ``paths`` or ``modules``
token under the package name, which lists externals in a
``spec: /path`` or ``spec: module-name`` format. Each spec should be as
well-defined as reasonably possible. If a
package lacks a spec component, such as missing a compiler or
package version, then Spack will guess the missing component based
@@ -111,13 +106,10 @@ be:
   packages:
     openmpi:
       externals:
       - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
         prefix: /opt/openmpi-1.4.3
       - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
         prefix: /opt/openmpi-1.4.3-debug
       - spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
         prefix: /opt/openmpi-1.6.5-intel
       paths:
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
         openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
       buildable: False
The addition of the ``buildable`` flag tells Spack that it should never build
@@ -145,13 +137,10 @@ but more conveniently:
     mpi:
       buildable: False
     openmpi:
       externals:
       - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
         prefix: /opt/openmpi-1.4.3
       - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
         prefix: /opt/openmpi-1.4.3-debug
       - spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
         prefix: /opt/openmpi-1.6.5-intel
       paths:
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
         openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
         openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
Implementations can also be listed immediately under the virtual they provide:
@@ -183,9 +172,8 @@ After running this command your ``packages.yaml`` may include new entries:
   packages:
     cmake:
       externals:
       - spec: cmake@3.17.2
         prefix: /usr
       paths:
         cmake@3.17.2: /usr
Generally this is useful for detecting a small set of commonly-used packages;
for now this is generally limited to finding build-only dependencies.
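
The command that produces these entries is ``spack external find``; assuming
CMake is already in your ``PATH``, limiting detection to a single package
might look like:

.. code-block:: console

   $ spack external find cmake
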

View File

@@ -29,7 +29,6 @@ on these ideas for each distinct build system that Spack supports:
:maxdepth: 1
:caption: Make-incompatible
build_systems/mavenpackage
build_systems/sconspackage
build_systems/wafpackage

View File

@@ -418,13 +418,9 @@ Adapt the following example. Be sure to maintain the indentation:
   # other content ...

   intel-mkl:
     externals:
     - spec: "intel-mkl@2018.2.199 arch=linux-centos6-x86_64"
       modules:
       - intel-mkl/18/18.0.2
     - spec: "intel-mkl@2018.3.222 arch=linux-centos6-x86_64"
       modules:
       - intel-mkl/18/18.0.3
     modules:
       intel-mkl@2018.2.199 arch=linux-centos6-x86_64: intel-mkl/18/18.0.2
       intel-mkl@2018.3.222 arch=linux-centos6-x86_64: intel-mkl/18/18.0.3
The version numbers for the ``intel-mkl`` specs defined here correspond to file
and directory names that Intel uses for its products because they were adopted
@@ -455,16 +451,12 @@ mechanism.
   packages:
     intel-parallel-studio:
       externals:
       - spec: "intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64"
         modules:
         - intel/18/18.0.2
       - spec: "intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64"
         modules:
         - intel/18/18.0.3
       modules:
         intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64: intel/18/18.0.2
         intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64: intel/18/18.0.3
       buildable: False
One additional example illustrates the use of ``prefix:`` or ``paths:`` instead of
``modules:``, useful when external modulefiles are not available or not
suitable:
@@ -472,15 +464,13 @@ suitable:
   packages:
     intel-parallel-studio:
       externals:
       - spec: "intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal"
         prefix: /opt/intel
       - spec: "intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal"
         prefix: /opt/intel
       paths:
         intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal: /opt/intel
         intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal: /opt/intel
       buildable: False
Note that for the Intel packages discussed here, the directory values in the
``prefix:`` or ``paths:`` entries must be the high-level and typically version-less
"installation directory" that has been used by Intel's product installer.
Such a directory will typically accumulate various product versions. Amongst
them, Spack will select the correct version-specific product directory based on

View File

@@ -1,102 +0,0 @@
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
   Spack Project Developers. See the top-level COPYRIGHT file for details.

   SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _mavenpackage:
------------
MavenPackage
------------
Apache Maven is a general-purpose build system that does not rely
on Makefiles to build software. It is designed for building and
managing Java-based projects.
^^^^^^
Phases
^^^^^^
The ``MavenPackage`` base class comes with the following phases:

#. ``build`` - compile code and package into a JAR file
#. ``install`` - copy to installation prefix

By default, these phases run:

.. code-block:: console

   $ mvn package
   $ install . <prefix>
^^^^^^^^^^^^^^^
Important files
^^^^^^^^^^^^^^^
Maven packages can be identified by the presence of a ``pom.xml`` file.
This file lists dependencies and other metadata about the project.
There may also be configuration files in the ``.mvn`` directory.
^^^^^^^^^^^^^^^^^^^^^^^^^
Build system dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^
Maven requires the ``mvn`` executable to build the project. It also
requires Java at both build- and run-time. Because of this, the base
class automatically adds the following dependencies:
.. code-block:: python

   depends_on('java', type=('build', 'run'))
   depends_on('maven', type='build')
In the ``pom.xml`` file, you may see sections like:
.. code-block:: xml

   <requireJavaVersion>
     <version>[1.7,)</version>
   </requireJavaVersion>
   <requireMavenVersion>
     <version>[3.5.4,)</version>
   </requireMavenVersion>
This specifies the versions of Java and Maven that are required to
build the package. See
https://docs.oracle.com/middleware/1212/core/MAVEN/maven_version.htm#MAVEN402
for a description of this version range syntax. In this case, you
should add:
.. code-block:: python

   depends_on('java@7:', type='build')
   depends_on('maven@3.5.4:', type='build')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Passing arguments to the build phase
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The default build and install phases should be sufficient to install
most packages. However, you may want to pass additional flags to
the build phase. For example:
.. code-block:: python

   def build_args(self):
       return [
           '-Pdist,native',
           '-Dtar',
           '-Dmaven.javadoc.skip=true'
       ]
^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
For more information on the Maven build system, see:
https://maven.apache.org/index.html

View File

@@ -81,24 +81,6 @@ you'll need to define a function for it like so:
   self.setup_py('configure')
^^^^^^
Wheels
^^^^^^
Some Python packages are closed-source and distributed as wheels.
Instead of using the ``PythonPackage`` base class, you should extend
the ``Package`` base class and implement the following custom installation
procedure:
.. code-block:: python

   def install(self, spec, prefix):
       pip = which('pip')
       pip('install', self.stage.archive_file, '--prefix={0}'.format(prefix))
This will require a dependency on pip, as mentioned below.
^^^^^^^^^^^^^^^
Important files
^^^^^^^^^^^^^^^
@@ -113,27 +95,6 @@ file should be considered to be the truth. As dependencies are added or
removed, the documentation is much more likely to become outdated than
the ``setup.py``.
The Python ecosystem has evolved significantly over the years. Before
setuptools became popular, most packages listed their dependencies in a
``requirements.txt`` file. Once setuptools took over, these dependencies
were listed directly in the ``setup.py``. Newer PEPs introduced additional
files, like ``setup.cfg`` and ``pyproject.toml``. You should look out for
all of these files, as they may all contain important information about
package dependencies.
Some Python packages are closed-source and are distributed as Python
wheels. For example, ``py-azureml-sdk`` downloads a ``.whl`` file. This
file is simply a zip file, and can be extracted using:
.. code-block:: console

   $ unzip *.whl
The zip file will not contain a ``setup.py``, but it will contain a
``METADATA`` file which contains all the information you need to
write a ``package.py`` build recipe.
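
As a sketch, after unpacking the wheel the metadata can be read directly
(assuming the standard layout, where metadata lives in a ``*.dist-info``
directory):

.. code-block:: console

   $ cat *.dist-info/METADATA
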
^^^^^^^^^^^^^^^^^^^^^^^
Finding Python packages
^^^^^^^^^^^^^^^^^^^^^^^
@@ -144,9 +105,8 @@ it the only option for developers who want a simple installation.
Search for "PyPI <package-name>" to find the download page. Note that
some pages are versioned, and the first result may not be the newest
version. Click on the "Latest Version" button to the top right to see
if a newer version is available. The download page is usually at::

   https://pypi.org/project/<package-name>
^^^^^^^^^^^
Description
@@ -191,67 +151,39 @@ replacing this with the requested version. Obviously, if Spack cannot
guess the version correctly, or if non-version-related things change
in the URL, Spack cannot substitute the version properly.
Once upon a time, PyPI offered nice, simple download URLs like::

   https://pypi.python.org/packages/source/n/numpy/numpy-1.13.1.zip
As you can see, the version is 1.13.1. It probably isn't hard to guess
what URL to use to download version 1.12.0, and Spack was perfectly
capable of performing this calculation.
However, PyPI switched to a new download URL format::

   https://pypi.python.org/packages/c0/3a/40967d9f5675fbb097ffec170f59c2ba19fc96373e73ad47c2cae9a30aed/numpy-1.13.1.zip#md5=2c3c0f4edf720c3a7b525dacc825b9ae

and more recently::

   https://files.pythonhosted.org/packages/b0/2b/497c2bb7c660b2606d4a96e2035e92554429e139c6c71cdff67af66b58d2/numpy-1.14.3.zip
As you can imagine, it is impossible for Spack to guess what URL to
use to download version 1.12.0 given this URL. There is a solution,
however. PyPI offers a new hidden interface for downloading
Python packages that does not include a hash in the URL::

   https://pypi.io/packages/source/n/numpy/numpy-1.13.1.zip
This URL redirects to the https://files.pythonhosted.org URL. The general
syntax for this https://pypi.io URL is::

   https://pypi.io/packages/<type>/<first-letter-of-name>/<name>/<name>-<version>.<extension>

Please use the https://pypi.io URL instead of the https://pypi.python.org
URL. If both ``.tar.gz`` and ``.zip`` versions are available, ``.tar.gz``
is preferred. If some releases offer both ``.tar.gz`` and ``.zip`` versions,
but some only offer ``.zip`` versions, use ``.zip``.

Some Python packages are closed-source and do not ship ``.tar.gz`` or ``.zip``
files on either PyPI or GitHub. If this is the case, you can still download
and install a Python wheel. For example, ``py-azureml-sdk`` is closed source
and can be downloaded from::

   https://pypi.io/packages/py3/a/azureml_sdk/azureml_sdk-1.11.0-py3-none-any.whl

Note that instead of ``<type>`` being ``source``, it is now ``py3`` since this
wheel will work for any generic version of Python 3. You may see Python-specific
or OS-specific URLs. Note that when you add a ``.whl`` URL, you should add
``expand=False`` to ensure that Spack doesn't try to extract the wheel:

.. code-block:: python

   version('1.11.0', sha256='d8c9d24ea90457214d798b0d922489863dad518adde3638e08ef62de28fb183a', expand=False)
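
As a sketch, a recipe fetching from the https://pypi.io interface described
above might set (version and checksum directives omitted):

.. code-block:: python

   url = 'https://pypi.io/packages/source/n/numpy/numpy-1.13.1.zip'

Spack can then extrapolate URLs for other versions from this single URL.
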
"""""""""""""""
PyPI vs. GitHub
"""""""""""""""
Many packages are hosted on PyPI, but are developed on GitHub or another
version control system. The tarball can be downloaded from either
location, but PyPI is preferred for the following reasons:
@@ -294,7 +226,7 @@ location, but PyPI is preferred for the following reasons:
There are some reasons to prefer downloading from GitHub:
#. The GitHub tarball may contain unit tests.

   As previously mentioned, the PyPI tarball contains the bare minimum
   of files to install the package. Unless explicitly specified by the
@@ -302,6 +234,12 @@ There are some reasons to prefer downloading from GitHub:
If you desire to run the unit tests during installation, you should
use the GitHub tarball instead.
#. Spack does not yet support ``spack versions`` and ``spack checksum``
   with PyPI URLs

   These commands work just fine with GitHub URLs. This is a minor
   annoyance, not a reason to prefer GitHub over PyPI.
If you really want to run these unit tests, no one will stop you from
submitting a PR for a new package that downloads from GitHub.
@@ -324,26 +262,26 @@ mentions that Python 3 is required, this can be specified as:
.. code-block:: python

   depends_on('python@3:', type=('build', 'run'))
If Python 2 is required, this would look like:
.. code-block:: python

   depends_on('python@:2', type=('build', 'run'))
If Python 2.7 is the only version that works, you can use:
.. code-block:: python

   depends_on('python@2.7:2.8', type=('build', 'run'))
The documentation may not always specify supported Python versions.
Another place to check is in the ``setup.py`` or ``setup.cfg`` file.
Look for a line containing ``python_requires``. An example from
`py-numpy <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/py-numpy/package.py>`_
looks like:
@@ -352,7 +290,7 @@ looks like:
   python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*'
You may also find a version check at the top of the ``setup.py``:
.. code-block:: python
@@ -367,39 +305,6 @@ This can be converted to Spack's spec notation like so:
   depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
If you are writing a recipe for a package that only distributes
wheels, look for a section in the ``METADATA`` file that looks like::

   Requires-Python: >=3.5,<4

This would be translated to:

.. code-block:: python

   extends('python')
   depends_on('python@3.5:3.999', type=('build', 'run'))

Many ``setup.py`` or ``setup.cfg`` files also contain information like::

   Programming Language :: Python :: 2
   Programming Language :: Python :: 2.6
   Programming Language :: Python :: 2.7
   Programming Language :: Python :: 3
   Programming Language :: Python :: 3.3
   Programming Language :: Python :: 3.4
   Programming Language :: Python :: 3.5
   Programming Language :: Python :: 3.6
This is a list of versions of Python that the developer likely tests.
However, you should not use this to restrict the versions of Python
the package uses unless one of the two former methods (``python_requires``
or ``sys.version_info``) is used. There is no logic in setuptools
that prevents the package from building for Python versions not in
this list, and often new releases like Python 3.7 or 3.8 work just fine.
""""""""""
setuptools
""""""""""
@@ -412,7 +317,7 @@ Most notably, there was no way to list a project's dependencies
with distutils. Along came setuptools, a non-builtin build system
designed to overcome the limitations of distutils. Both projects
use a similar API, making the transition easy while adding much
needed functionality. Today, setuptools is used in around 90% of
the Python packages in Spack.
Since setuptools isn't built-in to Python, you need to add it as a
@@ -455,20 +360,6 @@ run-time. This can be specified as:
   depends_on('py-setuptools', type='build')
"""
pip
"""
Packages distributed as Python wheels will require an extra dependency
on pip:
.. code-block:: python

   depends_on('py-pip', type='build')

We will use pip to install the actual wheel.
""""""
cython
""""""
@@ -492,12 +383,6 @@ where speed is crucial. There is no reason why someone would not
want an optimized version of a library instead of the pure-Python
version.
Note that some release tarballs come pre-cythonized, and cython is
not needed as a dependency. However, this is becoming less common
as Python continues to evolve and developers discover that cythonized
sources are no longer compatible with newer versions of Python and
need to be re-cythonized.
^^^^^^^^^^^^^^^^^^^
Python dependencies
^^^^^^^^^^^^^^^^^^^
@@ -544,26 +429,15 @@ Obviously, this means that ``py-numpy`` is a dependency.
If the package uses ``setuptools``, check for the following clues:
* ``python_requires``

  As mentioned above, this specifies which versions of Python are
  required.

* ``setup_requires``

  These packages are usually only needed at build-time, so you can
  add them with ``type='build'``.

* ``install_requires``

  These packages are required for building and installation. You can
  add them with ``type=('build', 'run')``.

* ``extra_requires``

  These packages are optional dependencies that enable additional
  functionality. You should add a variant that optionally adds these
  dependencies. This variant should be False by default.
* ``test_requires``
@@ -587,37 +461,13 @@ sphinx. If you can't find any information about the package's
dependencies, you can take a look in ``requirements.txt``, but be sure
not to add test or documentation dependencies.
Newer PEPs have added alternative ways to specify a package's dependencies.
If you don't see any dependencies listed in the ``setup.py``, look for a
``setup.cfg`` or ``pyproject.toml``. These files can be used to store the
same ``install_requires`` information that ``setup.py`` used to use.
If you are writing a recipe for a package that only distributes wheels,
check the ``METADATA`` file for lines like::

   Requires-Dist: azureml-core (~=1.11.0)
   Requires-Dist: azureml-dataset-runtime[fuse] (~=1.11.0)
   Requires-Dist: azureml-train (~=1.11.0)
   Requires-Dist: azureml-train-automl-client (~=1.11.0)
   Requires-Dist: azureml-pipeline (~=1.11.0)
   Provides-Extra: accel-models
   Requires-Dist: azureml-accel-models (~=1.11.0); extra == 'accel-models'
   Provides-Extra: automl
   Requires-Dist: azureml-train-automl (~=1.11.0); extra == 'automl'
Lines that use ``Requires-Dist`` are similar to ``install_requires``.
Lines that use ``Provides-Extra`` are similar to ``extra_requires``,
and you can add a variant for those dependencies. The ``~=1.11.0``
syntax is equivalent to ``1.11.0:1.11.999``.
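
As a sketch of that translation (assuming the corresponding ``py-`` packages
exist in Spack), the lines above might become:

.. code-block:: python

   depends_on('py-azureml-core@1.11.0:1.11.999', type=('build', 'run'))

   variant('automl', default=False, description='Enable AutoML support')
   depends_on('py-azureml-train-automl@1.11.0:1.11.999',
              when='+automl', type=('build', 'run'))
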
""""""""""
setuptools
""""""""""
Setuptools is a bit of a special case. If a package requires setuptools
at run-time, how do they express this? They could add it to
``install_requires``, but setuptools is imported long before this and is
needed to read this line. And since you can't install the package
without setuptools, the developers assume that setuptools will already
be there, so they never mention when it is required. We don't want to
@@ -730,13 +580,11 @@ By default, Spack runs:
if it detects that the ``setup.py`` file supports a ``test`` phase.
You can add additional build-time or install-time tests by overriding
``test`` or adding a custom install-time test function. For example,
``py-numpy`` adds:
.. code-block:: python

   install_time_test_callbacks = ['install_test', 'import_module_test']

   def install_test(self):
       with working_dir('..'):
           python('-c', 'import numpy; numpy.test("full", verbose=2)')
@@ -803,8 +651,6 @@ that the package uses the ``PythonPackage`` build system. However, there
are occasionally packages that use ``PythonPackage`` that shouldn't
start with ``py-``. For example:
* awscli
* aws-parallelcluster
* busco
* easybuild
* httpie
@@ -890,9 +736,8 @@ non-Python dependencies. Anaconda contains many Python packages that
are not yet in Spack, and Spack contains many Python packages that are
not yet in Anaconda. The main advantage of Spack over Anaconda is its
ability to choose a specific compiler and BLAS/LAPACK or MPI library.
Spack also has better platform support for supercomputers, and can build
optimized binaries for your specific microarchitecture. On the other hand,
Anaconda offers Windows support.
^^^^^^^^^^^^^^^^^^^^^^
External documentation

View File

@@ -12,173 +12,5 @@ RubyPackage
Like Perl, Python, and R, Ruby has its own build system for
installing Ruby gems.
^^^^^^
Phases
^^^^^^
The ``RubyPackage`` base class provides the following phases that
can be overridden:

#. ``build`` - build everything needed to install
#. ``install`` - install everything from build directory
For packages that come with a ``*.gemspec`` file, these phases run:

.. code-block:: console

   $ gem build *.gemspec
   $ gem install *.gem

For packages that come with a ``Rakefile`` file, these phases run:

.. code-block:: console

   $ rake package
   $ gem install *.gem

For packages that come pre-packaged as a ``*.gem`` file, the build
phase is skipped and the install phase runs:

.. code-block:: console

   $ gem install *.gem

These are all standard ``gem`` commands and can be found by running:

.. code-block:: console

   $ gem help commands
For packages that only distribute ``*.gem`` files, these files can be
downloaded with the ``expand=False`` option in the ``version`` directive.
The build phase will be automatically skipped.
^^^^^^^^^^^^^^^
Important files
^^^^^^^^^^^^^^^
When building from source, Ruby packages can be identified by the
presence of any of the following files:

* ``*.gemspec``
* ``Rakefile``
* ``setup.rb`` (not yet supported)

However, not all Ruby packages are released as source code. Some are only
released as ``*.gem`` files. These files can be extracted using:
.. code-block:: console

   $ gem unpack *.gem
^^^^^^^^^^^
Description
^^^^^^^^^^^
The ``*.gemspec`` file may contain something like:
.. code-block:: ruby

   summary = 'An implementation of the AsciiDoc text processor and publishing toolchain'
   description = 'A fast, open source text processor and publishing toolchain for converting AsciiDoc content to HTML 5, DocBook 5, and other formats.'
Either of these can be used for the description of the Spack package.
^^^^^^^^
Homepage
^^^^^^^^
The ``*.gemspec`` file may contain something like:
.. code-block:: ruby

   homepage = 'https://asciidoctor.org'
This should be used as the official homepage of the Spack package.
^^^^^^^^^^^^^^^^^^^^^^^^^
Build system dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^
All Ruby packages require Ruby at build and run-time. For this reason,
the base class contains:
.. code-block:: python

   extends('ruby')
   depends_on('ruby', type=('build', 'run'))
The ``*.gemspec`` file may contain something like:
.. code-block:: ruby

   required_ruby_version = '>= 2.3.0'
This can be added to the Spack package using:
.. code-block:: python

   depends_on('ruby@2.3.0:', type=('build', 'run'))
^^^^^^^^^^^^^^^^^
Ruby dependencies
^^^^^^^^^^^^^^^^^
When you install a package with ``gem``, it reads the ``*.gemspec``
file in order to determine the dependencies of the package.
If the dependencies are not yet installed, ``gem`` downloads them
and installs them for you. This may sound convenient, but Spack
cannot rely on this behavior for two reasons:
#. Spack needs to be able to install packages on air-gapped networks.

   If there is no internet connection, ``gem`` can't download the
   package dependencies. By explicitly listing every dependency in
   the ``package.py``, Spack knows what to download ahead of time.

#. Duplicate installations of the same dependency may occur.

   Spack supports *activation* of Ruby extensions, which involves
   symlinking the package installation prefix to the Ruby installation
   prefix. If your package is missing a dependency, that dependency
   will be installed to the installation directory of the same package.
   If you try to activate the package + dependency, it may cause a
   problem if that package has already been activated.
For these reasons, you must always explicitly list all dependencies.
Although the documentation may list the package's dependencies,
often the developers assume people will use ``gem`` and won't have to
worry about it. Always check the ``*.gemspec`` file to find the true
dependencies.
Check for the following clues in the ``*.gemspec`` file:

* ``add_runtime_dependency``

  These packages are required for installation.

* ``add_dependency``

  This is an alias for ``add_runtime_dependency``.

* ``add_development_dependency``

  These packages are optional dependencies used for development.
  They should not be added as dependencies of the package.
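
As a sketch, a ``*.gemspec`` line such as
``spec.add_runtime_dependency 'asciidoctor', '~> 2.0'`` might translate to
(the ``rb-`` package name is hypothetical):

.. code-block:: python

   depends_on('rb-asciidoctor@2.0:', type=('build', 'run'))
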
^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
For more information on Ruby packaging, see:
https://guides.rubygems.org/
This build system is a work-in-progress. See
https://github.com/spack/spack/pull/3127 for more information.

View File

@@ -99,7 +99,7 @@ username is not already in the path, Spack will append the value of ``$user`` to
the selected ``build_stage`` path.
.. warning:: We highly recommend specifying ``build_stage`` paths that
   distinguish between staging and other activities to ensure
   ``spack clean`` does not inadvertently remove unrelated files.
Spack prepends ``spack-stage-`` to temporary staging directory names to
reduce this risk. Using a combination of ``spack`` and or ``stage`` in
@@ -223,7 +223,7 @@ To build all software in serial, set ``build_jobs`` to 1.
--------------------
When set to ``true`` Spack will use ccache to cache compiles. This is
useful specifically in two cases: (1) when using ``spack dev-build``, and (2)
when building the same package with many different variants. The default is
``false``.
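
For example, a minimal ``config.yaml`` entry to turn this on:

.. code-block:: yaml

   config:
     ccache: true
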

View File

@@ -9,48 +9,28 @@
Container Images
================
Spack :ref:`environments` are a great tool to create container images, but
preparing one that is suitable for production requires some more boilerplate
than just:

.. code-block:: docker

   COPY spack.yaml /environment
   RUN spack -e /environment install

Additional actions may be needed to minimize the size of the
container, or to update the system software that is installed in the base
image, or to set up a proper entrypoint to run the image. These tasks are
usually both necessary and repetitive, so Spack comes with a command
to generate recipes for container images starting from a ``spack.yaml``.
--------------------
A Quick Introduction
--------------------

Consider having a Spack environment like the following:

.. code-block:: yaml

   spack:
     specs:
     - gromacs+mpi
     - mpich

Producing a ``Dockerfile`` from it is as simple as moving to the directory
where the ``spack.yaml`` file is stored and giving the following command:

.. code-block:: console

   $ spack containerize > Dockerfile

The ``Dockerfile`` that gets created uses multi-stage builds and
other techniques to minimize the size of the final image:

.. code-block:: docker

   # Build stage with Spack pre-installed and ready to be used
   FROM spack/ubuntu-bionic:latest as builder
   FROM spack/centos7:latest as builder

   # What we want to install and how we want to install it
   # is specified in a manifest file (spack.yaml)
@@ -65,7 +45,7 @@ other techniques to minimize the size of the final image:
    && echo "  view: /opt/view") > /opt/spack-environment/spack.yaml

   # Install the software, remove unnecessary deps
   RUN cd /opt/spack-environment && spack env activate . && spack install --fail-fast && spack gc -y
   RUN cd /opt/spack-environment && spack install && spack gc -y

   # Strip all the binaries
   RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
@@ -78,34 +58,38 @@ other techniques to minimize the size of the final image:
   RUN cd /opt/spack-environment && \
       spack env activate --sh -d . >> /etc/profile.d/z10_spack_environment.sh

   # Bare OS image to run the installed executables
   FROM ubuntu:18.04
   FROM centos:7

   COPY --from=builder /opt/spack-environment /opt/spack-environment
   COPY --from=builder /opt/software /opt/software
   COPY --from=builder /opt/view /opt/view
   COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh

   RUN yum update -y && yum install -y epel-release && yum update -y \
    && yum install -y libgomp \
    && rm -rf /var/cache/yum && yum clean all

   RUN echo 'export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][gromacs]\[$(tput setaf 2)\]\u\[$(tput sgr0)\]:\w $ \[$(tput sgr0)\]"' >> ~/.bashrc

   LABEL "app"="gromacs"
   LABEL "mpi"="mpich"

   ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"]

The image itself can then be built and run in the usual way, with any of the
tools suitable for the task. For instance, if we decided to use ``docker``:

.. code-block:: bash

   $ spack containerize > Dockerfile
   $ docker build -t myimage .
   [ ... ]
   $ docker run -it myimage

The bits that make this automation possible are discussed in detail
below. All the images generated in this way are based on
multi-stage builds with:

- A fat ``build`` stage containing common build tools and Spack itself
- A minimal ``final`` stage containing only the software requested by the user

The various components involved in the generation of the recipe and their
configuration are discussed in detail in the sections below.
.. _container_spack_images:
--------------------------
Spack Images on Docker Hub
--------------------------
Docker images with Spack preinstalled and ready to be used are
built on `Docker Hub <https://hub.docker.com/u/spack>`_
@@ -126,6 +110,9 @@ are currently supported are summarized in the table below:
   * - Ubuntu 18.04
     - ``ubuntu:18.04``
     - ``spack/ubuntu-bionic``
   * - CentOS 6
     - ``centos:6``
     - ``spack/centos6``
   * - CentOS 7
     - ``centos:7``
     - ``spack/centos7``
@@ -137,20 +124,19 @@ All the images are tagged with the corresponding release of Spack:
with the exception of the ``latest`` tag that points to the HEAD
of the ``develop`` branch. These images are available for anyone
to use and take care of all the repetitive tasks that are necessary
to set up Spack within a container. The container recipes generated
by Spack use them as default base images for their ``build`` stage,
though users can also provide custom base images to accommodate
complex use cases.
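For instance, a specific image can be pulled directly with ``docker``
(the tag below is illustrative):

.. code-block:: console

   $ docker pull spack/centos7:0.15.4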
---------------------------------
Creating Images From Environments
---------------------------------
Any Spack Environment can be used for the automatic generation of container
recipes. Sensible defaults are provided for things like the base image or the
version of Spack used in the image.
If a finer tuning is needed it can be obtained by adding the relevant metadata
under the ``container`` attribute of environments:
.. code-block:: yaml
spack:
specs:
- gromacs+mpi
- mpich

container:
# Select the format of the recipe e.g. docker,
# singularity or anything else that is currently supported
format: docker
# Sets the base images for the stages where Spack builds the
# software or where the software gets installed after being built.
images:
os: "centos:7"
spack: develop
# Whether or not to strip binaries
strip: true
# Additional system packages that are needed at runtime
os_packages:
final:
- libgomp
# Extra instructions
extra_instructions:
final: |
RUN echo 'export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][gromacs]\[$(tput setaf 2)\]\u\[$(tput sgr0)\]:\w $ \[$(tput sgr0)\]"' >> ~/.bashrc
# Labels for the image
labels:
app: "gromacs"
mpi: "mpich"
A detailed description of the options available can be found in the
:ref:`container_config_options` section.
-------------------
Setting Base Images
-------------------
The ``images`` subsection is used to select both the image where
Spack builds the software and the image where the built software
is installed. This attribute can be set in two different ways and
which one to use depends on the use case at hand.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Use Official Spack Images From Docker Hub
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To generate a recipe that uses an official Docker image from the
Spack organization to build the software and the corresponding official OS image
to install the built software, all the user has to do is specify:
1. An operating system under ``images:os``
2. A Spack version under ``images:spack``
Any combination of these two values that can be mapped to one of the images
discussed in :ref:`container_spack_images` is allowed. For instance, the
following ``spack.yaml``:
.. code-block:: yaml
spack:
specs:
- gromacs+mpi
- mpich
container:
images:
os: centos:7
spack: 0.15.4
uses ``spack/centos7:0.15.4`` and ``centos:7`` for the stages where the
software is respectively built and installed:
.. code-block:: docker
# Build stage with Spack pre-installed and ready to be used
FROM spack/centos7:0.15.4 as builder
# What we want to install and how we want to install it
# is specified in a manifest file (spack.yaml)
RUN mkdir /opt/spack-environment \
&& (echo "spack:" \
&& echo " specs:" \
&& echo " - gromacs+mpi" \
&& echo " - mpich" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
[ ... ]
# Bare OS image to run the installed executables
FROM centos:7
COPY --from=builder /opt/spack-environment /opt/spack-environment
COPY --from=builder /opt/software /opt/software
COPY --from=builder /opt/view /opt/view
COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh
ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"]
This method of selecting base images is the simpler of the two, and we advise
using it whenever possible. There are cases though where using Spack official
images is not enough to fit production needs. In these situations users can
manually select which base image to start from in the recipe, as we'll see next.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Use Custom Images Provided by Users
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Consider, as an example, building a production grade image for a CUDA
application. The best strategy would probably be to build on top of
images provided by the vendor and regard CUDA as an external package.
Spack doesn't currently provide an official image with CUDA configured
this way, but users can build it on their own and then configure the
environment to explicitly pull it. This requires users to:
1. Specify the image used to build the software under ``images:build``
2. Specify the image used to install the built software under ``images:final``
A ``spack.yaml`` like the following:
.. code-block:: yaml
spack:
specs:
- gromacs@2019.4+cuda build_type=Release
- mpich
- fftw precision=float
packages:
cuda:
buildable: False
externals:
- spec: cuda%gcc
prefix: /usr/local/cuda
container:
images:
build: custom/cuda-10.1-ubuntu18.04:latest
final: nvidia/cuda:10.1-base-ubuntu18.04
produces, for instance, the following ``Dockerfile``:
.. code-block:: docker
# Build stage with Spack pre-installed and ready to be used
FROM custom/cuda-10.1-ubuntu18.04:latest as builder
# What we want to install and how we want to install it
# is specified in a manifest file (spack.yaml)
RUN mkdir /opt/spack-environment \
&& (echo "spack:" \
&& echo " specs:" \
&& echo " - gromacs@2019.4+cuda build_type=Release" \
&& echo " - mpich" \
&& echo " - fftw precision=float" \
&& echo " packages:" \
&& echo " cuda:" \
&& echo " buildable: false" \
&& echo " externals:" \
&& echo " - spec: cuda%gcc" \
&& echo " prefix: /usr/local/cuda" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
# Install the software, remove unnecessary deps
RUN cd /opt/spack-environment && spack env activate . && spack install --fail-fast && spack gc -y
# Strip all the binaries
RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
xargs file -i | \
grep 'charset=binary' | \
grep 'x-executable\|x-archive\|x-sharedlib' | \
awk -F: '{print $1}' | xargs strip -s
# Modifications to the environment that are necessary to run
RUN cd /opt/spack-environment && \
spack env activate --sh -d . >> /etc/profile.d/z10_spack_environment.sh
# Bare OS image to run the installed executables
FROM nvidia/cuda:10.1-base-ubuntu18.04
COPY --from=builder /opt/spack-environment /opt/spack-environment
COPY --from=builder /opt/software /opt/software
COPY --from=builder /opt/view /opt/view
COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh
ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"]
where the base images for both stages are completely custom.
This second mode of selection for base images is more flexible than just
choosing an operating system and a Spack version, but is also more demanding.
Users may need to generate their base images themselves, and it's also their
responsibility to ensure that:
1. Spack is available in the ``build`` stage and set up correctly to install the required software
2. The artifacts produced in the ``build`` stage can be executed in the ``final`` stage
Therefore we don't recommend its use in cases that can be otherwise
covered by the simplified mode shown first.
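As a sketch of what the first requirement may involve, a custom ``build``
image could start from the vendor image and install Spack on top of it
(the base image, package list, and paths below are illustrative, not a
supported recipe):

.. code-block:: docker

   FROM nvidia/cuda:10.1-devel-ubuntu18.04
   # Install Spack's prerequisites, then clone Spack itself
   RUN apt-get update && apt-get install -y \
       build-essential curl git python3 \
    && git clone https://github.com/spack/spack.git /opt/spack
   ENV PATH=/opt/spack/bin:$PATH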
----------------------------
Singularity Definition Files
----------------------------
In addition to producing recipes in ``Dockerfile`` format, Spack can produce
Singularity Definition Files by just changing the value of the ``format``
attribute:
.. code-block:: console
$ cat spack.yaml
spack:
specs:
- hdf5~mpi
container:
format: singularity
$ spack containerize > hdf5.def
$ sudo singularity build hdf5.sif hdf5.def
The minimum version of Singularity required to build a SIF (Singularity Image Format)
image from the recipes generated by Spack is ``3.5.3``.
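Once built, the image can be used like any other SIF file; for example
(assuming the ``hdf5.sif`` image built above):

.. code-block:: console

   $ singularity exec hdf5.sif h5dump --version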
.. _container_config_options:
-----------------------
Configuration Reference
-----------------------
The tables below describe all the configuration options that are currently supported
to customize the generation of container recipes:
.. list-table:: General configuration options for the ``container`` section of ``spack.yaml``
:header-rows: 1
* - Option Name
- Description
- Allowed Values
- Required
* - ``format``
- The format of the recipe
- ``docker`` or ``singularity``
- Yes
* - ``images:os``
- Operating system used as a base for the image
- See :ref:`containers-supported-os`
- Yes, if using constrained selection of base images
* - ``images:spack``
- Version of Spack used in the ``build`` stage
- Valid tags for the corresponding Spack image
- Yes, if using constrained selection of base images
* - ``images:build``
- Image to be used in the ``build`` stage
- Any valid container image
- Yes, if using custom selection of base images
* - ``images:final``
- Image to be used in the ``final`` stage
- Any valid container image
- Yes, if using custom selection of base images
* - ``strip``
- Whether to strip binaries
- ``true`` (default) or ``false``
- No
* - ``os_packages:command``
- Tool used to manage system packages
- ``apt``, ``yum``
- Only with custom base images
* - ``os_packages:update``
- Whether or not to update the list of available packages
- True or False (default: True)
- No
* - ``os_packages:build``
- System packages needed at build-time
- Valid packages for the current OS
- No
* - ``os_packages:final``
- System packages needed at run-time
- Valid packages for the current OS
- No
* - ``extra_instructions:build``
- Extra instructions (e.g. ``RUN``, ``COPY``, etc.) at the end of the ``build`` stage
- Anything understood by the current ``format``
- No
* - ``extra_instructions:final``
- Extra instructions (e.g. ``RUN``, ``COPY``, etc.) at the end of the ``final`` stage
- Anything understood by the current ``format``
- No
* - ``labels``
- Labels to tag the image
- Description string
- No
--------------
Best Practices
--------------
^^^
MPI
^^^
OpenMPI, Spack's default MPI implementation, depends on Fortran, so
consider adding ``gfortran`` to the ``apt-get install`` list.
Recent versions of OpenMPI will require you to pass ``--allow-run-as-root``
to your ``mpirun`` calls if started as root user inside Docker.
For execution on HPC clusters, it can be helpful to import the docker
image into Singularity in order to start a program with an *external*
MPI. Otherwise, also add ``openssh-server`` to the ``apt-get install`` list.
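In terms of recipe configuration this might look like the following
(a sketch; the exact package list depends on your base image):

.. code-block:: yaml

   container:
     os_packages:
       final:
       - gfortran
       - openssh-server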
^^^^
CUDA
^^^^
Starting from CUDA 9.0, Nvidia provides minimal CUDA images based on
Ubuntu. Please see `their instructions <https://hub.docker.com/r/nvidia/cuda/>`_.
Avoid double-installing CUDA by adding, e.g.
.. code-block:: yaml
packages:
cuda:
externals:
- spec: "cuda@9.0.176%gcc@5.4.0 arch=linux-ubuntu16-x86_64"
prefix: /usr/local/cuda
buildable: False
# What we want to install and how we want to install it
# is specified in a manifest file (spack.yaml)
RUN mkdir /opt/spack-environment \
&& (echo "spack:" \
&& echo " specs:" \
&& echo " - gromacs+mpi" \
&& echo " - mpich" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
to your ``spack.yaml``.
Users will either need ``nvidia-docker`` or e.g. Singularity to *execute*
device kernels.
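For example, with Docker and the NVIDIA container runtime installed
(the image name is illustrative):

.. code-block:: console

   $ docker run --rm --gpus all myimage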
^^^^^^^^^^^^^^^^^^^^^^^^^
Docker on Windows and OSX
^^^^^^^^^^^^^^^^^^^^^^^^^
On Mac OS and Windows, Docker runs on a hypervisor that is not allocated much
memory by default, and some Spack packages may fail to build due to lack of
memory. To work around this issue, consider configuring your Docker installation
to use more of your host memory. In some cases, you can also ease the memory
pressure on parallel builds by limiting the parallelism in your ``config.yaml``:
.. code-block:: yaml
config:
build_jobs: 2

correspond to one feature/bugfix/extension/etc. One can create PRs with
changes relevant to different ideas; however, reviewing such PRs becomes tedious
and error prone. If possible, try to follow the **one-PR-one-package/feature** rule.
--------
Branches
--------
Spack's ``develop`` branch has the latest contributions. Nearly all pull
requests should start from ``develop`` and target ``develop``.
There is a branch for each major release series. Release branches
originate from ``develop`` and have tags for each point release in the
series. For example, ``releases/v0.14`` has tags for ``0.14.0``,
``0.14.1``, ``0.14.2``, etc. versions of Spack. We backport important bug
fixes to these branches, but we do not advance the package versions or
make other changes that would change the way Spack concretizes
dependencies. Currently, the maintainers manage these branches by
cherry-picking from ``develop``. See :ref:`releases` for more
information.
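For example, to inspect a release series locally (the version below is
illustrative):

.. code-block:: console

   $ git fetch origin
   $ git checkout releases/v0.14
   $ git tag --list 'v0.14.*'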
----------------------
Continuous Integration
----------------------
Spack uses `GitHub Actions <https://docs.github.com/en/actions>`_ for Continuous Integration
testing. This means that every time you submit a pull request, a series of tests will
be run to make sure you didn't accidentally introduce any bugs into Spack. **Your PR
will not be accepted until it passes all of these tests.** While you can certainly wait
for the results of these tests after submitting a PR, we recommend that you
run them locally to speed up the review process.
.. note::
Oftentimes, CI will fail for reasons other than a problem with your PR.
For example, apt-get, pip, or homebrew will fail to download one of the
dependencies for the test suite, or a transient bug will cause the unit tests
to timeout. If any job fails, click the "Details" link and click on the test(s)
that is failing. If it doesn't look like it is failing for reasons related to
your PR, you have two options. If you have write permissions for the Spack
repository, you should see a "Restart workflow" button on the right-hand side. If
repository, you should see a "Restart job" button on the right-hand side. If
not, you can close and reopen your PR to rerun all of the tests. If the same
test keeps failing, there may be a problem with your PR. If you notice that
every recent PR is failing with the same error message, it may be that an issue
occurred with the CI infrastructure or one of Spack's dependencies put out a
new release that is causing problems. If this is the case, please file an issue.
We currently test against Python 2.6, 2.7, and 3.5-3.7 on both macOS and Linux and
perform 3 types of tests:
.. _cmd-spack-unit-test:
^^^^^^^^^^
Unit Tests
^^^^^^^^^^

To run *all* of the unit tests, use:
.. code-block:: console
$ spack unit-test
These tests may take several minutes to complete. If you know you are
only modifying a single Spack feature, you can run subsets of tests at a
time. For example, this would run all the tests in
``lib/spack/spack/test/architecture.py``:
.. code-block:: console
$ spack unit-test lib/spack/spack/test/architecture.py
And this would run the ``test_platform`` test from that file:
.. code-block:: console
$ spack unit-test lib/spack/spack/test/architecture.py::test_platform
This allows you to develop iteratively: make a change, test that change,
make another change, test that change, etc. We use `pytest
<http://pytest.org/>`_ as our tests framework, and these types of
arguments are just passed to the ``pytest`` command underneath. See `the
pytest docs
<http://doc.pytest.org/en/latest/usage.html#specifying-tests-selecting-tests>`_
for more details on test selection syntax.
``spack unit-test`` has a few special options that can help you
understand what tests are available. To get a list of all available
unit test files, run:
.. command-output:: spack unit-test --list
:ellipsis: 5
To see a more detailed list of available unit tests, use ``spack
unit-test --list-long``:
.. command-output:: spack unit-test --list-long
:ellipsis: 10
And to see the fully qualified names of all tests, use ``--list-names``:
.. command-output:: spack unit-test --list-names
:ellipsis: 5
You can combine these with ``pytest`` arguments to restrict which tests
you want to know about. For example, to see just the tests in
``architecture.py``:
.. command-output:: spack unit-test --list-long lib/spack/spack/test/architecture.py
You can also combine any of these options with a ``pytest`` keyword
search. See the `pytest usage docs
<https://docs.pytest.org/en/stable/usage.html#specifying-tests-selecting-tests>`_
for more details on test selection syntax. For example, to see the names of all tests that have "spec"
or "concretize" somewhere in their names:
.. command-output:: spack unit-test --list-names -k "spec and concretize"
By default, ``pytest`` captures the output of all unit tests, and it will
print any captured output for failed tests. Sometimes it's helpful to see
your output interactively, while the tests run (e.g., if you add print
statements to a unit test). To see the output *live*, use the ``-s``
argument to ``pytest``:
.. code-block:: console
$ spack unit-test -s --list-long lib/spack/spack/test/architecture.py::test_platform
Unit tests are crucial to making sure bugs aren't introduced into
Spack. If you are modifying core Spack libraries or adding new
features, we ask that you add unit tests for them. Take a look at the
existing tests in ``lib/spack/spack/test`` to learn
how to write tests!
.. note::
You may notice the ``share/spack/qa/run-unit-tests`` script in the
repository. This script is designed for CI. It runs the unit
tests and reports coverage statistics back to Codecov. If you want to
run the unit tests yourself, we suggest you use ``spack unit-test``.
^^^^^^^^^^^^
Flake8 Tests
^^^^^^^^^^^^
Try fixing flake8 errors in reverse order. This eliminates the need for
multiple runs of ``spack flake8`` just to re-compute line numbers and
makes it much easier to fix errors directly off of the CI output.
Once all of the dependencies are installed, you can try building the documentation:
.. code-block:: console
$ cd path/to/spack/lib/spack/docs/
$ cd "$SPACK_ROOT/lib/spack/docs"
$ make clean
$ make
There is also a ``run-doc-tests`` script in ``share/spack/qa``. The only
difference between running this script and running ``make`` by hand is that
the script will exit immediately if it encounters an error or warning. This
is necessary for CI. If you made a lot of documentation changes, it is
much quicker to run ``make`` by hand so that you can see all of the warnings
at once.
Spack uses `Codecov <https://codecov.io/gh/spack/spack>`_ to report unit test
coverage, which allows you to see coverage line-by-line when viewing the Spack repository.
If you are new to Spack, a great way to get started is to write unit tests to
increase coverage!
Unlike the CI checks on GitHub Actions, Codecov tests are not required to pass in order for your
PR to be merged. If you modify core Spack libraries, we would greatly
appreciate unit tests that cover these changed lines. Otherwise, we have no
way of knowing whether or not your changes introduce a bug.
^^^^^^^^^^^^^
``spack doc``
^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^
``spack unit-test``
^^^^^^^^^^^^^^^^^^^
See the :ref:`contributor guide section <cmd-spack-unit-test>` on
``spack unit-test``.
.. _cmd-spack-python:
The bottom of the output shows the top most time consuming functions,
slowest on top. The profiling support is from Python's built-in tool,
`cProfile
<https://docs.python.org/2/library/profile.html#module-cProfile>`_.
.. _releases:
--------
Releases
--------
This section documents Spack's release process. It is intended for
project maintainers, as the tasks described here require maintainer
privileges on the Spack repository. For others, we hope this section at
least provides some insight into how the Spack project works.
.. _release-branches:
^^^^^^^^^^^^^^^^
Release branches
^^^^^^^^^^^^^^^^
There are currently two types of Spack releases: :ref:`major releases
<major-releases>` (``0.13.0``, ``0.14.0``, etc.) and :ref:`point releases
<point-releases>` (``0.13.1``, ``0.13.2``, ``0.13.3``, etc.). Here is a
diagram of how Spack release branches work::
o branch: develop (latest version)
|
o merge v0.14.1 into develop
|\
| o branch: releases/v0.14, tag: v0.14.1
o | merge v0.14.0 into develop
|\|
| o tag: v0.14.0
|/
o merge v0.13.2 into develop
|\
| o branch: releases/v0.13, tag: v0.13.2
o | merge v0.13.1 into develop
|\|
| o tag: v0.13.1
o | merge v0.13.0 into develop
|\|
| o tag: v0.13.0
o |
| o
|/
o
The ``develop`` branch has the latest contributions, and nearly all pull
requests target ``develop``.
Each Spack release series also has a corresponding branch, e.g.
``releases/v0.14`` has ``0.14.x`` versions of Spack, and
``releases/v0.13`` has ``0.13.x`` versions. A major release is the first
tagged version on a release branch. Minor releases are back-ported from
develop onto release branches. This is typically done by cherry-picking
bugfix commits off of ``develop``.
To avoid version churn for users of a release series, minor releases
should **not** make changes that would change the concretization of
packages. They should generally only contain fixes to the Spack core.
Both major and minor releases are tagged. After each release, we merge
the release branch back into ``develop`` so that the version bump and any
other release-specific changes are visible in the mainline. As a
convenience, we also tag the latest release as ``releases/latest``,
so that users can easily check it out to get the latest
stable version. See :ref:`merging-releases` for more details.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Scheduling work for releases
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We schedule work for releases by creating `GitHub projects
<https://github.com/spack/spack/projects>`_. At any time, there may be
several open release projects. For example, here are two releases (from
some past version of the page linked above):
.. image:: images/projects.png
Here, there's one release in progress for ``0.15.1`` and another for
``0.16.0``. Each of these releases has a project board containing issues
and pull requests. GitHub shows a status bar with completed work in
green, work in progress in purple, and work not started yet in gray, so
it's fairly easy to see progress.
Spack's project boards are not firm commitments, and we move work between
releases frequently. If we need to make a release and some tasks are not
yet done, we will simply move them to the next minor or major release, rather
than delaying the release to complete them.
For more on using GitHub project boards, see `GitHub's documentation
<https://docs.github.com/en/github/managing-your-work-on-github/about-project-boards>`_.
.. _major-releases:
^^^^^^^^^^^^^^^^^^^^^
Making Major Releases
^^^^^^^^^^^^^^^^^^^^^
Assuming you've already created a project board and completed the work
for a major release, the steps to make the release are as follows:
#. Create two new project boards:
* One for the next major release
* One for the next point release
#. Move any tasks that aren't done yet to one of the new project boards.
Small bugfixes should go to the next point release. Major features,
refactors, and changes that could affect concretization should go in
the next major release.
#. Create a branch for the release, based on ``develop``:
.. code-block:: console
$ git checkout -b releases/v0.15 develop
For a version ``vX.Y.Z``, the branch's name should be
``releases/vX.Y``. That is, you should create a ``releases/vX.Y``
branch if you are preparing the ``X.Y.0`` release.
#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.13.0
<https://github.com/spack/spack/commit/8eeb64096c98b8a43d1c587f13ece743c864fba9>`_
#. Update ``CHANGELOG.md`` with major highlights in bullet form. Use
proper markdown formatting, like `this example from 0.15.0
<https://github.com/spack/spack/commit/d4bf70d9882fcfe88507e9cb444331d7dd7ba71c>`_.
#. Push the release branch to GitHub.
#. Make sure CI passes on the release branch, including:
* Regular unit tests
* Build tests
* The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_
If CI is not passing, submit pull requests to ``develop`` as normal
and keep rebasing the release branch on ``develop`` until CI passes.
#. Follow the steps in :ref:`publishing-releases`.
#. Follow the steps in :ref:`merging-releases`.
#. Follow the steps in :ref:`announcing-releases`.
.. _point-releases:
^^^^^^^^^^^^^^^^^^^^^
Making Point Releases
^^^^^^^^^^^^^^^^^^^^^
This assumes you've already created a project board for a point release
and completed the work to be done for the release. To make a point
release:
#. Create one new project board for the next point release.
#. Move any cards that aren't done yet to the next project board.
#. Check out the release branch (it should already exist). For the
``X.Y.Z`` release, the release branch is called ``releases/vX.Y``. For
``v0.15.1``, you would check out ``releases/v0.15``:
.. code-block:: console
$ git checkout releases/v0.15
#. Cherry-pick each pull request in the ``Done`` column of the release
project onto the release branch.
This is **usually** fairly simple since we squash the commits from the
vast majority of pull requests, which means there is only one commit
per pull request to cherry-pick. For example, `this pull request
<https://github.com/spack/spack/pull/15777>`_ has three commits, but
they were squashed into a single commit on merge. You can see the
commit that was created here:
.. image:: images/pr-commit.png
You can easily cherry-pick it like this (assuming you already have the
release branch checked out):
.. code-block:: console
$ git cherry-pick 7e46da7
For pull requests that were rebased, you'll need to cherry-pick each
rebased commit individually. There have not been any rebased PRs like
this in recent point releases.
.. warning::
It is important to cherry-pick commits in the order they happened,
otherwise you can get conflicts while cherry-picking. When
cherry-picking onto a point release, look at the merge date,
**not** the number of the pull request or the date it was opened.
Sometimes you may **still** get merge conflicts even if you have
cherry-picked all the commits in order. This generally means there
is some other intervening pull request that the one you're trying
to pick depends on. In these cases, you'll need to make a judgment
call:
1. If the dependency is small, you might just cherry-pick it, too.
If you do this, add it to the release board.
2. If it is large, then you may decide that this fix is not worth
including in a point release, in which case you should remove it
from the release project.
3. You can always decide to manually back-port the fix to the release
branch if neither of the above options makes sense, but this can
require a lot of work. It's seldom the right choice.
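One way to review candidate commits in the order they were merged is a
plain chronological log (a sketch; adjust the revision range to your
release series):

.. code-block:: console

   $ git log --oneline --reverse v0.14.0..develop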
#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.14.1
<https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
#. Update ``CHANGELOG.md`` with a list of bugfixes. This is typically just a
summary of the commits you cherry-picked onto the release branch. See
`the changelog from 0.14.1
<https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
#. Push the release branch to GitHub.
#. Make sure CI passes on the release branch, including:
* Regular unit tests
* Build tests
* The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_
If CI does not pass, you'll need to figure out why, and make changes
to the release branch until it does. You can make more commits, modify
or remove cherry-picked commits, or cherry-pick **more** from
``develop`` to make this happen.
#. Follow the steps in :ref:`publishing-releases`.
#. Follow the steps in :ref:`merging-releases`.
#. Follow the steps in :ref:`announcing-releases`.
.. _publishing-releases:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Publishing a release on GitHub
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#. Go to `github.com/spack/spack/releases
<https://github.com/spack/spack/releases>`_ and click ``Draft a new
release``. Set the following:
* ``Tag version`` should start with ``v`` and contain *all three*
parts of the version, e.g. ``v0.15.1``. This is the name of the tag
that will be created.
* ``Target`` should be the ``releases/vX.Y`` branch (e.g., ``releases/v0.15``).
* - ``Release title`` should be ``vX.Y.Z`` (to match the tag, e.g., ``v0.15.1``).
* For the text, paste the latest release markdown from your ``CHANGELOG.md``.
You can save the draft and keep coming back to this as you prepare the release.
#. When you are done, click ``Publish release``.
#. Immediately after publishing, go back to
`github.com/spack/spack/releases
<https://github.com/spack/spack/releases>`_ and download the
auto-generated ``.tar.gz`` file for the release. It's the ``Source
code (tar.gz)`` link.
#. Click ``Edit`` on the release you just did and attach the downloaded
release tarball as a binary. This does two things:
#. Makes sure that the hash of our releases doesn't change over time.
GitHub sometimes annoyingly changes the way they generate
tarballs, and then hashes can change if you rely on the
auto-generated tarball links.
#. Gets us download counts on releases visible through the GitHub
API. GitHub tracks downloads of artifacts, but *not* the source
links. See the `releases
page <https://api.github.com/repos/spack/spack/releases>`_ and search
for ``download_count`` to see this.
#. Go to `readthedocs.org <https://readthedocs.org/projects/spack>`_ and activate
the release tag. This builds the documentation and makes the released version
selectable in the versions menu.
.. _merging-releases:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Updating ``releases/latest`` and ``develop``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If the new release is the **highest** Spack release yet, you should
also tag it as ``releases/latest``. For example, suppose the highest
release is currently ``0.15.3``:
* If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
it with ``releases/latest``, as these are higher than ``0.15.3``.
* If you are making a new release of an **older** major version of
Spack, e.g. ``0.14.4``, then you should not tag it as
``releases/latest`` (as there are newer major versions).
To tag ``releases/latest``, do this:
.. code-block:: console
$ git checkout releases/vX.Y # vX.Y is the new release's branch
$ git tag --force releases/latest
$ git push --tags
The ``--force`` argument makes ``git`` overwrite the existing
``releases/latest`` tag with the new one.
We also merge each release that we tag as ``releases/latest`` into ``develop``.
Make sure to do this with a merge commit:
.. code-block:: console
$ git checkout develop
$ git merge --no-ff vX.Y.Z # vX.Y.Z is the new release's tag
$ git push
We merge back to ``develop`` because it:
* updates the version and ``CHANGELOG.md`` on ``develop``.
* ensures that your release tag is reachable from the head of
``develop``
We *must* use a real merge commit (via the ``--no-ff`` option) because it
ensures that the release tag is reachable from the tip of ``develop``.
This is necessary for ``spack -V`` to work properly -- it uses ``git
describe --tags`` to find the last reachable tag in the repository and
reports how far we are from it. For example:
.. code-block:: console
$ spack -V
0.14.2-1486-b80d5e74e5
This says that we are at commit ``b80d5e74e5``, which is 1,486 commits
ahead of the ``0.14.2`` release.
We put this step last in the process because it's best to do it only once
the release is complete and tagged. If you do it before you've tagged the
release and later decide you want to tag some later commit, you'll need
to merge again.
.. _announcing-releases:
^^^^^^^^^^^^^^^^^^^^
Announcing a release
^^^^^^^^^^^^^^^^^^^^
We announce releases in all of the major Spack communication channels.
Publishing the release takes care of GitHub. The remaining channels are
Twitter, Slack, and the mailing list. Here are the steps:
#. Make a tweet to announce the release. It should link to the release's
page on GitHub. You can base it on `this example tweet
<https://twitter.com/spackpm/status/1231761858182307840>`_.
#. Ping ``@channel`` in ``#general`` on Slack (`spackpm.slack.com
<https://spackpm.slack.com>`_) with a link to the tweet. The tweet
will be shown inline so that you do not have to retype your release
announcement.
#. Email the Spack mailing list to let them know about the release. As
with the tweet, you likely want to link to the release's page on
GitHub. It's also helpful to include some information directly in the
email. You can base yours on this `example email
<https://groups.google.com/forum/#!topic/spack/WT4CT9i_X4s>`_.
Once you've announced the release, congratulations, you're done! You've
finished making the release!

.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _docker_for_developers:
=====================
Docker for Developers
=====================
This guide is intended for people who want to use our prepared docker
environments to work on developing Spack or on Spack packages. It is
meant to serve as the companion documentation for the :ref:`packaging-guide`.
--------
Overview
--------
To get started, all you need is the latest version of ``docker``.
.. code-block:: console
$ cd share/spack/docker
$ source config/ubuntu.bash
$ ./run-image.sh
This command should drop you into an interactive shell where you can run Spack
within an isolated docker container running Ubuntu. The copy of Spack being
used is tied to the working copy of your cloned git repo, so any changes
you make are immediately reflected in the running docker container. Feel
free to add or modify any packages, or to hack on Spack itself; your contained
copy of Spack will immediately reflect all changes.
To work within a container running a different linux distro, source one of the
other environment files under ``config``.
.. code-block:: console
$ source config/fedora.bash
$ ./run-image.sh

Environment has been activated. Similarly, the ``install`` and
``uninstall`` commands are scoped to the Environment:

.. code-block:: console

$ spack find
==> 0 installed packages
$ spack install zlib@1.2.11
==> Installing zlib-1.2.11-q6cqrdto4iktfg6qyqcc5u4vmfmwb7iv
==> No binary for zlib-1.2.11-q6cqrdto4iktfg6qyqcc5u4vmfmwb7iv found: installing from source
==> zlib: Executing phase: 'install'
[+] ~/spack/opt/spack/linux-rhel7-broadwell/gcc-8.1.0/zlib-1.2.11-q6cqrdto4iktfg6qyqcc5u4vmfmwb7iv
$ spack env activate myenv
$ spack find
==> In environment myenv
==> No root specs
==> 0 installed packages
$ spack install zlib@1.2.8
==> Installing zlib-1.2.8-yfc7epf57nsfn2gn4notccaiyxha6z7x
==> No binary for zlib-1.2.8-yfc7epf57nsfn2gn4notccaiyxha6z7x found: installing from source
==> zlib: Executing phase: 'install'
[+] ~/spack/opt/spack/linux-rhel7-broadwell/gcc-8.1.0/zlib-1.2.8-yfc7epf57nsfn2gn4notccaiyxha6z7x
==> Updating view at ~/spack/var/spack/environments/myenv/.spack-env/view
$ spack find
==> In environment myenv
==> Root specs
zlib@1.2.8
==> 1 installed package
-- linux-rhel7-broadwell / gcc@8.1.0 ----------------------------
zlib@1.2.8
$ despacktivate
$ spack find
==> 2 installed packages
-- linux-rhel7-broadwell / gcc@8.1.0 ----------------------------
zlib@1.2.8 zlib@1.2.11
Note that when we installed the abstract spec ``zlib@1.2.8``, it was
presented as a root of the Environment. All explicitly installed
packages will be listed as roots of the Environment.
.. _installing-environment:
^^^^^^^^^^^^^^^^^^^^^^^^^
Installing an Environment
^^^^^^^^^^^^^^^^^^^^^^^^^
environment for Spack commands. The arguments ``-v,--with-view`` and
``-V,--without-view`` can be used to tune this behavior. The default
behavior is to activate with the environment view if there is one.
The environment variables affected by the ``spack env activate``
command and the paths that are used to update them are determined by
the :ref:`prefix inspections <customize-env-modifications>` defined in
your modules configuration; the defaults are summarized in the following
table.
=================== =========
Variable Paths

will be available from the command line:
--implicit select specs that are not installed or were installed implicitly
--output OUTPUT where to dump the result
The corresponding unit tests can be run giving the appropriate options
to ``spack unit-test``:
.. code-block:: console
$ spack unit-test --extension=scripting
============================================================== test session starts ===============================================================
platform linux2 -- Python 2.7.15rc1, pytest-3.2.5, py-1.4.34, pluggy-0.4.0

# Add compiler flags using the conventional names
$ spack install mpileaks@1.1.2 %gcc@4.7.3 cppflags="-O3 -floop-block"
# Cross-compile for a different micro-architecture with target=
$ spack install mpileaks@1.1.2 target=icelake
Users can specify as many or few options as they care about. Spack
will fill in the unspecified values with sensible defaults.

-------------
Prerequisites
-------------
Spack has the following minimum requirements, which must be installed
before Spack is run:
#. Python 2 (2.6 or 2.7) or 3 (3.5 - 3.9) to run Spack
#. A C/C++ compiler for building
#. The ``make`` executable for building
#. The ``tar``, ``gzip``, ``bzip2``, ``xz`` and optionally ``zstd``
executables for extracting source code
#. The ``patch`` command to apply patches
#. The ``git`` and ``curl`` commands for fetching
#. If using the ``gpg`` subcommand, ``gnupg2`` is required
These requirements can be easily installed on most modern Linux systems;
on macOS, XCode is required. Spack is designed to run on HPC
platforms like Cray. Not all packages should be expected
to work on all platforms. A build matrix showing which packages are
working on which systems is planned but not yet available.
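A quick way to spot-check several of these requirements from a shell
(illustrative, not exhaustive):

.. code-block:: console

   $ python3 --version
   $ git --version
   $ make --version
   $ tar --version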
Getting Spack is easy. You can clone it from the `github repository
<https://github.com/spack/spack>`_ using ``git``:

.. code-block:: console

$ git clone https://github.com/spack/spack.git
This will create a directory called ``spack``.
.. _shell-support:
^^^^^^^^^^^^^^^^^^^^^^^^
Add Spack to the Shell
^^^^^^^^^^^^^^^^^^^^^^^^
Once you have cloned Spack, we recommend sourcing the appropriate script
for your shell:
.. code-block:: console
# For bash/zsh/sh
$ . spack/share/spack/setup-env.sh
# For tcsh/csh
$ source spack/share/spack/setup-env.csh
# For fish
$ . spack/share/spack/setup-env.fish
That's it! You're ready to use Spack.
Sourcing these files will put the ``spack`` command in your ``PATH``, set
up your ``MODULEPATH`` to use Spack's packages, and add other useful
shell integration for :ref:`certain commands <packaging-shell-support>`,
:ref:`environments <environments>`, and :ref:`modules <modules>`. For
``bash``, it also sets up tab completion.
If you do not want to use Spack's shell support, you can always just run
the ``spack`` command directly from ``spack/bin/spack``.
^^^^^^^^^^^^^^^^^^
Check Installation
^^^^^^^^^^^^^^^^^^
With Spack installed, you should be able to run some basic Spack
commands. For example:
.. command-output:: spack spec netcdf-c
^^^^^^^^^^^^^^^^^
Clean Environment
^^^^^^^^^^^^^^^^^

It is recommended that Spack users run with a *clean
environment*, especially for ``PATH``. Only software that comes with
the system, or that you know you wish to use with Spack, should be
included. This procedure will avoid many strange build errors.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Optional: Bootstrapping clingo
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Spack supports using clingo as an external solver to compute which software
needs to be installed. If you have a default compiler supporting C++14 Spack
can automatically bootstrap this tool from sources the first time it is
needed:
.. code-block:: console
$ spack solve zlib
[+] /usr (external bison-3.0.4-wu5pgjchxzemk5ya2l3ddqug2d7jv6eb)
[+] /usr (external cmake-3.19.4-a4kmcfzxxy45mzku4ipmj5kdiiz5a57b)
[+] /usr (external python-3.6.9-x4fou4iqqlh5ydwddx3pvfcwznfrqztv)
==> Installing re2c-1.2.1-e3x6nxtk3ahgd63ykgy44mpuva6jhtdt
[ ... ]
==> Optimization: [0, 0, 0, 0, 0, 1, 0, 0, 0]
zlib@1.2.11%gcc@10.1.0+optimize+pic+shared arch=linux-ubuntu18.04-broadwell
If you want to speed up bootstrapping, you may try to search for ``cmake`` and ``bison``
on your system:
.. code-block:: console
$ spack external find cmake bison
==> The following specs have been detected on this system and added to /home/spack/.spack/packages.yaml
bison@3.0.4 cmake@3.19.4
All the tools Spack needs for its own functioning are installed in a separate store, which lives
under the ``${HOME}/.spack`` directory. The software installed there can be queried with:
.. code-block:: console
$ spack find --bootstrap
==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
==> 3 installed packages
-- linux-ubuntu18.04-x86_64 / gcc@10.1.0 ------------------------
clingo-bootstrap@spack python@3.6.9 re2c@1.2.1
In case it's needed, the bootstrap store can also be cleaned with:
.. code-block:: console
$ spack clean -b
==> Removing software in "/home/spack/.spack/bootstrap/store"
^^^^^^^^^^^^^^^^^^^^^^^^^^
Optional: Alternate Prefix
^^^^^^^^^^^^^^^^^^^^^^^^^^

Each copy of spack installs packages into its own ``$PREFIX/opt``
directory.
^^^^^^^^^^
Next Steps
^^^^^^^^^^
In theory, Spack doesn't need any additional installation; just
download and run! But in real life, additional steps are usually
required before Spack can work in a practical sense. Read on...
For example, to declare an OpenMPI installed in ``/opt/local``, one would use:
packages:
openmpi:
externals:
- spec: openmpi@1.10.1
prefix: /opt/local
buildable: False
In general, Spack is easier to use and more reliable if it builds all of
its own dependencies. To use an external OpenSSL instead, add the following
to ``~/.spack/packages.yaml``:
packages:
openssl:
externals:
- spec: openssl@1.0.2g
prefix: /usr
buildable: False
To use an external BLAS/LAPACK implementation such as netlib-lapack, you may want
to add the following to ``packages.yaml``:
packages:
netlib-lapack:
externals:
- spec: netlib-lapack@3.6.1
prefix: /usr
paths:
netlib-lapack@3.6.1: /usr
buildable: False
all:
providers:
blas: [netlib-lapack]
lapack: [netlib-lapack]

^^^
Git
^^^
Some Spack packages use ``git`` to download, which might not work on
some computers; for example, errors have been
encountered on a Macintosh during ``spack install julia@master``.

Here's an example of an external configuration for cray modules:
packages:
mpich:
externals:
- spec: "mpich@7.3.1%gcc@5.2.0 arch=cray_xc-haswell-CNL10"
modules:
- cray-mpich
- spec: "mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-haswell-CNL10"
modules:
- cray-mpich
all:
providers:
mpi: [mpich]
.. note::
For Cray-provided packages, it is best to use ``modules:`` instead of ``prefix:``
in ``packages.yaml``, because the Cray Programming Environment heavily relies on
modules (e.g., loading the ``cray-mpich`` module adds MPI libraries to the
compiler wrapper link line).
Here is an example of a full ``packages.yaml`` used at NERSC:
packages:
mpich:
externals:
- spec: "mpich@7.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-mpich
- spec: "mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-SuSE11-ivybridge"
modules:
- cray-mpich
buildable: False
netcdf:
externals:
- spec: "netcdf@4.3.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-netcdf
- spec: "netcdf@4.3.3.1%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-netcdf
buildable: False
hdf5:
externals:
- spec: "hdf5@1.8.14%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-hdf5
- spec: "hdf5@1.8.14%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-hdf5
buildable: False
all:
compiler: [gcc@5.2.0, intel@16.0.0.109]
environment variables may be propagated into containers that are not
using the Cray programming environment.
To ensure that Spack does not autodetect the Cray programming
environment, unset the environment variable ``MODULEPATH``. This
will cause Spack to treat a linux container on a Cray system as a base
linux distro.
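For example, before invoking Spack inside such a container (a minimal
sketch):

.. code-block:: console

   $ unset MODULEPATH
   $ spack spec zlib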

or refer to the full manual below.

.. toctree::
:maxdepth: 2
packaging_guide
build_systems
developer_guide
docker_for_developers
.. toctree::
:maxdepth: 2

This is a list of known bugs in Spack. It provides ways of getting around these
problems if you encounter them.
Variants are not properly forwarded to dependencies
---------------------------------------------------
**Status:** Expected to be fixed by Spack's new concretizer
Sometimes, a variant of a package can also affect how its dependencies are
built. For example, in order to build MPI support for a package, it may
@@ -49,29 +49,15 @@ A workaround is to explicitly activate the variants of dependencies as well:
See https://github.com/spack/spack/issues/267 and
https://github.com/spack/spack/issues/2546 for further details.
-----------------------------------------------
depends_on cannot handle recursive dependencies
-----------------------------------------------
**Status:** Not yet a work in progress
Although ``depends_on`` can handle any aspect of Spack's spec syntax,
it currently cannot handle recursive dependencies. If the ``^`` sigil
appears in a ``depends_on`` statement, the concretizer will hang.
For example, something like:
.. code-block:: python
depends_on('mfem+cuda ^hypre+cuda', when='+cuda')
should be rewritten as:
.. code-block:: python
depends_on('mfem+cuda', when='+cuda')
depends_on('hypre+cuda', when='+cuda')
See https://github.com/spack/spack/issues/17660 and
https://github.com/spack/spack/issues/11160 for more details.

Modules
=======
The use of module systems to manage user environment in a controlled way
is a common practice at HPC centers that is often embraced also by
individual programmers on their development machines. To support this
common practice Spack integrates with `Environment Modules
<http://modules.sourceforge.net/>`_ and `LMod
<http://lmod.readthedocs.io/en/latest/>`_ by providing post-install hooks
that generate module files and commands to manipulate them.
Modules are one of several ways you can use Spack packages. For other
options that may fit your use case better, you should also look at
:ref:`spack load <spack-load>` and :ref:`environments <environments>`.
----------------------------
Using module files via Spack
----------------------------

For example, you might need a command like the following
to load the ``cmake`` module:

.. code-block:: console
$ module load cmake-3.7.2-gcc-6.3.0-fowuuby
Neither of these is particularly pretty, easy to remember, or
easy to type. Luckily, Spack has its own interface for using modules.
^^^^^^^^^^^^^
Shell support
^^^^^^^^^^^^^
To enable additional Spack commands for loading and unloading module files,
and to add the correct path to ``MODULEPATH``, you need to source the appropriate
setup file in the ``$SPACK_ROOT/share/spack`` directory. This will activate shell
support for the commands that need it. For ``bash``, ``ksh`` or ``zsh`` users:
.. code-block:: console
$ . ${SPACK_ROOT}/share/spack/setup-env.sh
For ``csh`` and ``tcsh`` instead:
.. code-block:: console
$ set SPACK_ROOT ...
$ source $SPACK_ROOT/share/spack/setup-env.csh
Note that in the latter case it is necessary to explicitly set ``SPACK_ROOT``
before sourcing the setup file (you will get a meaningful error message
if you don't).
If you want to have Spack's shell support available on the command line at
any login you can put this source line in one of the files that are sourced
at startup (like ``.profile``, ``.bashrc`` or ``.cshrc``). Be aware though
that the startup time may be slightly increased because of that.
.. _cmd-spack-load:
^^^^^^^^^^^^^^^^^^^^^^^
``spack load / unload``
^^^^^^^^^^^^^^^^^^^^^^^
Once you have shell support enabled you can use the same spec syntax
and shortened package names you use everywhere else in Spack.
For example this will add the ``mpich`` package built with ``gcc`` to your path:
.. code-block:: console
$ spack install mpich %gcc@4.4.7
# ... wait for install ...
$ spack load mpich %gcc@4.4.7
$ which mpicc
~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4/bin/mpicc
These commands will add appropriate directories to your ``PATH``,
``MANPATH``, ``CPATH``, and ``LD_LIBRARY_PATH``. When you no longer
want to use a package, you can unload it similarly:
.. code-block:: console
$ spack unload mpich %gcc@4.4.7
.. note::
The ``load`` and ``unload`` subcommands are only available if you
have enabled Spack's shell support. These commands DO NOT use the
underlying Spack-generated module files.
^^^^^^^^^^^^^^^
Ambiguous specs
^^^^^^^^^^^^^^^
If a spec used with load/unload is ambiguous (i.e. more than one
installed package matches it), then Spack will warn you:
.. code-block:: console
$ spack load libelf
==> Error: libelf matches multiple packages.
Matching packages:
libelf@0.8.13%gcc@4.4.7 arch=linux-debian7-x86_64
libelf@0.8.13%intel@15.0.0 arch=linux-debian7-x86_64
Use a more specific spec
You can either type the ``spack load`` command again with a fully
qualified argument, or you can add just enough extra constraints to
identify one package. For example, above, the key differentiator is
that one ``libelf`` is built with the Intel compiler, while the other
used ``gcc``. You could therefore just type:
.. code-block:: console
$ spack load libelf %intel
To identify just the one built with the Intel compiler.
.. _cmd-spack-module-loads:
^^^^^^^^^^^^^^^^^^^^^^^^^^
``spack module tcl loads``
^^^^^^^^^^^^^^^^^^^^^^^^^^
In some cases, it is desirable to use a Spack-generated module, rather
than relying on Spack's built-in user-environment modification
capabilities. To translate a spec into a module name, use ``spack
module tcl loads`` or ``spack module lmod loads`` depending on the
module system desired.
To load not just a module, but also all the modules it depends on, use
the ``--dependencies`` option. This is not required for most modules
because Spack builds binaries with RPATH support. However, not all
packages use RPATH to find their dependencies: this can be true in
particular for Python extensions, which are currently *not* built with
RPATH.
Scripts to load modules recursively may be made with the command:
.. code-block:: console
$ spack module tcl loads --dependencies <spec>
An equivalent alternative using `process substitution <http://tldp.org/LDP/abs/html/process-sub.html>`_ is:
.. code-block:: console
$ source <( spack module tcl loads --dependencies <spec> )
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Module Commands for Shell Scripts
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Although Spack is flexible, the ``module`` command is much faster than
``spack load``. This could become an issue when emitting a series of ``spack load``
commands inside a shell script. By adding the ``--dependencies`` flag,
``spack module tcl loads`` may also be used to generate code that can be
cut-and-pasted into a shell script. For example:
.. code-block:: console
$ spack module tcl loads --dependencies py-numpy git
# bzip2@1.0.6%gcc@4.9.3=linux-x86_64
module load bzip2-1.0.6-gcc-4.9.3-ktnrhkrmbbtlvnagfatrarzjojmkvzsx
# ncurses@6.0%gcc@4.9.3=linux-x86_64
module load ncurses-6.0-gcc-4.9.3-kaazyneh3bjkfnalunchyqtygoe2mncv
# zlib@1.2.8%gcc@4.9.3=linux-x86_64
module load zlib-1.2.8-gcc-4.9.3-v3ufwaahjnviyvgjcelo36nywx2ufj7z
# sqlite@3.8.5%gcc@4.9.3=linux-x86_64
module load sqlite-3.8.5-gcc-4.9.3-a3eediswgd5f3rmto7g3szoew5nhehbr
# readline@6.3%gcc@4.9.3=linux-x86_64
module load readline-6.3-gcc-4.9.3-se6r3lsycrwxyhreg4lqirp6xixxejh3
# python@3.5.1%gcc@4.9.3=linux-x86_64
module load python-3.5.1-gcc-4.9.3-5q5rsrtjld4u6jiicuvtnx52m7tfhegi
# py-setuptools@20.5%gcc@4.9.3=linux-x86_64
module load py-setuptools-20.5-gcc-4.9.3-4qr2suj6p6glepnedmwhl4f62x64wxw2
# py-nose@1.3.7%gcc@4.9.3=linux-x86_64
module load py-nose-1.3.7-gcc-4.9.3-pwhtjw2dvdvfzjwuuztkzr7b4l6zepli
# openblas@0.2.17%gcc@4.9.3+shared=linux-x86_64
module load openblas-0.2.17-gcc-4.9.3-pw6rmlom7apfsnjtzfttyayzc7nx5e7y
# py-numpy@1.11.0%gcc@4.9.3+blas+lapack=linux-x86_64
module load py-numpy-1.11.0-gcc-4.9.3-mulodttw5pcyjufva4htsktwty4qd52r
# curl@7.47.1%gcc@4.9.3=linux-x86_64
module load curl-7.47.1-gcc-4.9.3-ohz3fwsepm3b462p5lnaquv7op7naqbi
# autoconf@2.69%gcc@4.9.3=linux-x86_64
module load autoconf-2.69-gcc-4.9.3-bkibjqhgqm5e3o423ogfv2y3o6h2uoq4
# cmake@3.5.0%gcc@4.9.3~doc+ncurses+openssl~qt=linux-x86_64
module load cmake-3.5.0-gcc-4.9.3-x7xnsklmgwla3ubfgzppamtbqk5rwn7t
# expat@2.1.0%gcc@4.9.3=linux-x86_64
module load expat-2.1.0-gcc-4.9.3-6pkz2ucnk2e62imwakejjvbv6egncppd
# git@2.8.0-rc2%gcc@4.9.3+curl+expat=linux-x86_64
module load git-2.8.0-rc2-gcc-4.9.3-3bib4hqtnv5xjjoq5ugt3inblt4xrgkd
The script may be further edited by removing unnecessary modules.
^^^^^^^^^^^^^^^
Module Prefixes
^^^^^^^^^^^^^^^
On some systems, modules are automatically prefixed with a certain
string; ``spack module tcl loads`` needs to know about that prefix when it
issues ``module load`` commands. Add the ``--prefix`` option to your
``spack module tcl loads`` commands if this is necessary.
For example, consider the following on one system:
.. code-block:: console
$ module avail
linux-SuSE11-x86_64/antlr-2.7.7-gcc-5.3.0-bdpl46y
$ spack module tcl loads antlr # WRONG!
# antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64
module load antlr-2.7.7-gcc-5.3.0-bdpl46y
$ spack module tcl loads --prefix linux-SuSE11-x86_64/ antlr
# antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64
module load linux-SuSE11-x86_64/antlr-2.7.7-gcc-5.3.0-bdpl46y
-------------------------
Module file customization
@@ -394,32 +589,6 @@ that are already in the LMod hierarchy.
For hierarchies that are deeper than three layers ``lmod spider`` may have some issues.
See `this discussion on the LMod project <https://github.com/TACC/Lmod/issues/114>`_.
.. _customize-env-modifications:
"""""""""""""""""""""""""""""""""""
Customize environment modifications
"""""""""""""""""""""""""""""""""""
You can control which prefixes in a Spack package are added to environment
variables with the ``prefix_inspections`` section; this section maps relative
prefixes to the list of environment variables which should be updated with
those prefixes.
.. code-block:: yaml
modules:
prefix_inspections:
bin:
- PATH
lib:
- LIBRARY_PATH
'':
- CMAKE_PREFIX_PATH
In this case, for a Spack package ``foo`` installed to ``/spack/prefix/foo``,
the generated module file for ``foo`` would update ``PATH`` to contain
``/spack/prefix/foo/bin``.
""""""""""""""""""""""""""""""""""""
Filter out environment modifications
""""""""""""""""""""""""""""""""""""
@@ -528,135 +697,3 @@ subcommand is ``rm``:
that already exist will ask for a confirmation by default. If
the command is used in a script, it is possible to pass the
``-y`` argument, which will skip this safety measure.
.. _modules-in-shell-scripts:
------------------------------------
Using Spack modules in shell scripts
------------------------------------
To enable additional Spack commands for loading and unloading
module files, and to add the correct path to ``MODULEPATH``, you need to
source the appropriate setup file. Assuming Spack is installed in
``$SPACK_ROOT``, run the appropriate command for your shell:
.. code-block:: console
# For bash/zsh/sh
$ . $SPACK_ROOT/share/spack/setup-env.sh
# For tcsh/csh
$ source $SPACK_ROOT/share/spack/setup-env.csh
# For fish
$ . $SPACK_ROOT/share/spack/setup-env.fish
If you want to have Spack's shell support available on the command line
at any login you can put this source line in one of the files that are
sourced at startup (like ``.profile``, ``.bashrc`` or ``.cshrc``). Be
aware that the shell startup time may increase slightly as a result.

@@ -10,8 +10,8 @@ Package List
============
This is a list of things you can install using Spack. It is
automatically generated based on the packages in this Spack
version.
.. raw:: html
:file: package_list.html

@@ -645,7 +645,7 @@ multiple fields based on delimiters such as ``.``, ``-`` etc. Then
matching fields are compared using the rules below:
#. The following develop-like strings are greater (newer) than all
numbers and are ordered as ``develop > main > master > head > trunk``.
#. Numbers are all less than the chosen develop-like strings above,
and are sorted numerically.
@@ -1778,18 +1778,8 @@ RPATHs in Spack are handled in one of three ways:
Parallel builds
---------------
Spack supports parallel builds on an individual package and at the
installation level. Package-level parallelism is established by the
``--jobs`` option and its configuration and package recipe equivalents.
Installation-level parallelism is driven by the DAG(s) of the requested
package or packages.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Package-level build parallelism
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
By default, Spack will invoke ``make()``, or any other similar tool,
with a ``-j <njobs>`` argument, so that builds run in parallel.
The parallelism is determined by the value of the ``build_jobs`` entry
in ``config.yaml`` (see :ref:`here <build-jobs>` for more details on
how this value is computed).
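For reference, a minimal sketch of what that entry looks like in
``config.yaml`` (the value ``16`` is only illustrative):
.. code-block:: yaml

   config:
     build_jobs: 16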
@@ -1837,43 +1827,6 @@ you set ``parallel`` to ``False`` at the package level, then each call
to ``make()`` will be sequential by default, but packagers can call
``make(parallel=True)`` to override it.
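As a minimal sketch of both levels of control (``Mypackage`` is a
made-up name):
.. code-block:: python

   class Mypackage(Package):
       # Package-level default: calls to make() run sequentially
       parallel = False

       def install(self, spec, prefix):
           make()                          # sequential, inherits parallel = False
           make('install', parallel=True)  # per-call override re-enables -j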
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Install-level build parallelism
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Spack supports the concurrent installation of packages within a Spack
instance across multiple processes using file system locks. This
parallelism is separate from the package-level parallelism achieved through build
systems' use of the ``-j <njobs>`` option. With install-level parallelism,
processes coordinate the installation of the dependencies of specs
provided on the command line and as part of an environment build with
only **one process** being allowed to install a given package at a time.
Refer to :ref:`Dependencies` for more information on dependencies and
:ref:`installing-environment` for how to install an environment.
Concurrent processes may be any combination of interactive sessions and
batch jobs, which means a ``spack install`` can be running in a terminal
window while a batch job is running ``spack install`` on the same or
overlapping dependencies without any process trying to re-do the work of
another.
For example, if you are using SLURM, you could launch an installation
of ``mpich`` using the following command:
.. code-block:: console
$ srun -N 2 -n 8 spack install -j 4 mpich@3.3.2
This will create eight concurrent four-job installations on two different
nodes.
.. note::
The effective parallelism will be based on the maximum number of
packages that can be installed at the same time, which is limited
by the number of packages with no (remaining) uninstalled dependencies.
.. _dependencies:
------------
@@ -2014,29 +1967,22 @@ exactly what kind of a dependency you need. For example:
depends_on('cmake', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('libelf', type=('build', 'link'))
depends_on('py-pytest', type='test')
The following dependency types are available:
* **"build"**: the dependency will be added to the ``PATH`` and
``PYTHONPATH`` at build-time.
* **"link"**: the dependency will be added to Spack's compiler
wrappers, automatically injecting the appropriate linker flags,
including ``-I``, ``-L``, and RPATH/RUNPATH handling.
* **"run"**: the dependency will be added to the ``PATH`` and
``PYTHONPATH`` at run-time. This is true for both ``spack load``
and the module files Spack writes.
* **"test"**: the dependency will be added to the ``PATH`` and
``PYTHONPATH`` at build-time. The only difference between
"build" and "test" is that test dependencies are only built
if the user requests unit tests with ``spack install --test``.
* **"build"**: made available during the project's build. The package will
be added to ``PATH``, the compiler include paths, and ``PYTHONPATH``.
Other projects which depend on this one will not have these modified
(building project X doesn't need project Y's build dependencies).
* **"link"**: the project is linked to by the project. The package will be
added to the current package's ``rpath``.
* **"run"**: the project is used by the project at runtime. The package will
be added to ``PATH`` and ``PYTHONPATH``.
One of the advantages of the ``build`` dependency type is that although the
dependency needs to be installed in order for the package to be built, it
can be uninstalled without concern afterwards. ``link`` and ``run`` disallow
this because uninstalling the dependency would break the package. Another
consequence of this is that ``build``-only dependencies do not affect the
hash of the package. The same is true for ``test`` dependencies.
If the dependency type is not specified, Spack uses a default of
``('build', 'link')``. This is the common case for compiler languages.
@@ -2057,8 +2003,7 @@ package. In that case, you could say something like:
.. code-block:: python
variant('mpi', default=False, description='Enable MPI support')
depends_on('mpi', when='+mpi')
``when`` can include constraints on the variant, version, compiler, etc. and
@@ -3163,7 +3108,7 @@ differ from package to package. In order to make the ``install()`` method
independent of the choice of ``Blas`` implementation, each package which
provides it implements ``@property def blas_libs(self):`` to return an object
of
`LibraryList <https://spack.readthedocs.io/en/latest/llnl.util.html#llnl.util.filesystem.LibraryList>`_
type which simplifies usage of a set of libraries.
The same applies to packages which provide ``Lapack`` and ``ScaLapack``.
Package developers are requested to use this interface. Common usage cases are:
@@ -3198,7 +3143,7 @@ Package developers are requested to use this interface. Common usage cases are:
For more information, see documentation of
`LibraryList <https://spack.readthedocs.io/en/latest/llnl.util.html#llnl.util.filesystem.LibraryList>`_
class.
@@ -3948,118 +3893,6 @@ using the ``run_before`` decorator.
.. _file-manipulation:
^^^^^^^^^^^^^
Install Tests
^^^^^^^^^^^^^
.. warning::
The API for adding and running install tests is not yet considered
stable and may change drastically in future releases. Packages with
upstreamed tests will be refactored to match changes to the API.
While build-tests are integrated with the build system, install tests
may be added to Spack packages to be run independently of the install
method.
Install tests may be added by defining a ``test`` method with the following signature:
.. code-block:: python
def test(self):
These tests will be run in an environment set up to provide access to
this package and all of its dependencies, including ``test``-type
dependencies. Inside the ``test`` method, standard python ``assert``
statements and other error reporting mechanisms can be used. Spack
will report any errors as a test failure.
Inside the test method, individual tests can be run separately (and
continue transparently after a test failure) using the ``run_test``
method. The signature for the ``run_test`` method is:
.. code-block:: python
def run_test(self, exe, options=[], expected=[], status=0, installed=False,
purpose='', skip_missing=False, work_dir=None):
This method will operate in ``work_dir`` if one is specified. It will
search for an executable in the ``PATH`` variable named ``exe``, and
if ``installed=True`` it will fail if that executable does not come
from the prefix of the package being tested. If the executable is not
found, it will fail the test unless ``skip_missing`` is set to
``True``. The executable will be run with the options specified, and
the return code will be checked against the ``status`` argument, which
can be an integer or list of integers. Spack will also check that
every string in ``expected`` is a regex matching part of the output of
the executable. The ``purpose`` argument is recorded in the test log
for debugging purposes.
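For instance, a hypothetical package that installs an executable
``foo`` might check its version banner like this (the names, options,
and expected output below are made up for illustration):
.. code-block:: python

   def test(self):
       self.run_test(
           'foo',                        # executable searched for in PATH
           options=['--version'],        # arguments passed to the executable
           expected=[r'foo version'],    # regexes matched against the output
           status=0,                     # acceptable return code(s)
           installed=True,               # must come from this package's prefix
           purpose='check that foo prints a version banner')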
""""""""""""""""""""""""""""""""""""""
Install tests that require compilation
""""""""""""""""""""""""""""""""""""""
Some tests may require access to the compiler with which the package
was built, especially to test library-only packages. To ensure the
compiler is configured as part of the test environment, set the
attribute ``tests_require_compiler = True`` on the package. The
compiler will be available through the canonical environment variables
(``CC``, ``CXX``, ``FC``, ``F77``) in the test environment.
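A minimal sketch of setting this attribute (``Mylib`` is a placeholder
name):
.. code-block:: python

   class Mylib(Package):
       # Expose CC, CXX, FC and F77 in the install-test environment
       tests_require_compiler = True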
""""""""""""""""""""""""""""""""""""""""""""""""
Install tests that require build-time components
""""""""""""""""""""""""""""""""""""""""""""""""
Some packages cannot be easily tested without components from the
build-time test suite. For those packages, the
``cache_extra_test_sources`` method can be used.
.. code-block:: python
@run_after('install')
def cache_test_sources(self):
srcs = ['./tests/foo.c', './tests/bar.c']
self.cache_extra_test_sources(srcs)
This method will copy the listed files into the metadata directory
of the package at the end of the install phase of the build. They will
be available to the test method in the directory
``self._extra_tests_path``.
While source files are generally recommended, for many packages
binaries may also technically be cached in this way for later testing.
"""""""""""""""""""""
Running install tests
"""""""""""""""""""""
Install tests can be run using the ``spack test run`` command. The
``spack test run`` command will create a ``test suite`` out of the
specs provided to it, or if no specs are provided it will test all
specs in the active environment, or all specs installed in Spack if no
environment is active. Test suites can be named using the ``--alias``
option; test suites not aliased will use the content hash of their
specs as their name.
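For example, assuming a package ``mylib`` is installed and defines a
``test`` method (the spec and alias below are hypothetical):
.. code-block:: console

   $ spack test run --alias mylib-tests mylib
   $ spack test results mylib-tests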
Packages with install tests can be queried using the ``spack test list``
command, which outputs all installed packages with defined ``test``
methods.
Test suites can be found using the ``spack test find`` command. It
will list all test suites that have been run and have not been removed
using the ``spack test remove`` command. The ``spack test remove``
command will remove tests to declutter the test stage. The ``spack
test results`` command will show results for completed test suites.
The test stage is the working directory for all install tests run with
Spack. By default, Spack uses ``~/.spack/test`` as the test stage. The
test stage can be set in the high-level config:
.. code-block:: yaml
config:
test_stage: /path/to/stage
---------------------------
File manipulation functions
---------------------------
@@ -4221,223 +4054,21 @@ File functions
Making a package discoverable with ``spack external find``
----------------------------------------------------------
The simplest way to make a package discoverable with
:ref:`spack external find <cmd-spack-external-find>` is to:
1. Define the executables associated with the package
2. Implement a method to determine the versions of these executables
^^^^^^^^^^^^^^^^^
Minimal detection
^^^^^^^^^^^^^^^^^
The first step is fairly simple, as it requires only to
specify a package level ``executables`` attribute:
.. code-block:: python
class Foo(Package):
# Each string provided here is treated as a regular expression, and
# would match for example 'foo', 'foobar', and 'bazfoo'.
executables = ['foo']
This attribute must be a list of strings. Each string is a regular
expression (e.g. 'gcc' would match 'gcc', 'gcc-8.3', 'my-weird-gcc', etc.) used to
determine a set of system executables that might be part of this package. Note
that to match only executables named 'gcc' the regular expression ``'^gcc$'``
must be used.
Finally, to determine the version of each executable the ``determine_version``
method must be implemented:
.. code-block:: python
@classmethod
def determine_version(cls, exe):
"""Return either the version of the executable passed as argument
or ``None`` if the version cannot be determined.
Args:
exe (str): absolute path to the executable being examined
"""
This method receives as input the path to a single executable and must return
as output its version as a string; if the user cannot determine the version
or determines that the executable is not an instance of the package, they can
return None and the exe will be discarded as a candidate.
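As a minimal sketch, for a made-up tool ``foo`` (the ``--version``
flag and the regular expression are assumptions for illustration):
.. code-block:: python

   import re
   from spack.util.executable import Executable

   @classmethod
   def determine_version(cls, exe):
       # Run the candidate executable and parse its reported version;
       # return None if the output does not look like this package.
       output = Executable(exe)('--version', output=str, error=str)
       match = re.search(r'foo version (\S+)', output)
       return match.group(1) if match else None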
Implementing the two steps above is mandatory, and gives the package the
basic ability to detect if a spec is present on the system at a given version.
.. note::
Any executable for which the ``determine_version`` method returns ``None``
will be discarded and won't appear in later stages of the workflow described below.
^^^^^^^^^^^^^^^^^^^^^^^^
Additional functionality
^^^^^^^^^^^^^^^^^^^^^^^^
Besides the two mandatory steps described above, there are also optional
methods that can be implemented to either increase the amount of details
being detected or improve the robustness of the detection logic in a package.
""""""""""""""""""""""""""""""
Variants and custom attributes
""""""""""""""""""""""""""""""
The ``determine_variants`` method can be optionally implemented in a package
to detect additional details of the spec:
.. code-block:: python
@classmethod
def determine_variants(cls, exes, version_str):
"""Return either a variant string, a tuple of a variant string
and a dictionary of extra attributes that will be recorded in
packages.yaml or a list of those items.
Args:
exes (list of str): list of executables (absolute paths) that
live in the same prefix and share the same version
version_str (str): version associated with the list of
executables, as detected by ``determine_version``
"""
This method takes as input a list of executables that live in the same prefix and
share the same version string, and returns either:
1. A variant string
2. A tuple of a variant string and a dictionary of extra attributes
3. A list of items matching either 1 or 2 (if multiple specs are detected
from the set of executables)
If extra attributes are returned, they will be recorded in ``packages.yaml``
and be available for later reuse. As an example, the ``gcc`` package will record
by default the different compilers found and an entry in ``packages.yaml``
would look like:
.. code-block:: yaml
packages:
gcc:
externals:
- spec: 'gcc@9.0.1 languages=c,c++,fortran'
prefix: /usr
extra_attributes:
compilers:
c: /usr/bin/x86_64-linux-gnu-gcc-9
c++: /usr/bin/x86_64-linux-gnu-g++-9
fortran: /usr/bin/x86_64-linux-gnu-gfortran-9
This allows us, for instance, to keep track of executables that would be named
differently if built by Spack (e.g. ``x86_64-linux-gnu-gcc-9``
instead of just ``gcc``).
.. TODO: we need to gather some more experience on overriding 'prefix'
and other special keywords in extra attributes, but as soon as we are
confident that this is the way to go we should document the process.
See https://github.com/spack/spack/pull/16526#issuecomment-653783204
"""""""""""""""""""""""""""
Filter matching executables
"""""""""""""""""""""""""""
Sometimes defining the appropriate regex for the ``executables``
attribute might prove to be difficult, especially if one has to
deal with corner cases or exclude "red herrings". To help keep
the regular expressions as simple as possible, each package can
optionally implement a ``filter_detected_exes`` method:
.. code-block:: python
@classmethod
def filter_detected_exes(cls, prefix, exes_in_prefix):
"""Return a filtered list of the executables in prefix"""
which takes as input a prefix and a list of matching executables and
returns a filtered list of said executables.
Using this method has the advantage of allowing custom logic for
filtering, and does not restrict the user to regular expressions
only. Consider the case of detecting the GNU C++ compiler. If we
try to search for executables that match ``g++``, that would have
the unwanted side effect of selecting also ``clang++`` - which is
a C++ compiler provided by another package - if present on the system.
Trying to select executables that contain ``g++`` but not ``clang``
would be quite complicated to do using regex only. Employing the
``filter_detected_exes`` method it becomes:
.. code-block:: python
class Gcc(Package):
executables = ['g++']
def filter_detected_exes(cls, prefix, exes_in_prefix):
return [x for x in exes_in_prefix if 'clang' not in x]
Another possibility that this method opens is to apply certain
filtering logic only when specific conditions are met (e.g. applying
one rule on a given OS and a different one elsewhere).
^^^^^^^^^^^^^^^^^^
Validate detection
^^^^^^^^^^^^^^^^^^
To increase detection robustness, packagers may also implement a method
to validate the detected Spec objects:
.. code-block:: python
@classmethod
def validate_detected_spec(cls, spec, extra_attributes):
"""Validate a detected spec. Raise an exception if validation fails."""
This method receives a detected spec along with its extra attributes and can be
used to check that certain conditions are met by the spec. Packagers can either
use assertions or raise an ``InvalidSpecDetected`` exception when the check fails.
In case the conditions are not honored the spec will be discarded and any message
associated with the assertion or the exception will be logged as the reason for
discarding it.
As an example, a package that wants to check that the ``compilers`` attribute is
in the extra attributes can implement this method like this:
.. code-block:: python
@classmethod
def validate_detected_spec(cls, spec, extra_attributes):
"""Check that 'compilers' is in the extra attributes."""
msg = ('the extra attribute "compilers" must be set for '
'the detected spec "{0}"'.format(spec))
assert 'compilers' in extra_attributes, msg
or like this:
.. code-block:: python
@classmethod
def validate_detected_spec(cls, spec, extra_attributes):
"""Check that 'compilers' is in the extra attributes."""
if 'compilers' not in extra_attributes:
msg = ('the extra attribute "compilers" must be set for '
'the detected spec "{0}"'.format(spec))
raise InvalidSpecDetected(msg)
.. _determine_spec_details:
^^^^^^^^^^^^^^^^^^^^^^^^^
Custom detection workflow
^^^^^^^^^^^^^^^^^^^^^^^^^
In the rare case when the mechanisms described so far don't fit the
detection of a package, the implementation of all the methods above
can be disregarded and instead a custom ``determine_spec_details``
method can be implemented directly in the package class (note that
the definition of the ``executables`` attribute is still required):
.. code-block:: python
@classmethod
def determine_spec_details(cls, prefix, exes_in_prefix):
# exes_in_prefix = a set of paths, each path is an executable
# prefix = a prefix that is common to each path in exes_in_prefix
@@ -4445,13 +4076,14 @@ the definition of the ``executables`` attribute is still required):
# the package. Return one or more Specs for each instance of the
# package which is thought to be installed in the provided prefix
This method takes as input a set of discovered executables (which match
those specified by the user) as well as a common prefix shared by all
of those executables. The function must return one or more :py:class:`spack.spec.Spec` associated
with the executables (it can also return ``None`` to indicate that no
provided executables are associated with the package).
As an example, consider a made-up package called ``foo-package`` which
builds an executable called ``foo``. ``FooPackage`` would appear as
follows:
@@ -4475,12 +4107,10 @@ follows:
return
# This implementation is lazy and only checks the first candidate
exe_path = candidates[0]
exe = Executable(exe_path)
output = exe('--version', output=str, error=str)
version_str = ... # parse output for version string
return Spec.from_detection(
'foo-package@{0}'.format(version_str)
)
.. _package-lifecycle:
@@ -4844,3 +4474,119 @@ might write:
DWARF_PREFIX = $(spack location --install-dir libdwarf)
CXXFLAGS += -I$DWARF_PREFIX/include
CXXFLAGS += -L$DWARF_PREFIX/lib
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Build System Configuration Support
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Imagine a developer creating a CMake or Autotools-based project in a
local directory, which depends on libraries A-Z. Once Spack has
installed those dependencies, one would like to run ``cmake`` with
appropriate command line and environment so CMake can find them. The
``spack setup`` command does this conveniently, producing a CMake
configuration that is essentially the same as how Spack *would have*
configured the project. This can be demonstrated with a usage
example:
.. code-block:: console
$ cd myproject
$ spack setup myproject@local
$ mkdir build; cd build
$ ../spconfig.py ..
$ make
$ make install
Notes:
* Spack must have ``myproject/package.py`` in its repository for
this to work.
* ``spack setup`` produces the executable script ``spconfig.py`` in
the local directory, and also creates the module file for the
package. ``spconfig.py`` is normally run from the user's
out-of-source build directory.
* The version number given to ``spack setup`` is arbitrary, just
like ``spack diy``. ``myproject/package.py`` does not need to
have any valid downloadable versions listed (typical when a
project is new).
* ``spconfig.py`` produces a CMake configuration that *does not* use the
Spack wrappers. Any resulting binaries *will not* use RPATH,
unless the user has enabled it. This is recommended for
development purposes, not production.
* ``spconfig.py`` is human readable, and can serve as a developer
reference of what dependencies are being used.
* ``make install`` installs the package into the Spack repository,
where it may be used by other Spack packages.
* CMake-generated makefiles re-run CMake in some circumstances. Use
of ``spconfig.py`` breaks this behavior, requiring the developer
to manually re-run ``spconfig.py`` when a ``CMakeLists.txt`` file
has changed.
^^^^^^^^^^^^
CMakePackage
^^^^^^^^^^^^
In order to enable ``spack setup`` functionality, the author of
``myproject/package.py`` must subclass from ``CMakePackage`` instead
of the standard ``Package`` superclass. Because CMake is
standardized, the packager does not need to tell Spack how to run
``cmake; make; make install``. Instead the packager only needs to
create (optional) methods ``configure_args()`` and ``configure_env()``, which
provide the arguments (as a list) and extra environment variables (as
a dict) to provide to the ``cmake`` command. Usually, these will
translate variant flags into CMake definitions. For example:
.. code-block:: python
def cmake_args(self):
spec = self.spec
return [
'-DUSE_EVERYTRACE=%s' % ('YES' if '+everytrace' in spec else 'NO'),
'-DBUILD_PYTHON=%s' % ('YES' if '+python' in spec else 'NO'),
'-DBUILD_GRIDGEN=%s' % ('YES' if '+gridgen' in spec else 'NO'),
'-DBUILD_COUPLER=%s' % ('YES' if '+coupler' in spec else 'NO'),
'-DUSE_PISM=%s' % ('YES' if '+pism' in spec else 'NO')
]
If needed, a packager may also override methods defined in
``StagedPackage`` (see below).
^^^^^^^^^^^^^
StagedPackage
^^^^^^^^^^^^^
``CMakePackage`` is implemented by subclassing the ``StagedPackage``
superclass, which breaks down the standard ``Package.install()``
method into several sub-stages: ``setup``, ``configure``, ``build``
and ``install``. Details:
* Instead of implementing the standard ``install()`` method, package
authors implement the methods for the sub-stages
``install_setup()``, ``install_configure()``,
``install_build()``, and ``install_install()``.
* The ``spack install`` command runs the sub-stages ``configure``,
``build`` and ``install`` in order. (The ``setup`` stage is
not run by default; see below).
* The ``spack setup`` command runs the sub-stages ``setup``
and a dummy install (to create the module file).
* The sub-stage install methods take no arguments (other than
``self``). The arguments ``spec`` and ``prefix`` to the standard
``install()`` method may be accessed via ``self.spec`` and
``self.prefix``.
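A skeletal sketch of a package written against this interface
(``Myproject`` is a placeholder and the method bodies are elided):
.. code-block:: python

   class Myproject(StagedPackage):
       """Hypothetical package implementing the sub-stage methods."""

       def install_setup(self):
           pass   # run by `spack setup`

       def install_configure(self):
           pass   # first sub-stage run by `spack install`

       def install_build(self):
           pass   # second sub-stage run by `spack install`

       def install_install(self):
           pass   # final sub-stage; self.spec and self.prefix are available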
^^^^^^^^^^^^^
GNU Autotools
^^^^^^^^^^^^^
The ``setup`` functionality is currently only available for
CMake-based packages. Extending this functionality to GNU
Autotools-based packages would be easy (and should be done by a
developer who actively uses Autotools). Packages that use
non-standard build systems can gain ``setup`` functionality by
subclassing ``StagedPackage`` directly.
.. Emacs local variables
Local Variables:
fill-column: 79
End:

@@ -45,7 +45,7 @@ for setting up a build pipeline are as follows:
tags:
- <custom-tag>
script:
- spack env activate --without-view .
- spack ci generate
--output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
artifacts:
@@ -82,9 +82,9 @@ or Amazon Elastic Kubernetes Service (`EKS <https://aws.amazon.com/eks>`_), thou
topics are outside the scope of this document.
Spack's pipelines are now making use of the
`trigger <https://docs.gitlab.com/12.9/ee/ci/yaml/README.html#trigger>`_ syntax to run
dynamically generated
`child pipelines <https://docs.gitlab.com/12.9/ee/ci/parent_child_pipelines.html>`_.
Note that the use of dynamic child pipelines requires running Gitlab version
``>= 12.9``.
@@ -122,10 +122,6 @@ pipeline jobs.
Concretizes the specs in the active environment, stages them (as described in
:ref:`staging_algorithm`), and writes the resulting ``.gitlab-ci.yml`` to disk.
This sub-command takes two arguments, but the most useful is ``--output-file``,
which should be an absolute path (including file name) to the generated
pipeline, if the default (``./.gitlab-ci.yml``) is not desired.
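For example (the output path below is illustrative):
.. code-block:: console

   $ spack ci generate --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"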
.. _cmd-spack-ci-rebuild:
^^^^^^^^^^^^^^^^^^^^
@@ -136,10 +132,6 @@ This sub-command is responsible for ensuring a single spec from the release
environment is up to date on the remote mirror configured in the environment,
and as such, corresponds to a single job in the ``.gitlab-ci.yml`` file.
Rather than taking command-line arguments, this sub-command expects information
to be communicated via environment variables, which will typically come via the
``.gitlab-ci.yml`` job as ``variables``.
------------------------------------
A pipeline-enabled spack environment
------------------------------------
@@ -197,33 +189,15 @@ corresponds to a known gitlab runner, where the ``match`` section is used
in assigning a release spec to one of the runners, and the ``runner-attributes``
section is used to configure the spec/job for that particular runner.
Both the top-level ``gitlab-ci`` section as well as each ``runner-attributes``
section can also contain the following keys: ``image``, ``tags``, ``variables``,
``before_script``, ``script``, and ``after_script``. If any of these keys are
provided at the ``gitlab-ci`` level, they will be used as the defaults for any
``runner-attributes``, unless they are overridden in those sections. Specifying
any of these keys at the ``runner-attributes`` level generally overrides the
keys specified at the higher level, with a couple of exceptions. ``variables``
specified at both levels result in the two dictionaries getting merged in the
resulting generated job, with any duplicate variable names assigned the value
provided in the specific ``runner-attributes``. If ``tags`` are specified both
at the ``gitlab-ci`` level as well as the ``runner-attributes`` level, then the
lists of tags are combined, and any duplicates are removed.
See the section below on using a custom spack for an example of how these keys
could be used.
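As an illustration only (the tags, variable name, and values below are
hypothetical), the merge and override behavior described above might
look like this:
.. code-block:: yaml

   gitlab-ci:
     tags: [spack-kube]          # defaults for every runner-attributes
     variables:
       BUILD_MODE: release       # hypothetical variable
     mappings:
       - match:
           - os=ubuntu18.04
         runner-attributes:
           tags: [docker]        # combined with spack-kube, duplicates removed
           variables:
             BUILD_MODE: debug   # duplicate name: this value wins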
There are other pipeline options you can configure within the ``gitlab-ci`` section
as well. The ``bootstrap`` section allows you to specify lists of specs from
your ``definitions`` that should be staged ahead of the environment's ``specs`` (this
section is described in more detail below). The ``enable-artifacts-buildcache`` key
takes a boolean and determines whether the pipeline uses artifacts to store and
pass along the buildcaches from one stage to the next (the default if you don't
provide this option is ``False``). The ``enable-debug-messages`` key takes a boolean
and allows you to choose whether the pipeline build jobs are run as ``spack -d ci rebuild``
or just ``spack ci rebuild`` (the default is not to enable debug messages). The
``final-stage-rebuild-index`` section controls whether an extra job is added to the
end of your pipeline (in a stage by itself) which will regenerate the mirror's
buildcache index. Under normal operation, each pipeline job that rebuilds a package
@@ -246,11 +220,6 @@ progresses, this build group may have jobs added or removed. The url, project,
and site are used to specify the CDash instance to which build results should
be reported.
Take a look at the
`schema <https://github.com/spack/spack/blob/develop/lib/spack/spack/schema/gitlab_ci.py>`_
for the gitlab-ci section of the spack environment file, to see precisely what
syntax is allowed there.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Assignment of specs to runners
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -276,18 +245,7 @@ runners known to the gitlab instance. For Docker executor type runners, the
as well as an ``entrypoint`` to override whatever the default for that image is).
For other types of runners the ``variables`` key will be useful to pass any
information on to the runner that it needs to do its work (e.g. scheduler
parameters, etc.). Any ``variables`` provided here will be added, verbatim, to
each job.
The ``runner-attributes`` section also allows users to supply custom ``script``,
``before_script``, and ``after_script`` sections to be applied to every job
scheduled on that runner. This allows users to do any custom preparation or
cleanup tasks that fit their particular workflow, as well as completely
customize the rebuilding of a spec if they so choose. Spack will not generate
a ``before_script`` or ``after_script`` for jobs, but if you do not provide
a custom ``script``, spack will generate one for you that assumes your
``spack.yaml`` is at the root of the repository, activates that environment for
you, and invokes ``spack ci rebuild``.
.. _staging_algorithm:
@@ -298,8 +256,8 @@ Summary of ``.gitlab-ci.yml`` generation algorithm
All specs yielded by the matrix (or all the specs in the environment) have their
dependencies computed, and the entire resulting set of specs are staged together
before being run through the ``gitlab-ci/mappings`` entries, where each staged
spec is assigned a runner. "Staging" is the name given to the process of
figuring out in what order the specs should be built, taking into consideration
spec is assigned a runner. "Staging" is the name we have given to the process
of figuring out in what order the specs should be built, taking into consideration
Gitlab CI rules about jobs/stages. In the staging process the goal is to maximize
the number of jobs in any stage of the pipeline, while ensuring that the jobs in
any stage only depend on jobs in previous stages (since those jobs are guaranteed
@@ -310,7 +268,7 @@ a runner, the ``.gitlab-ci.yml`` is written to disk.
The short example provided above would result in the ``readline``, ``ncurses``,
and ``pkgconf`` packages getting staged and built on the runner chosen by the
``spack-k8s`` tag. In this example, spack assumes the runner is a Docker executor
type runner, and thus certain jobs will be run in the ``centos7`` container,
and others in the ``ubuntu-18.04`` container. The resulting ``.gitlab-ci.yml``
will contain 6 jobs in three stages. Once the jobs have been generated, the
@@ -369,12 +327,12 @@ Here's an example of what bootstrapping some compilers might look like:
# mappings similar to the example higher up in this description
...
The example above adds a list to the ``definitions`` called ``compiler-pkgs``
(you can add any number of these), which lists compiler packages that should
be staged ahead of the full matrix of release specs (in this example, only
readline). Then within the ``gitlab-ci`` section, note the addition of a
``bootstrap`` section, which can contain a list of items, each referring to
a list in the ``definitions`` section. These items can either
be a dictionary or a string. If you supply a dictionary, it must have a name
key whose value must match one of the lists in definitions and it can have a
``compiler-agnostic`` key whose value is a boolean. If you supply a string,
@@ -410,15 +368,13 @@ Using a custom spack in your pipeline
If your runners will not have a version of spack ready to invoke, or if for some
other reason you want to use a custom version of spack to run your pipelines,
this section provides an example of how you could take advantage of
user-provided pipeline scripts to accomplish this fairly simply. First, you
could use the GitLab user interface to create CI environment variables
containing the url and branch or tag you want to use (calling them, for
example, ``SPACK_REPO`` and ``SPACK_REF``), then refer to those in a custom shell
script invoked both from your pipeline generation job, as well as in your rebuild
jobs. Here's the ``generate-pipeline`` job from the top of this document,
updated to invoke a custom shell script that will clone and source a custom
spack:
.. code-block:: yaml
@@ -426,10 +382,12 @@ spack:
tags:
- <some-other-tag>
before_script:
- ./cloneSpack.sh
script:
- spack env activate --without-view .
- spack ci generate
--output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
after_script:
- rm -rf ./spack
@@ -437,68 +395,13 @@ spack:
paths:
- "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
And the ``cloneSpack.sh`` script could contain:
.. code-block:: bash
#!/bin/bash
git clone ${SPACK_REPO}
pushd ./spack
git checkout ${SPACK_REF}
popd
. "./spack/share/spack/setup-env.sh"
spack --version
Finally, you would also want your generated rebuild jobs to clone that version
of spack, so you would update your ``spack.yaml`` from above as follows:
.. code-block:: yaml
spack:
...
gitlab-ci:
mappings:
- match:
- os=ubuntu18.04
runner-attributes:
tags:
- spack-kube
image: spack/ubuntu-bionic
before_script:
- ./cloneSpack.sh
script:
- spack env activate --without-view .
- spack -d ci rebuild
after_script:
- rm -rf ./spack
Now all of the generated rebuild jobs will use the same shell script to clone
spack before running their actual workload. Note in the above example the
provision of a custom ``script`` section. The reason for this is to run
``spack ci rebuild`` in debug mode to get more information when builds fail.
Now imagine you have long pipelines with many specs to be built, and you
are pointing to a spack repository and branch that has a tendency to change
frequently, such as the main repo and its ``develop`` branch. If each child
job checks out the ``develop`` branch, that could result in some jobs running
with one SHA of spack, while later jobs run with another. To help avoid this
issue, the pipeline generation process saves global variables called
``SPACK_VERSION`` and ``SPACK_CHECKOUT_VERSION`` that capture the version
of spack used to generate the pipeline. While the ``SPACK_VERSION`` variable
simply contains the human-readable value produced by ``spack -V`` at pipeline
generation time, the ``SPACK_CHECKOUT_VERSION`` variable can be used in a
``git checkout`` command to make sure all child jobs checkout the same version
of spack used to generate the pipeline. To take advantage of this, you could
simply replace ``git checkout ${SPACK_REF}`` in the example ``cloneSpack.sh``
script above with ``git checkout ${SPACK_CHECKOUT_VERSION}``.
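With that single substitution, the pinned script would read (a sketch
based on the ``cloneSpack.sh`` example above):
.. code-block:: bash

   #!/bin/bash
   git clone ${SPACK_REPO}
   pushd ./spack
   git checkout ${SPACK_CHECKOUT_VERSION}
   popd
   . "./spack/share/spack/setup-env.sh"
   spack --version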
On the other hand, if you're pointing to a spack repository and branch under your
control, there may be no benefit in using the captured ``SPACK_CHECKOUT_VERSION``,
and you can instead just clone using the project CI variables you set (in the
earlier example these were ``SPACK_REPO`` and ``SPACK_REF``).
.. _ci_environment_variables:

@@ -703,6 +703,400 @@ environments:
Administrators might find things easier to maintain without the
added "heavyweight" state of a view.
------------------------------
Developing Software with Spack
------------------------------
For any project, one needs to assemble an
environment of that application's dependencies. You might consider
loading a series of modules or creating a filesystem view. This
approach, while obvious, has some serious drawbacks:
1. There is no guarantee that an environment created this way will be
consistent. Your application could end up with dependency A
expecting one version of MPI, and dependency B expecting another.
The linker will not be happy...
2. Suppose you need to debug a package deep within your software DAG.
If you build that package with a manual environment, then it
becomes difficult to have Spack auto-build things that depend on
it. That could be a serious problem, depending on how deep the
package in question is in your dependency DAG.
3. At its core, Spack is a sophisticated concretization algorithm that
matches up packages with appropriate dependencies and creates a
*consistent* environment for the package it's building. Writing a
list of ``spack load`` commands for your dependencies is at least
as hard as writing the same list of ``depends_on()`` declarations
in a Spack package. But it makes no use of Spack concretization
and is more error-prone.
4. Spack provides an automated, systematic way not just to find a
package's dependencies --- but also to build other packages on
top. Any Spack package can become a dependency for another Spack
package, offering a powerful vision of software re-use. If you
build your package A outside of Spack, then your ability to use it
as a building block for other packages in an automated way is
diminished: other packages depending on package A will not
be able to use Spack to fulfill that dependency.
5. If you are reading this manual, you probably love Spack. You're
probably going to write a Spack package for your software so
prospective users can install it with the least amount of pain.
Why should you go to additional work to find dependencies in your
development environment? Shouldn't Spack be able to help you build
your software based on the package you've already written?
In this section, we show how Spack can be used in the software
development process to greatest effect, and how development packages
can be seamlessly integrated into the Spack ecosystem. We will show
how this process works by example, assuming the software you are
creating is called ``mylib``.
^^^^^^^^^^^^^^^^^^^^^
Write the CMake Build
^^^^^^^^^^^^^^^^^^^^^
For now, the techniques in this section only work for CMake-based
projects, although they could be easily extended to other build
systems in the future. We will therefore assume you are using CMake
to build your project.
The ``CMakeLists.txt`` file should be written as normal. A few caveats:
1. Your project should produce binaries with RPATHs. This will ensure
that they work the same whether built manually or automatically by
Spack. For example:
.. code-block:: cmake
# enable @rpath in the install name for any shared library being built
# note: it is planned that a future version of CMake will enable this by default
set(CMAKE_MACOSX_RPATH 1)
# Always use full RPATH
# http://www.cmake.org/Wiki/CMake_RPATH_handling
# http://www.kitware.com/blog/home/post/510
# use, i.e. don't skip the full RPATH for the build tree
SET(CMAKE_SKIP_BUILD_RPATH FALSE)
# when building, don't use the install RPATH already
# (but later on when installing)
SET(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
# add the automatically determined parts of the RPATH
# which point to directories outside the build tree to the install RPATH
SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
# the RPATH to be used when installing, but only if it's not a system directory
LIST(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES "${CMAKE_INSTALL_PREFIX}/lib" isSystemDir)
IF("${isSystemDir}" STREQUAL "-1")
SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib")
ENDIF("${isSystemDir}" STREQUAL "-1")
2. Spack provides a CMake variable called
``SPACK_TRANSITIVE_INCLUDE_PATH``, which contains the ``include/``
directory for all of your project's transitive dependencies. It
can be useful if your project ``#include``s files from package B,
which ``#include`` files from package C, but your project only
lists package B as a dependency. This works in traditional
single-tree build environments, in which B and C's include files
live in the same place. In order to make it work with Spack as
well, you must add the following to ``CMakeLists.txt``. It will
have no effect when building without Spack:
.. code-block:: cmake
# Include all the transitive dependencies determined by Spack.
# If we're not running with Spack, this does nothing...
include_directories($ENV{SPACK_TRANSITIVE_INCLUDE_PATH})
.. note::
Note that this feature is controversial and could break with
future versions of GNU ld. The best practice is to make sure
anything you ``#include`` is listed as a dependency in your
CMakeLists.txt (and Spack package).
.. _write-the-spack-package:
^^^^^^^^^^^^^^^^^^^^^^^
Write the Spack Package
^^^^^^^^^^^^^^^^^^^^^^^
The Spack package also needs to be written, in tandem with setting up
the build (for example, CMake). The most important part of this task
is declaring dependencies. Here is an example of the Spack package
for the ``mylib`` package (ellipses for brevity):
.. code-block:: python
class Mylib(CMakePackage):
"""Misc. reusable utilities used by Myapp."""
homepage = "https://github.com/citibeth/mylib"
url = "https://github.com/citibeth/mylib/tarball/123"
version('0.1.2', '3a6acd70085e25f81b63a7e96c504ef9')
version('develop', git='https://github.com/citibeth/mylib.git',
branch='develop')
variant('everytrace', default=False,
description='Report errors through Everytrace')
...
extends('python')
depends_on('eigen')
depends_on('everytrace', when='+everytrace')
depends_on('proj', when='+proj')
...
depends_on('cmake', type='build')
depends_on('doxygen', type='build')
def cmake_args(self):
spec = self.spec
return [
'-DUSE_EVERYTRACE=%s' % ('YES' if '+everytrace' in spec else 'NO'),
'-DUSE_PROJ4=%s' % ('YES' if '+proj' in spec else 'NO'),
...
'-DUSE_UDUNITS2=%s' % ('YES' if '+udunits2' in spec else 'NO'),
'-DUSE_GTEST=%s' % ('YES' if '+googletest' in spec else 'NO')]
This is a standard Spack package that can be used to install
``mylib`` in a production environment. The list of dependencies in
the Spack package will generally be a repeat of the list of CMake
dependencies. This package also has some features that allow it to be
used for development:
1. It subclasses ``CMakePackage`` instead of ``Package``. This
eliminates the need to write an ``install()`` method, which is
defined in the superclass. Instead, one just needs to write the
``cmake_args()`` method. That method should return the
arguments needed for the ``cmake`` command (beyond the standard
CMake arguments, which Spack will include already). These
arguments are typically used to turn features on/off in the build.
2. It specifies a non-checksummed version ``develop``. Running
   ``spack install mylib@develop`` will install the latest version
   off the develop branch. This method of
download is useful for the developer of a project while it is in
active development; however, it should only be used by developers
who control and trust the repository in question!
3. The ``url``, ``url_for_version()`` and ``homepage`` attributes are
not used in development. Don't worry if you don't have any, or if
they are behind a firewall.
^^^^^^^^^^^^^^^^
Build with Spack
^^^^^^^^^^^^^^^^
Now that you have a Spack package, you can use Spack to find its
dependencies automatically. For example:
.. code-block:: console
$ cd mylib
$ spack setup mylib@local
The result will be a file ``spconfig.py`` in the top-level
``mylib/`` directory. It is a short script that calls CMake with the
dependencies and options determined by Spack --- similar to what
happens in ``spack install``, but now written out in script form.
From a developer's point of view, you can think of ``spconfig.py`` as
a stand-in for the ``cmake`` command.
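For illustration, a generated ``spconfig.py`` looks roughly like the
sketch below. The exact contents are produced by ``spack setup`` and
vary by package and Spack version; every path, compiler, and flag
shown here is illustrative only:

.. code-block:: python

   #!/usr/bin/env python
   # Rough sketch of a generated spconfig.py (illustrative values only).
   import os
   import subprocess
   import sys

   # Environment Spack computed during concretization.
   env = dict(os.environ)
   env['CC'] = '/usr/bin/gcc'
   env['CXX'] = '/usr/bin/g++'

   # CMake arguments Spack would pass during ``spack install``, plus
   # whatever extra arguments the developer supplies on the command line.
   cmake_args = [
       'cmake',
       '-DCMAKE_INSTALL_PREFIX=/home/me/spack/opt/linux-x86_64/gcc-5.4.0/mylib-local',
       '-DUSE_EVERYTRACE=NO',
       '-DUSE_PROJ4=NO',
   ] + sys.argv[1:]

   sys.exit(subprocess.call(cmake_args, env=env))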
.. note::
You can invent any "version" you like for the ``spack setup``
command.
.. note::
Although ``spack setup`` does not build your package, it does
create and install a module file, and mark in the database that
your package has been installed. This can lead to errors, of
course, if you don't subsequently install your package.
Also, you will need to ``spack uninstall`` before you run
``spack setup`` again.
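In practice, that means a sequence like the following before
regenerating ``spconfig.py``:

.. code-block:: console

   $ spack uninstall mylib@local
   $ spack setup mylib@local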
You can now build your project as usual with CMake:
.. code-block:: console
$ mkdir build; cd build
$ ../spconfig.py .. # Instead of cmake ..
$ make
$ make install
Once your ``make install`` command is complete, your package will be
installed, just as if you'd run ``spack install``, except that you can
now edit, re-build, and re-install as often as needed, without checking
anything into Git or downloading tarballs.
.. note::
The build you get this way will be *almost* the same as the build
from ``spack install``. The only difference is that you will not be
using Spack's compiler wrappers. This difference has not caused
problems in our experience, as long as your project sets
RPATHs as shown above. You DO use RPATHs, right?
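If you want to verify, on Linux you can check an installed library for
an RPATH entry (the library name here is illustrative):

.. code-block:: console

   $ readelf -d libmylib.so | grep -i rpath

An ``RPATH`` (or ``RUNPATH``) entry pointing into your Spack
installation indicates the build is set up correctly.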
^^^^^^^^^^^^^^^^^^^^
Build Other Software
^^^^^^^^^^^^^^^^^^^^
Now that you've built ``mylib`` with Spack, you might want to build
another package that depends on it --- for example, ``myapp``. This
is accomplished easily enough:
.. code-block:: console
$ spack install myapp ^mylib@local
Note that auto-built software has now been installed *on top of*
manually-built software, without breaking Spack's "web." This
property is useful if you need to debug a package deep in the
dependency hierarchy of your application. It is a *big* advantage of
using ``spack setup`` to build your package's environment.
If you feel your software is stable, you might wish to install it with
``spack install`` and skip the source directory. You can just use,
for example:
.. code-block:: console
$ spack install mylib@develop
.. _release-your-software:
^^^^^^^^^^^^^^^^^^^^^
Release Your Software
^^^^^^^^^^^^^^^^^^^^^
You are now ready to release your software as a tarball with a
numbered version, and a Spack package that can build it. If you're
hosted on GitHub, this process will be a bit easier.
#. Tag the commit(s) in your GitHub repo that you want to be release
versions. For example, a tag ``v0.1.0`` for version 0.1.0.
#. Set the ``url`` in your ``package.py`` to download a tarball for
the appropriate version. GitHub will give you a tarball for any
commit in the repo, if you tickle it the right way. For example:
.. code-block:: python
url = 'https://github.com/citibeth/mylib/tarball/v0.1.2'
#. Use Spack to determine your version's hash, and cut'n'paste it into
your ``package.py``:
.. code-block:: console
$ spack checksum mylib 0.1.2
==> Found 1 versions of mylib
0.1.2 https://github.com/citibeth/mylib/tarball/v0.1.2
How many would you like to checksum? (default is 5, q to abort)
==> Downloading...
==> Trying to fetch from https://github.com/citibeth/mylib/tarball/v0.1.2
######################################################################## 100.0%
==> Checksummed new versions of mylib:
version('0.1.2', '3a6acd70085e25f81b63a7e96c504ef9')
#. You should now be able to install released version 0.1.2 of your package with:
.. code-block:: console
$ spack install mylib@0.1.2
#. There is no need to remove the ``develop`` version from your package.
Spack concretization will always prefer numbered versions to
non-numbered versions, so users will only get ``develop`` if they
ask for it explicitly.
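After this step, the version stanza of your ``package.py`` might read
(a sketch assembled from the lines shown above):

.. code-block:: python

   version('0.1.2', '3a6acd70085e25f81b63a7e96c504ef9')
   version('develop', git='https://github.com/citibeth/mylib.git',
           branch='develop')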
^^^^^^^^^^^^^^^^^^^^^^^^
Distribute Your Software
^^^^^^^^^^^^^^^^^^^^^^^^
Once you've released your software, other people will want to build
it; and you will need to tell them how. In the past, that has meant a
few paragraphs of prose explaining which dependencies to install. But
now you use Spack, and those instructions are written in executable
Python code. Your software has many dependencies, and you know
Spack is the best way to install it:
#. First, you will want to fork Spack's ``develop`` branch. Your aim
is to provide a stable version of Spack that you KNOW will install
your software. If you make changes to Spack in the process, you
will want to submit pull requests to Spack core.
#. Add your software's ``package.py`` to that fork. You should submit
a pull request for this as well, unless you don't want the public
to know about your software.
#. Prepare instructions that read approximately as follows:
#. Download Spack from your forked repo.
#. Install Spack; see :ref:`getting_started`.
#. Set up an appropriate ``packages.yaml`` file. You should tell
your users to include in this file whatever versions/variants
are needed to make your software work correctly (assuming those
are not already in your ``packages.yaml``); a sketch follows
after this list.
#. Run ``spack install mylib``.
#. Run this script to generate the ``module load`` commands or
filesystem view needed to use this software.
#. Be aware that your users might encounter unexpected bootstrapping
issues on their machines, especially if they are running on older
systems. The :ref:`getting_started` section should cover this, but
there could always be issues.
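As a sketch of the ``packages.yaml`` mentioned above, you might pin a
compiler and a variant your software is known to work with; the
compiler version and variant below are purely illustrative:

.. code-block:: yaml

   packages:
     all:
       compiler: [gcc@7.3.0]
     openmpi:
       variants: +thread_multiple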
^^^^^^^^^^^^^^^^^^^
Other Build Systems
^^^^^^^^^^^^^^^^^^^
``spack setup`` currently only supports CMake-based builds, in
packages that subclass ``CMakePackage``. The intent is that this
mechanism should support a wider range of build systems; for example,
GNU Autotools. Someone well-versed in Autotools is needed to develop
this patch and test it out.
Python Distutils is another popular build system that should get
``spack setup`` support. For non-compiled languages like Python,
``spack diy`` may be used. Even better is to put the source directory
directly in the user's ``PYTHONPATH``. Then, edits in source files
are immediately available to run without any install process at all!
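For example, assuming your working copy lives in ``~/mylib``
(an illustrative path):

.. code-block:: console

   $ export PYTHONPATH=$HOME/mylib:$PYTHONPATH
   $ python -c "import mylib"   # runs your edited sources directly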
^^^^^^^^^^
Conclusion
^^^^^^^^^^
The ``spack setup`` development workflow provides better automation,
flexibility and safety than workflows relying on environment modules
or filesystem views. However, it has some drawbacks:
#. It currently works only with projects that use the CMake build
   system. Support for other build systems is not hard to add, but
   will require a small amount of effort for each build system to be
   supported. It might not work well with some IDEs.
#. It only works with packages that sub-class ``StagedPackage``.
Currently, most Spack packages do not. Converting them is not
hard, but it must be done on a package-by-package basis.
#. It requires that users are comfortable with Spack, as they
integrate Spack explicitly in their workflow. Not all users are
willing to do this.
-------------------------------------
Using Spack to Replace Homebrew/Conda
-------------------------------------
The main points that are implemented below:

.. code-block:: yaml

   # excerpt; the surrounding .travis.yml is abbreviated
   - export CXXFLAGS="-std=c++11"

   install:
     - |
       if ! which spack >/dev/null; then
         mkdir -p $SPACK_ROOT &&
         git clone --depth 50 https://github.com/spack/spack.git $SPACK_ROOT &&
         printf "config:\n  build_jobs: 2\n" > $SPACK_ROOT/etc/spack/config.yaml &&
         printf "packages:\n  all:\n    target: ['x86_64']\n" \
           > $SPACK_ROOT/etc/spack/packages.yaml;
       fi
     - travis_wait spack install cmake@3.7.2~openssl~ncurses

   script:
     - make -j 2
     - make test
.. _workflow_create_docker_image:
-----------------------------------
Using Spack to Create Docker Images
-----------------------------------
Spack can be the ideal tool to set up images for Docker (and Singularity).
An example ``Dockerfile`` is given below, which downloads the latest
spack version.
The following functionality is prepared:
#. Base image: the example starts from a minimal ubuntu.
#. Pre-install the spack dependencies.
Package installs are followed by a clean-up of the system package index,
both to avoid outdated information and to save space.
#. Install spack in ``/usr/local``.
Add ``setup-env.sh`` to profile scripts, so commands in *login* shells
can use the whole spack functionality, including modules.
#. Install an example package (``tar``).
As with system package managers above, ``spack install`` commands should be
concatenated with a ``&& spack clean -a`` in order to keep image sizes small.
#. Add a startup hook to an *interactive login shell* so spack modules will be
usable.
In order to build and run the image, execute:
.. code-block:: bash
docker build -t spack .
docker run -it spack
.. code-block:: docker
FROM ubuntu:16.04
MAINTAINER Your Name <someone@example.com>
# general environment for docker
ENV DEBIAN_FRONTEND=noninteractive \
SPACK_ROOT=/usr/local
# install minimal spack dependencies
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
autoconf \
build-essential \
ca-certificates \
coreutils \
curl \
environment-modules \
git \
python \
unzip \
vim \
&& rm -rf /var/lib/apt/lists/*
# load spack environment on login
RUN echo "source $SPACK_ROOT/share/spack/setup-env.sh" \
> /etc/profile.d/spack.sh
# spack settings
# note: if you wish to change default settings, add files alongside
# the Dockerfile with your desired settings. Then uncomment this line
#COPY packages.yaml modules.yaml $SPACK_ROOT/etc/spack/
# install spack
RUN curl -s -L https://api.github.com/repos/spack/spack/tarball \
| tar xzC $SPACK_ROOT --strip 1
# note: at this point one could also run ``spack bootstrap`` to avoid
# parts of the long apt-get install list above
# install software
RUN spack install tar \
&& spack clean -a
# need the executables from a package already during image build?
#RUN /bin/bash -l -c ' \
# spack load tar \
# && which tar'
# image run hook: the -l will make sure /etc/profile environments are loaded
CMD /bin/bash -l
^^^^^^^^^^^^^^
Best Practices
^^^^^^^^^^^^^^
"""
MPI
"""
OpenMPI, the default MPI implementation in spack, depends on Fortran,
so consider adding ``gfortran`` to the ``apt-get install`` list.
Recent versions of OpenMPI will require you to pass ``--allow-run-as-root``
to your ``mpirun`` calls if they are started as the root user inside Docker.
For execution on HPC clusters, it can be helpful to import the docker
image into Singularity in order to start a program with an *external*
MPI. Otherwise, also add ``openssh-server`` to the ``apt-get install`` list.
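A sketch of the corresponding additions to the example ``Dockerfile``
above:

.. code-block:: docker

   # add Fortran (for OpenMPI) and an SSH server (for external-MPI startup)
   RUN apt-get update \
       && apt-get install -y --no-install-recommends \
           gfortran \
           openssh-server \
       && rm -rf /var/lib/apt/lists/*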
""""
CUDA
""""
Starting from CUDA 9.0, Nvidia provides minimal CUDA images based on
Ubuntu.
Please see `their instructions <https://hub.docker.com/r/nvidia/cuda/>`_.
Avoid double-installing CUDA by adding, e.g.
.. code-block:: yaml
packages:
cuda:
paths:
cuda@9.0.176%gcc@5.4.0 arch=linux-ubuntu16-x86_64: /usr/local/cuda
buildable: False
to your ``packages.yaml``.
Then ``COPY`` that file into the image, as in the example above.
Users will either need ``nvidia-docker`` or e.g. Singularity to *execute*
device kernels.
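For example, assuming the file sits next to your ``Dockerfile``:

.. code-block:: docker

   COPY packages.yaml $SPACK_ROOT/etc/spack/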
"""""""""""
Singularity
"""""""""""
Importing and running the image created above into
`Singularity <http://singularity.lbl.gov/>`_ works like a charm.
Just use the `docker bootstrapping mechanism <http://singularity.lbl.gov/quickstart#bootstrap-recipes>`_:
.. code-block:: none
Bootstrap: docker
From: registry/user/image:tag
%runscript
exec /bin/bash -l
""""""""""""""""""""""
Docker for Development
""""""""""""""""""""""
For examples of how we use docker in development, see
:ref:`docker_for_developers`.
"""""""""""""""""""""""""
Docker on Windows and OSX
"""""""""""""""""""""""""
On Mac OS and Windows, docker runs on a hypervisor that is not allocated much
memory by default, and some spack packages may fail to build due to lack of
memory. To work around this issue, consider configuring your docker installation
to use more of your host memory. In some cases, you can also ease the memory
pressure on parallel builds by limiting the parallelism in your ``config.yaml``:
.. code-block:: yaml
config:
build_jobs: 2
------------------
Upstream Bug Fixes
------------------
@@ -6,13 +6,6 @@
"""This module contains the following external, potentially separately
licensed, packages that are included in Spack:
archspec
--------
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.1.2 (commit 2846749dc5b12ae2b30ff1d3f0270a4a5954710d)
argparse
--------

View File

@@ -5,12 +5,9 @@
import _pytest._code
import py
try:
from collections.abc import Sequence
from collections import Sequence
except ImportError:
try:
from collections import Sequence
except ImportError:
Sequence = list
Sequence = list
u = py.builtin._totext

View File

@@ -10,12 +10,9 @@
import _pytest._code
import py
try:
from collections.abc import MutableMapping as MappingMixin
from collections import MutableMapping as MappingMixin
except ImportError:
try:
from collections import MutableMapping as MappingMixin
except ImportError:
from UserDict import DictMixin as MappingMixin
from UserDict import DictMixin as MappingMixin
from _pytest.config import directory_arg, UsageError, hookimpl
from _pytest.outcomes import exit

View File

@@ -398,10 +398,7 @@ def approx(expected, rel=None, abs=None, nan_ok=False):
__ https://docs.python.org/3/reference/datamodel.html#object.__ge__
"""
if sys.version_info >= (3, 3):
from collections.abc import Mapping, Sequence
else:
from collections import Mapping, Sequence
from collections import Mapping, Sequence
from _pytest.compat import STRING_TYPES as String
# Delegate the comparison to a class that knows how to deal with the type

View File

@@ -1,22 +0,0 @@
View File

@@ -1,21 +0,0 @@
This work was produced under the auspices of the U.S. Department of
Energy by Lawrence Livermore National Laboratory under Contract
DE-AC52-07NA27344.
This work was prepared as an account of work sponsored by an agency of
the United States Government. Neither the United States Government nor
Lawrence Livermore National Security, LLC, nor any of their employees
makes any warranty, expressed or implied, or assumes any legal liability
or responsibility for the accuracy, completeness, or usefulness of any
information, apparatus, product, or process disclosed, or represents that
its use would not infringe privately owned rights.
Reference herein to any specific commercial product, process, or service
by trade name, trademark, manufacturer, or otherwise does not necessarily
constitute or imply its endorsement, recommendation, or favoring by the
United States Government or Lawrence Livermore National Security, LLC.
The views and opinions of authors expressed herein do not necessarily
state or reflect those of the United States Government or Lawrence
Livermore National Security, LLC, and shall not be used for advertising
or product endorsement purposes.

View File

@@ -1,36 +0,0 @@
[![](https://github.com/archspec/archspec-json/workflows/JSON%20Validation/badge.svg)](https://github.com/archspec/archspec-json/actions)
# Archspec-json
The [archspec-json](https://github.com/archspec/archspec-json) repository is part of the
[Archspec](https://github.com/archspec) project. It contains data on various architectural
aspects of a platform stored in JSON format and is meant to be used as a base to develop
language specific APIs.
Currently the repository contains the following JSON files:
```console
.
├── COPYRIGHT
└── cpu
   ├── microarchitectures.json # Contains information on CPU microarchitectures
   └── microarchitectures_schema.json # Schema for the file above
```
## License
Archspec is distributed under the terms of both the MIT license and the
Apache License (Version 2.0). Users may choose either license, at their
option.
All new contributions must be made under both the MIT and Apache-2.0
licenses.
See [LICENSE-MIT](https://github.com/archspec/archspec-json/blob/master/LICENSE-MIT),
[LICENSE-APACHE](https://github.com/archspec/archspec-json/blob/master/LICENSE-APACHE),
[COPYRIGHT](https://github.com/archspec/archspec-json/blob/master/COPYRIGHT), and
[NOTICE](https://github.com/archspec/archspec-json/blob/master/NOTICE) for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
LLNL-CODE-811653

View File

@@ -1,110 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Schema for microarchitecture definitions and feature aliases",
"type": "object",
"additionalProperties": false,
"properties": {
"microarchitectures": {
"type": "object",
"patternProperties": {
"([\\w]*)": {
"type": "object",
"properties": {
"from": {
"$comment": "More than one parent",
"type": "array",
"items": {
"type": "string"
}
},
"vendor": {
"type": "string"
},
"features": {
"type": "array",
"items": {
"type": "string"
}
},
"compilers": {
"type": "object",
"patternProperties": {
"([\\w]*)": {
"$comment": "Permit multiple entries since compilers change options across versions",
"type": "array",
"items": {
"type": "object",
"properties": {
"versions": {
"type": "string"
},
"name": {
"type": "string"
},
"flags": {
"type": "string"
}
},
"required": [
"versions",
"flags"
]
}
}
}
}
},
"required": [
"from",
"vendor",
"features"
]
}
}
},
"feature_aliases": {
"type": "object",
"patternProperties": {
"([\\w]*)": {
"type": "object",
"properties": {
"reason": {
"$comment": "Comment containing the reason why an alias is there",
"type": "string"
},
"any_of": {
"$comment": "The alias is true if any of the items is a feature of the target",
"type": "array",
"items": {
"type": "string"
}
},
"families": {
"$comment": "The alias is true if the family of the target is in this list",
"type": "array",
"items": {
"type": "string"
}
}
},
"additionalProperties": false
}
}
},
"conversions": {
"type": "object",
"properties": {
"description": {
"type": "string"
},
"arm_vendors": {
"type": "object"
},
"darwin_flags": {
"type": "object"
}
},
"additionalProperties": false
}
}
}

View File

@@ -118,7 +118,6 @@ def match(self, text):
"([^:]+): (Error:|error|undefined reference|multiply defined)",
"([^ :]+) ?: (error|fatal error|catastrophic error)",
"([^:]+)\\(([^\\)]+)\\) ?: (error|fatal error|catastrophic error)"),
"^FAILED",
"^[Bb]us [Ee]rror",
"^[Ss]egmentation [Vv]iolation",
"^[Ss]egmentation [Ff]ault",

View File

@@ -315,14 +315,10 @@ def __repr__(self):
# register the context as mapping if possible
try:
from collections.abc import Mapping
from collections import Mapping
Mapping.register(Context)
except ImportError:
try:
from collections import Mapping
Mapping.register(Context)
except ImportError:
pass
pass
class BlockReference(object):

View File

@@ -14,7 +14,7 @@
"""
import types
import operator
import sys
from collections import Mapping
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2._compat import string_types, PY2
@@ -23,11 +23,6 @@
from markupsafe import EscapeFormatter
from string import Formatter
if sys.version_info >= (3, 3):
from collections.abc import Mapping
else:
from collections import Mapping
#: maximum number of items a range may produce
MAX_RANGE = 100000
@@ -84,10 +79,7 @@
pass
#: register Python 2.6 abstract base classes
if sys.version_info >= (3, 3):
from collections.abc import MutableSet, MutableMapping, MutableSequence
else:
from collections import MutableSet, MutableMapping, MutableSequence
from collections import MutableSet, MutableMapping, MutableSequence
_mutable_set_types += (MutableSet,)
_mutable_mapping_types += (MutableMapping,)
_mutable_sequence_types += (MutableSequence,)


@@ -10,16 +10,11 @@
"""
import operator
import re
import sys
from collections import Mapping
from jinja2.runtime import Undefined
from jinja2._compat import text_type, string_types, integer_types
import decimal
if sys.version_info >= (3, 3):
from collections.abc import Mapping
else:
from collections import Mapping
number_re = re.compile(r'^-?\d+(\.\d+)?$')
regex_type = type(number_re)


@@ -482,14 +482,10 @@ def __reversed__(self):
# register the LRU cache as mutable mapping if possible
try:
from collections.abc import MutableMapping
from collections import MutableMapping
MutableMapping.register(LRUCache)
except ImportError:
try:
from collections import MutableMapping
MutableMapping.register(LRUCache)
except ImportError:
pass
pass
def select_autoescape(enabled_extensions=('html', 'htm', 'xml'),


@@ -10,15 +10,10 @@
"""
import re
import string
import sys
from collections import Mapping
from markupsafe._compat import text_type, string_types, int_types, \
unichr, iteritems, PY2
if sys.version_info >= (3, 3):
from collections.abc import Mapping
else:
from collections import Mapping
__version__ = "1.0"
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']


@@ -9,12 +9,7 @@
a separate base
"""
import sys
if sys.version_info >= (3, 3):
from collections.abc import MutableSet
else:
from collections import MutableSet
from collections import MutableSet
__all__ = ["CommentedSeq", "CommentedMap", "CommentedOrderedMap",
"CommentedSet", 'comment_attrib', 'merge_attrib']


@@ -12,12 +12,9 @@
from ruamel.ordereddict import ordereddict
except:
try:
from collections.abc import OrderedDict
from collections import OrderedDict
except ImportError:
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from ordereddict import OrderedDict
# to get the right name import ... as ordereddict doesn't do that
class ordereddict(OrderedDict):


@@ -3,6 +3,7 @@
from __future__ import absolute_import
from __future__ import print_function
import collections
import datetime
import base64
import binascii
@@ -25,12 +26,6 @@
from ruamel.yaml.scalarstring import * # NOQA
if sys.version_info >= (3, 3):
from collections.abc import Hashable
else:
from collections import Hashable
__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
'ConstructorError', 'RoundTripConstructor']
@@ -168,7 +163,7 @@ def construct_mapping(self, node, deep=False):
# keys can be list -> deep
key = self.construct_object(key_node, deep=True)
# lists are not hashable, but tuples are
if not isinstance(key, Hashable):
if not isinstance(key, collections.Hashable):
if isinstance(key, list):
key = tuple(key)
if PY2:
@@ -180,7 +175,7 @@ def construct_mapping(self, node, deep=False):
"found unacceptable key (%s)" %
exc, key_node.start_mark)
else:
if not isinstance(key, Hashable):
if not isinstance(key, collections.Hashable):
raise ConstructorError(
"while constructing a mapping", node.start_mark,
"found unhashable key", key_node.start_mark)
@@ -964,7 +959,7 @@ def construct_mapping(self, node, maptyp, deep=False):
# keys can be list -> deep
key = self.construct_object(key_node, deep=True)
# lists are not hashable, but tuples are
if not isinstance(key, Hashable):
if not isinstance(key, collections.Hashable):
if isinstance(key, list):
key = tuple(key)
if PY2:
@@ -976,7 +971,7 @@ def construct_mapping(self, node, maptyp, deep=False):
"found unacceptable key (%s)" %
exc, key_node.start_mark)
else:
if not isinstance(key, Hashable):
if not isinstance(key, collections.Hashable):
raise ConstructorError(
"while constructing a mapping", node.start_mark,
"found unhashable key", key_node.start_mark)
@@ -1008,7 +1003,7 @@ def construct_setting(self, node, typ, deep=False):
# keys can be list -> deep
key = self.construct_object(key_node, deep=True)
# lists are not hashable, but tuples are
if not isinstance(key, Hashable):
if not isinstance(key, collections.Hashable):
if isinstance(key, list):
key = tuple(key)
if PY2:
@@ -1020,7 +1015,7 @@ def construct_setting(self, node, typ, deep=False):
"found unacceptable key (%s)" %
exc, key_node.start_mark)
else:
if not isinstance(key, Hashable):
if not isinstance(key, collections.Hashable):
raise ConstructorError(
"while constructing a mapping", node.start_mark,
"found unhashable key", key_node.start_mark)


@@ -0,0 +1,18 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from .microarchitecture import Microarchitecture, UnsupportedMicroarchitecture
from .microarchitecture import targets, generic_microarchitecture
from .microarchitecture import version_components
from .detect import host
__all__ = [
'Microarchitecture',
'UnsupportedMicroarchitecture',
'targets',
'generic_microarchitecture',
'host',
'version_components'
]


@@ -0,0 +1,102 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#: Known predicates that can be used to construct feature aliases
from .schema import targets_json, LazyDictionary, properties
_feature_alias_predicate = {}
class FeatureAliasTest(object):
"""A test that must be passed for a feature alias to succeed.
Args:
rules (dict): dictionary of rules to be met. Each key must be a
valid alias predicate
"""
def __init__(self, rules):
self.rules = rules
self.predicates = []
for name, args in rules.items():
self.predicates.append(_feature_alias_predicate[name](args))
def __call__(self, microarchitecture):
return all(
feature_test(microarchitecture) for feature_test in self.predicates
)
def _feature_aliases():
"""Returns the dictionary of all defined feature aliases."""
json_data = targets_json['feature_aliases']
aliases = {}
for alias, rules in json_data.items():
aliases[alias] = FeatureAliasTest(rules)
return aliases
feature_aliases = LazyDictionary(_feature_aliases)
def alias_predicate(predicate_schema):
"""Decorator to register a predicate that can be used to define
feature aliases.
Args:
predicate_schema (dict): schema to be enforced in
microarchitectures.json for the predicate
"""
def decorator(func):
name = func.__name__
# Check we didn't register anything else with the same name
if name in _feature_alias_predicate:
msg = 'the alias predicate "{0}" already exists'.format(name)
raise KeyError(msg)
# Update the overall schema
alias_schema = properties['feature_aliases']['patternProperties']
alias_schema[r'([\w]*)']['properties'].update(
{name: predicate_schema}
)
# Register the predicate
_feature_alias_predicate[name] = func
return func
return decorator
@alias_predicate(predicate_schema={'type': 'string'})
def reason(motivation_for_the_alias):
"""This predicate returns always True and it's there to allow writing
a documentation string in the JSON file to explain why an alias is needed.
"""
return lambda x: True
@alias_predicate(predicate_schema={
'type': 'array',
'items': {'type': 'string'}
})
def any_of(list_of_features):
"""Returns a predicate that is True if any of the feature in the
list is in the microarchitecture being tested, False otherwise.
"""
def _impl(microarchitecture):
return any(x in microarchitecture for x in list_of_features)
return _impl
@alias_predicate(predicate_schema={
'type': 'array',
'items': {'type': 'string'}
})
def families(list_of_families):
"""Returns a predicate that is True if the architecture family of
the microarchitecture being tested is in the list, False otherwise.
"""
def _impl(microarchitecture):
return str(microarchitecture.family) in list_of_families
return _impl
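Taken together, an alias entry in the JSON becomes a FeatureAliasTest whose call ANDs one predicate per rule. A hand-written illustration (the rules below are hypothetical, not taken from the shipped microarchitectures.json):

rules = {
    'reason': 'example alias restricted to the x86_64 family',  # always-true doc rule
    'families': ['x86_64'],  # the target's family must appear in this list
}
test = FeatureAliasTest(rules)
# test(microarchitecture) is True only if every rule's predicate passes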


@@ -1,8 +1,7 @@
# Copyright 2019-2020 Lawrence Livermore National Security, LLC and other
# Archspec Project Developers. See the top-level COPYRIGHT file for details.
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Detection of CPU microarchitectures"""
import collections
import functools
import os
@@ -13,16 +12,16 @@
import six
from .microarchitecture import generic_microarchitecture, TARGETS
from .schema import TARGETS_JSON
from .microarchitecture import generic_microarchitecture, targets
from .schema import targets_json
#: Mapping from operating systems to chain of commands
#: to obtain a dictionary of raw info on the current cpu
INFO_FACTORY = collections.defaultdict(list)
info_factory = collections.defaultdict(list)
#: Mapping from micro-architecture families (x86_64, ppc64le, etc.) to
#: functions checking the compatibility of the host with a given target
COMPATIBILITY_CHECKS = {}
compatibility_checks = {}
def info_dict(operating_system):
@@ -33,9 +32,8 @@ def info_dict(operating_system):
operating_system (str or tuple): operating system for which the marked
function is a viable factory of raw info dictionaries.
"""
def decorator(factory):
INFO_FACTORY[operating_system].append(factory)
info_factory[operating_system].append(factory)
@functools.wraps(factory)
def _impl():
@@ -43,10 +41,10 @@ def _impl():
# Check that info contains a few mandatory fields
msg = 'field "{0}" is missing from raw info dictionary'
assert "vendor_id" in info, msg.format("vendor_id")
assert "flags" in info, msg.format("flags")
assert "model" in info, msg.format("model")
assert "model_name" in info, msg.format("model_name")
assert 'vendor_id' in info, msg.format('vendor_id')
assert 'flags' in info, msg.format('flags')
assert 'model' in info, msg.format('model')
assert 'model_name' in info, msg.format('model_name')
return info
@@ -55,15 +53,15 @@ def _impl():
return decorator
@info_dict(operating_system="Linux")
@info_dict(operating_system='Linux')
def proc_cpuinfo():
"""Returns a raw info dictionary by parsing the first entry of
``/proc/cpuinfo``
"""
info = {}
with open("/proc/cpuinfo") as file:
with open('/proc/cpuinfo') as file:
for line in file:
key, separator, value = line.partition(":")
key, separator, value = line.partition(':')
# If there's no separator and info was already populated
# according to what's written here:
@@ -72,43 +70,44 @@ def proc_cpuinfo():
#
# we are on a blank line separating two cpus. Exit early as
# we want to read just the first entry in /proc/cpuinfo
if separator != ":" and info:
if separator != ':' and info:
break
info[key.strip()] = value.strip()
return info
def _check_output(args, env):
output = subprocess.Popen(args, stdout=subprocess.PIPE, env=env).communicate()[0]
return six.text_type(output.decode("utf-8"))
def check_output(args, env):
output = subprocess.Popen(
args, stdout=subprocess.PIPE, env=env
).communicate()[0]
return six.text_type(output.decode('utf-8'))
@info_dict(operating_system="Darwin")
@info_dict(operating_system='Darwin')
def sysctl_info_dict():
"""Returns a raw info dictionary parsing the output of sysctl."""
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"):
search_paths = child_environment.get('PATH', '').split(os.pathsep)
for additional_path in ('/sbin', '/usr/sbin'):
if additional_path not in search_paths:
search_paths.append(additional_path)
child_environment["PATH"] = os.pathsep.join(search_paths)
child_environment['PATH'] = os.pathsep.join(search_paths)
def sysctl(*args):
return _check_output(["sysctl"] + list(args), env=child_environment).strip()
return check_output(
['sysctl'] + list(args), env=child_environment
).strip()
flags = (
sysctl("-n", "machdep.cpu.features").lower()
+ " "
+ sysctl("-n", "machdep.cpu.leaf7_features").lower()
)
flags = (sysctl('-n', 'machdep.cpu.features').lower() + ' '
+ sysctl('-n', 'machdep.cpu.leaf7_features').lower())
info = {
"vendor_id": sysctl("-n", "machdep.cpu.vendor"),
"flags": flags,
"model": sysctl("-n", "machdep.cpu.model"),
"model name": sysctl("-n", "machdep.cpu.brand_string"),
'vendor_id': sysctl('-n', 'machdep.cpu.vendor'),
'flags': flags,
'model': sysctl('-n', 'machdep.cpu.model'),
'model name': sysctl('-n', 'machdep.cpu.brand_string')
}
return info
@@ -118,16 +117,16 @@ def adjust_raw_flags(info):
slightly different representations.
"""
# Flags detected on Darwin turned to their linux counterpart
flags = info.get("flags", [])
d2l = TARGETS_JSON["conversions"]["darwin_flags"]
flags = info.get('flags', [])
d2l = targets_json['conversions']['darwin_flags']
for darwin_flag, linux_flag in d2l.items():
if darwin_flag in flags:
info["flags"] += " " + linux_flag
info['flags'] += ' ' + linux_flag
def adjust_raw_vendor(info):
"""Adjust the vendor field to make it human readable"""
if "CPU implementer" not in info:
if 'CPU implementer' not in info:
return
# Mapping numeric codes to vendor (ARM). This list is a merge from
@@ -137,10 +136,10 @@ def adjust_raw_vendor(info):
# https://developer.arm.com/docs/ddi0487/latest/arm-architecture-reference-manual-armv8-for-armv8-a-architecture-profile
# https://github.com/gcc-mirror/gcc/blob/master/gcc/config/aarch64/aarch64-cores.def
# https://patchwork.kernel.org/patch/10524949/
arm_vendors = TARGETS_JSON["conversions"]["arm_vendors"]
arm_code = info["CPU implementer"]
arm_vendors = targets_json['conversions']['arm_vendors']
arm_code = info['CPU implementer']
if arm_code in arm_vendors:
info["CPU implementer"] = arm_vendors[arm_code]
info['CPU implementer'] = arm_vendors[arm_code]
def raw_info_dictionary():
@@ -149,13 +148,12 @@ def raw_info_dictionary():
This function calls all the viable factories one after the other until
there's one that is able to produce the requested information.
"""
# pylint: disable=broad-except
info = {}
for factory in INFO_FACTORY[platform.system()]:
for factory in info_factory[platform.system()]:
try:
info = factory()
except Exception as exc:
warnings.warn(str(exc))
except Exception as e:
warnings.warn(str(e))
if info:
adjust_raw_flags(info)
@@ -175,10 +173,9 @@ def compatible_microarchitectures(info):
architecture_family = platform.machine()
# If a tester is not registered, be conservative and assume no known
# target is compatible with the host
tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
return [x for x in TARGETS.values() if tester(info, x)] or [
generic_microarchitecture(architecture_family)
]
tester = compatibility_checks.get(architecture_family, lambda x, y: False)
return [x for x in targets.values() if tester(info, x)] or \
[generic_microarchitecture(architecture_family)]
def host():
@@ -191,9 +188,7 @@ def host():
# Reverse sort of the depth for the inheritance tree among only targets we
# can use. This gets the newest target we satisfy.
return sorted(
candidates, key=lambda t: (len(t.ancestors), len(t.features)), reverse=True
)[0]
return sorted(candidates, key=lambda t: len(t.ancestors), reverse=True)[0]
def compatibility_check(architecture_family):
@@ -212,59 +207,50 @@ def compatibility_check(architecture_family):
architecture_family = (architecture_family,)
def decorator(func):
# pylint: disable=fixme
# TODO: on removal of Python 2.6 support this can be re-written as
# TODO: an update + a dict comprehension
for arch_family in architecture_family:
COMPATIBILITY_CHECKS[arch_family] = func
compatibility_checks[arch_family] = func
return func
return decorator
@compatibility_check(architecture_family=("ppc64le", "ppc64"))
@compatibility_check(architecture_family=('ppc64le', 'ppc64'))
def compatibility_check_for_power(info, target):
"""Compatibility check for PPC64 and PPC64LE architectures."""
basename = platform.machine()
generation_match = re.search(r"POWER(\d+)", info.get("cpu", ""))
generation_match = re.search(r'POWER(\d+)', info.get('cpu', ''))
generation = int(generation_match.group(1))
# We can use a target if it descends from our machine type and our
# generation (9 for POWER9, etc) is at least its generation.
arch_root = TARGETS[basename]
return (
target == arch_root or arch_root in target.ancestors
) and target.generation <= generation
arch_root = targets[basename]
return (target == arch_root or arch_root in target.ancestors) \
and target.generation <= generation
@compatibility_check(architecture_family="x86_64")
@compatibility_check(architecture_family='x86_64')
def compatibility_check_for_x86_64(info, target):
"""Compatibility check for x86_64 architectures."""
basename = "x86_64"
vendor = info.get("vendor_id", "generic")
features = set(info.get("flags", "").split())
basename = 'x86_64'
vendor = info.get('vendor_id', 'generic')
features = set(info.get('flags', '').split())
# We can use a target if it descends from our machine type, is from our
# vendor, and we have all of its features
arch_root = TARGETS[basename]
return (
(target == arch_root or arch_root in target.ancestors)
and (target.vendor == vendor or target.vendor == "generic")
arch_root = targets[basename]
return (target == arch_root or arch_root in target.ancestors) \
and (target.vendor == vendor or target.vendor == 'generic') \
and target.features.issubset(features)
)
@compatibility_check(architecture_family="aarch64")
@compatibility_check(architecture_family='aarch64')
def compatibility_check_for_aarch64(info, target):
"""Compatibility check for AARCH64 architectures."""
basename = "aarch64"
features = set(info.get("Features", "").split())
vendor = info.get("CPU implementer", "generic")
basename = 'aarch64'
features = set(info.get('Features', '').split())
vendor = info.get('CPU implementer', 'generic')
arch_root = TARGETS[basename]
return (
(target == arch_root or arch_root in target.ancestors)
and (target.vendor == vendor or target.vendor == "generic")
arch_root = targets[basename]
return (target == arch_root or arch_root in target.ancestors) \
and (target.vendor == vendor or target.vendor == 'generic') \
and target.features.issubset(features)
)
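The decorator also makes it straightforward to register checks for further families; a minimal sketch (the riscv64 family here is hypothetical and not defined by this branch):

@compatibility_check(architecture_family='riscv64')
def compatibility_check_for_riscv64(info, target):
    # Sketch: accept only targets descending from the family root
    arch_root = targets['riscv64']
    return target == arch_root or arch_root in target.ancestors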


@@ -1,81 +1,81 @@
# Copyright 2019-2020 Lawrence Livermore National Security, LLC and other
# Archspec Project Developers. See the top-level COPYRIGHT file for details.
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Types and functions to manage information
on CPU microarchitectures.
"""
# pylint: disable=useless-object-inheritance
import functools
import platform
import re
import warnings
try:
from collections.abc import Sequence # novm
except ImportError:
from collections import Sequence
import six
import archspec
import archspec.cpu.alias
import archspec.cpu.schema
from .alias import FEATURE_ALIASES
import llnl.util
import llnl.util.cpu.alias
import llnl.util.cpu.schema
from .schema import LazyDictionary
from .alias import feature_aliases
def coerce_target_names(func):
"""Decorator that automatically converts a known target name to a proper
Microarchitecture object.
"""
@functools.wraps(func)
def _impl(self, other):
if isinstance(other, six.string_types):
if other not in TARGETS:
if other not in targets:
msg = '"{0}" is not a valid target name'
raise ValueError(msg.format(other))
other = TARGETS[other]
other = targets[other]
return func(self, other)
return _impl
class Microarchitecture(object):
"""Represents a specific CPU micro-architecture.
Args:
name (str): name of the micro-architecture (e.g. skylake).
parents (list): list of parents micro-architectures, if any.
Parenthood is considered by cpu features and not
chronologically. As such each micro-architecture is
compatible with its ancestors. For example "skylake",
which has "broadwell" as a parent, supports running binaries
optimized for "broadwell".
vendor (str): vendor of the micro-architecture
features (list of str): supported CPU flags. Note that the semantic
of the flags in this field might vary among architectures, if
at all present. For instance x86_64 processors will list all
the flags supported by a given CPU while Arm processors will
list instead only the flags that have been added on top of the
base model for the current micro-architecture.
compilers (dict): compiler support to generate tuned code for this
micro-architecture. This dictionary has as keys names of
supported compilers, while values are list of dictionaries
with fields:
* name: name of the micro-architecture according to the
compiler. This is the name passed to the ``-march`` option
or similar. Not needed if the name is the same as that
passed in as argument above.
* versions: versions that support this micro-architecture.
generation (int): generation of the micro-architecture, if
relevant.
"""
# pylint: disable=too-many-arguments
#: Aliases for micro-architecture's features
feature_aliases = FEATURE_ALIASES
feature_aliases = feature_aliases
def __init__(self, name, parents, vendor, features, compilers, generation=0):
def __init__(
self, name, parents, vendor, features, compilers, generation=0
):
"""Represents a specific CPU micro-architecture.
Args:
name (str): name of the micro-architecture (e.g. skylake).
parents (list): list of parents micro-architectures, if any.
Parenthood is considered by cpu features and not
chronologically. As such each micro-architecture is
compatible with its ancestors. For example "skylake",
which has "broadwell" as a parent, supports running binaries
optimized for "broadwell".
vendor (str): vendor of the micro-architecture
features (list of str): supported CPU flags. Note that the semantic
of the flags in this field might vary among architectures, if
at all present. For instance x86_64 processors will list all
the flags supported by a given CPU while Arm processors will
list instead only the flags that have been added on top of the
base model for the current micro-architecture.
compilers (dict): compiler support to generate tuned code for this
micro-architecture. This dictionary has as keys names of
supported compilers, while values are list of dictionaries
with fields:
* name: name of the micro-architecture according to the
compiler. This is the name passed to the ``-march`` option
or similar. Not needed if the name is the same as that
passed in as argument above.
* versions: versions that support this micro-architecture.
generation (int): generation of the micro-architecture, if
relevant.
"""
self.name = name
self.parents = parents
self.vendor = vendor
@@ -85,7 +85,6 @@ def __init__(self, name, parents, vendor, features, compilers, generation=0):
@property
def ancestors(self):
"""All the ancestors of this microarchitecture."""
value = self.parents[:]
for parent in self.parents:
value.extend(a for a in parent.ancestors if a not in value)
@@ -102,14 +101,12 @@ def __eq__(self, other):
if not isinstance(other, Microarchitecture):
return NotImplemented
return (
self.name == other.name
and self.vendor == other.vendor
and self.features == other.features
and self.ancestors == other.ancestors
and self.compilers == other.compilers
and self.generation == other.generation
)
return (self.name == other.name and
self.vendor == other.vendor and
self.features == other.features and
self.ancestors == other.ancestors and
self.compilers == other.compilers and
self.generation == other.generation)
@coerce_target_names
def __ne__(self, other):
@@ -139,10 +136,8 @@ def __ge__(self, other):
def __repr__(self):
cls_name = self.__class__.__name__
fmt = (
cls_name + "({0.name!r}, {0.parents!r}, {0.vendor!r}, "
"{0.features!r}, {0.compilers!r}, {0.generation!r})"
)
fmt = cls_name + '({0.name!r}, {0.parents!r}, {0.vendor!r}, ' \
'{0.features!r}, {0.compilers!r}, {0.generation!r})'
return fmt.format(self)
def __str__(self):
@@ -151,7 +146,7 @@ def __str__(self):
def __contains__(self, feature):
# Feature must be of a string type, so be defensive about that
if not isinstance(feature, six.string_types):
msg = "only objects of string types are accepted [got {0}]"
msg = 'only objects of string types are accepted [got {0}]'
raise TypeError(msg.format(str(type(feature))))
# Here we look first in the raw features, and fall-back to
@@ -160,7 +155,9 @@ def __contains__(self, feature):
return True
# Check if the alias is defined, if not it will return False
match_alias = Microarchitecture.feature_aliases.get(feature, lambda x: False)
match_alias = Microarchitecture.feature_aliases.get(
feature, lambda x: False
)
return match_alias(self)
@property
@@ -168,7 +165,7 @@ def family(self):
"""Returns the architecture family a given target belongs to"""
roots = [x for x in [self] + self.ancestors if not x.ancestors]
msg = "a target is expected to belong to just one architecture family"
msg += "[found {0}]".format(", ".join(str(x) for x in roots))
msg += "[found {0}]".format(', '.join(str(x) for x in roots))
assert len(roots) == 1, msg
return roots.pop()
@@ -181,11 +178,13 @@ def to_dict(self, return_list_of_items=False):
items instead of the dictionary
"""
list_of_items = [
("name", str(self.name)),
("vendor", str(self.vendor)),
("features", sorted(str(x) for x in self.features)),
("generation", self.generation),
("parents", [str(x) for x in self.parents]),
('name', str(self.name)),
('vendor', str(self.vendor)),
('features', sorted(
str(x) for x in self.features
)),
('generation', self.generation),
('parents', [str(x) for x in self.parents])
]
if return_list_of_items:
return list_of_items
@@ -205,18 +204,19 @@ def optimization_flags(self, compiler, version):
compiler (str): name of the compiler to be used
version (str): version of the compiler to be used
"""
# If we don't have information on compiler at all return an empty string
# If we don't have information on compiler at all
# return an empty string
if compiler not in self.family.compilers:
return ""
return ''
# If we have information but it stops before this
# microarchitecture, fall back to the best known target
if compiler not in self.compilers:
best_target = [x for x in self.ancestors if compiler in x.compilers][0]
msg = (
"'{0}' compiler is known to optimize up to the '{1}'"
" microarchitecture in the '{2}' architecture family"
)
best_target = [
x for x in self.ancestors if compiler in x.compilers
][0]
msg = ("'{0}' compiler is known to optimize up to the '{1}'"
" microarchitecture in the '{2}' architecture family")
msg = msg.format(compiler, best_target, best_target.family)
raise UnsupportedMicroarchitecture(msg)
@@ -224,17 +224,20 @@ def optimization_flags(self, compiler, version):
# version being used
compiler_info = self.compilers[compiler]
def satisfies_constraint(entry, version):
min_version, max_version = entry["versions"].split(":")
# Normalize the entries to have a uniform treatment in the code below
if not isinstance(compiler_info, Sequence):
compiler_info = [compiler_info]
# Check version suffixes
def satisfies_constraint(entry, version):
min_version, max_version = entry['versions'].split(':')
# Extract numeric part of the version
min_version, _ = version_components(min_version)
max_version, _ = version_components(max_version)
version, _ = version_components(version)
# Assume compiler versions fit into semver
def tuplify(ver):
return tuple(int(y) for y in ver.split("."))
tuplify = lambda x: tuple(int(y) for y in x.split('.'))
version = tuplify(version)
if min_version:
@@ -251,29 +254,23 @@ def tuplify(ver):
for compiler_entry in compiler_info:
if satisfies_constraint(compiler_entry, version):
flags_fmt = compiler_entry["flags"]
flags_fmt = compiler_entry['flags']
# If there's no field name, use the name of the
# micro-architecture
compiler_entry.setdefault("name", self.name)
compiler_entry.setdefault('name', self.name)
# Check if we need to emit a warning
warning_message = compiler_entry.get("warnings", None)
warning_message = compiler_entry.get('warnings', None)
if warning_message:
warnings.warn(warning_message)
flags = flags_fmt.format(**compiler_entry)
return flags
msg = (
"cannot produce optimized binary for micro-architecture '{0}'"
" with {1}@{2} [supported compiler versions are {3}]"
)
msg = msg.format(
self.name,
compiler,
version,
", ".join([x["versions"] for x in compiler_info]),
)
msg = ("cannot produce optimized binary for micro-architecture '{0}'"
" with {1}@{2} [supported compiler versions are {3}]")
msg = msg.format(self.name, compiler, version,
', '.join([x['versions'] for x in compiler_info]))
raise UnsupportedMicroarchitecture(msg)
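In use, optimization_flags resolves a flag string for a concrete compiler/version pair or raises as above. A short sketch, assuming the package is importable on this branch as llnl.util.cpu and using an illustrative compiler version:

from llnl.util.cpu import targets, UnsupportedMicroarchitecture

try:
    flags = targets['broadwell'].optimization_flags('gcc', '9.2.0')
except UnsupportedMicroarchitecture:
    flags = ''
# the exact flag string (e.g. '-march=broadwell -mtune=broadwell')
# depends on the entries in microarchitectures.json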
@@ -284,7 +281,7 @@ def generic_microarchitecture(name):
name (str): name of the micro-architecture
"""
return Microarchitecture(
name, parents=[], vendor="generic", features=[], compilers={}
name, parents=[], vendor='generic', features=[], compilers={}
)
@@ -292,15 +289,15 @@ def version_components(version):
"""Decomposes the version passed as input in version number and
suffix and returns them.
If the version number or the suffix are not present, an empty
If the version number of the suffix are not present, an empty
string is returned.
Args:
version (str): version to be decomposed into its components
"""
match = re.match(r"([\d.]*)(-?)(.*)", str(version))
match = re.match(r'([\d.]*)(-?)(.*)', str(version))
if not match:
return "", ""
return '', ''
version_number = match.group(1)
suffix = match.group(3)
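A quick sketch of the decomposition this produces (inputs are illustrative):

version_components('4.8.5')        # -> ('4.8.5', '')
version_components('10.3.1-2021')  # -> ('10.3.1', '2021')
version_components('develop')      # no numeric part -> ('', 'develop')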
@@ -312,7 +309,7 @@ def _known_microarchitectures():
"""Returns a dictionary of the known micro-architectures. If the
current host platform is unknown, it is added as a generic target.
"""
# pylint: disable=fixme
# TODO: Simplify this logic using object_pairs_hook to OrderedDict
# TODO: when we stop supporting python2.6
@@ -329,40 +326,44 @@ def fill_target_from_dict(name, data, targets):
values = data[name]
# Get direct parents of target
parent_names = values["from"]
for parent in parent_names:
parent_names = values['from']
if isinstance(parent_names, six.string_types):
parent_names = [parent_names]
if parent_names is None:
parent_names = []
for p in parent_names:
# Recursively fill parents so they exist before we add them
if parent in targets:
if p in targets:
continue
fill_target_from_dict(parent, data, targets)
parents = [targets.get(parent) for parent in parent_names]
fill_target_from_dict(p, data, targets)
parents = [targets.get(p) for p in parent_names]
vendor = values["vendor"]
features = set(values["features"])
compilers = values.get("compilers", {})
generation = values.get("generation", 0)
vendor = values['vendor']
features = set(values['features'])
compilers = values.get('compilers', {})
generation = values.get('generation', 0)
targets[name] = Microarchitecture(
name, parents, vendor, features, compilers, generation
)
known_targets = {}
data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"]
targets = {}
data = llnl.util.cpu.schema.targets_json['microarchitectures']
for name in data:
if name in known_targets:
if name in targets:
# name was already brought in as ancestor to a target
continue
fill_target_from_dict(name, data, known_targets)
fill_target_from_dict(name, data, targets)
# Add the host platform if not present
host_platform = platform.machine()
known_targets.setdefault(host_platform, generic_microarchitecture(host_platform))
targets.setdefault(host_platform, generic_microarchitecture(host_platform))
return known_targets
return targets
#: Dictionary of known micro-architectures
TARGETS = LazyDictionary(_known_microarchitectures)
targets = LazyDictionary(_known_microarchitectures)
class UnsupportedMicroarchitecture(ValueError):


@@ -0,0 +1,147 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import json
import os.path
try:
from collections.abc import MutableMapping # novm
except ImportError:
from collections import MutableMapping
compilers_schema = {
'type': 'object',
'properties': {
'versions': {'type': 'string'},
'name': {'type': 'string'},
'flags': {'type': 'string'}
},
'required': ['versions', 'flags']
}
properties = {
'microarchitectures': {
'type': 'object',
'patternProperties': {
r'([\w]*)': {
'type': 'object',
'properties': {
'from': {
'anyOf': [
# More than one parent
{'type': 'array', 'items': {'type': 'string'}},
# Exactly one parent
{'type': 'string'},
# No parent
{'type': 'null'}
]
},
'vendor': {
'type': 'string'
},
'features': {
'type': 'array',
'items': {'type': 'string'}
},
'compilers': {
'type': 'object',
'patternProperties': {
r'([\w]*)': {
'anyOf': [
compilers_schema,
{
'type': 'array',
'items': compilers_schema
}
]
}
}
}
},
'required': ['from', 'vendor', 'features']
}
}
},
'feature_aliases': {
'type': 'object',
'patternProperties': {
r'([\w]*)': {
'type': 'object',
'properties': {},
'additionalProperties': False
}
},
},
'conversions': {
'type': 'object',
'properties': {
'description': {
'type': 'string'
},
'arm_vendors': {
'type': 'object',
},
'darwin_flags': {
'type': 'object'
}
},
'additionalProperties': False
}
}
schema = {
'$schema': 'http://json-schema.org/schema#',
'title': 'Schema for microarchitecture definitions and feature aliases',
'type': 'object',
'additionalProperties': False,
'properties': properties,
}
class LazyDictionary(MutableMapping):
"""Lazy dictionary that gets constructed on first access to any object key
Args:
factory (callable): factory function to construct the dictionary
"""
def __init__(self, factory, *args, **kwargs):
self.factory = factory
self.args = args
self.kwargs = kwargs
self._data = None
@property
def data(self):
if self._data is None:
self._data = self.factory(*self.args, **self.kwargs)
return self._data
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def _load_targets_json():
"""Loads ``microarchitectures.json`` in memory."""
directory_name = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(directory_name, 'microarchitectures.json')
with open(filename, 'r') as f:
return json.load(f)
#: In memory representation of the data in microarchitectures.json,
#: loaded on first access
targets_json = LazyDictionary(_load_targets_json)
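A small sketch of the laziness this buys (the factory and data are illustrative):

def build():
    print('loading...')
    return {'x86_64': 'generic'}

lazy = LazyDictionary(build)
# nothing has been loaded yet; the first access triggers the factory
assert lazy['x86_64'] == 'generic'  # prints 'loading...' exactly once
assert len(lazy) == 1               # served from the cached dict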


@@ -2,6 +2,7 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import errno
import hashlib
@@ -23,12 +24,6 @@
from llnl.util.lang import dedupe, memoized
from spack.util.executable import Executable
if sys.version_info >= (3, 3):
from collections.abc import Sequence # novm
else:
from collections import Sequence
__all__ = [
'FileFilter',
'FileList',
@@ -47,20 +42,16 @@
'fix_darwin_install_name',
'force_remove',
'force_symlink',
'chgrp',
'chmod_x',
'copy',
'install',
'copy_tree',
'install_tree',
'is_exe',
'join_path',
'last_modification_time_recursive',
'mkdirp',
'partition_path',
'prefixes',
'remove_dead_links',
'remove_directory_contents',
'remove_if_dead_link',
'remove_linked_tree',
'set_executable',
@@ -347,78 +338,56 @@ def unset_executable_mode(path):
def copy(src, dest, _permissions=False):
"""Copy the file(s) *src* to the file or directory *dest*.
"""Copies the file *src* to the file or directory *dest*.
If *dest* specifies a directory, the file will be copied into *dest*
using the base filename from *src*.
*src* may contain glob characters.
Parameters:
src (str): the file(s) to copy
src (str): the file to copy
dest (str): the destination file or directory
_permissions (bool): for internal use only
Raises:
IOError: if *src* does not match any files or directories
ValueError: if *src* matches multiple files but *dest* is
not a directory
"""
if _permissions:
tty.debug('Installing {0} to {1}'.format(src, dest))
else:
tty.debug('Copying {0} to {1}'.format(src, dest))
files = glob.glob(src)
if not files:
raise IOError("No such file or directory: '{0}'".format(src))
if len(files) > 1 and not os.path.isdir(dest):
raise ValueError(
"'{0}' matches multiple files but '{1}' is not a directory".format(
src, dest))
# Expand dest to its eventual full path if it is a directory.
if os.path.isdir(dest):
dest = join_path(dest, os.path.basename(src))
for src in files:
# Expand dest to its eventual full path if it is a directory.
dst = dest
if os.path.isdir(dest):
dst = join_path(dest, os.path.basename(src))
shutil.copy(src, dest)
shutil.copy(src, dst)
if _permissions:
set_install_permissions(dst)
copy_mode(src, dst)
if _permissions:
set_install_permissions(dest)
copy_mode(src, dest)
def install(src, dest):
"""Install the file(s) *src* to the file or directory *dest*.
"""Installs the file *src* to the file or directory *dest*.
Same as :py:func:`copy` with the addition of setting proper
permissions on the installed file.
Parameters:
src (str): the file(s) to install
src (str): the file to install
dest (str): the destination file or directory
Raises:
IOError: if *src* does not match any files or directories
ValueError: if *src* matches multiple files but *dest* is
not a directory
"""
copy(src, dest, _permissions=True)
def resolve_link_target_relative_to_the_link(link):
def resolve_link_target_relative_to_the_link(l):
"""
os.path.isdir uses os.path.exists, which for links will check
the existence of the link target. If the link target is relative to
the link, we need to construct a pathname that is valid from
our cwd (which may not be the same as the link's directory)
"""
target = os.readlink(link)
target = os.readlink(l)
if os.path.isabs(target):
return target
link_dir = os.path.dirname(os.path.abspath(link))
link_dir = os.path.dirname(os.path.abspath(l))
return os.path.join(link_dir, target)
@@ -428,8 +397,6 @@ def copy_tree(src, dest, symlinks=True, ignore=None, _permissions=False):
If the destination directory *dest* does not already exist, it will
be created as well as missing parent directories.
*src* may contain glob characters.
If *symlinks* is true, symbolic links in the source tree are represented
as symbolic links in the new tree and the metadata of the original links
will be copied as far as the platform allows; if false, the contents and
@@ -444,66 +411,56 @@ def copy_tree(src, dest, symlinks=True, ignore=None, _permissions=False):
symlinks (bool): whether or not to preserve symlinks
ignore (function): function indicating which files to ignore
_permissions (bool): for internal use only
Raises:
IOError: if *src* does not match any files or directories
ValueError: if *src* is a parent directory of *dest*
"""
if _permissions:
tty.debug('Installing {0} to {1}'.format(src, dest))
else:
tty.debug('Copying {0} to {1}'.format(src, dest))
abs_src = os.path.abspath(src)
if not abs_src.endswith(os.path.sep):
abs_src += os.path.sep
abs_dest = os.path.abspath(dest)
if not abs_dest.endswith(os.path.sep):
abs_dest += os.path.sep
files = glob.glob(src)
if not files:
raise IOError("No such file or directory: '{0}'".format(src))
# Stop early to avoid unnecessary recursion if being asked to copy from a
# parent directory.
if abs_dest.startswith(abs_src):
raise ValueError('Cannot copy ancestor directory {0} into {1}'.
format(abs_src, abs_dest))
for src in files:
abs_src = os.path.abspath(src)
if not abs_src.endswith(os.path.sep):
abs_src += os.path.sep
mkdirp(dest)
# Stop early to avoid unnecessary recursion if being asked to copy
# from a parent directory.
if abs_dest.startswith(abs_src):
raise ValueError('Cannot copy ancestor directory {0} into {1}'.
format(abs_src, abs_dest))
for s, d in traverse_tree(abs_src, abs_dest, order='pre',
follow_symlinks=not symlinks,
ignore=ignore,
follow_nonexisting=True):
if os.path.islink(s):
link_target = resolve_link_target_relative_to_the_link(s)
if symlinks:
target = os.readlink(s)
if os.path.isabs(target):
new_target = re.sub(abs_src, abs_dest, target)
if new_target != target:
tty.debug("Redirecting link {0} to {1}"
.format(target, new_target))
target = new_target
mkdirp(abs_dest)
for s, d in traverse_tree(abs_src, abs_dest, order='pre',
follow_symlinks=not symlinks,
ignore=ignore,
follow_nonexisting=True):
if os.path.islink(s):
link_target = resolve_link_target_relative_to_the_link(s)
if symlinks:
target = os.readlink(s)
if os.path.isabs(target):
new_target = re.sub(abs_src, abs_dest, target)
if new_target != target:
tty.debug("Redirecting link {0} to {1}"
.format(target, new_target))
target = new_target
os.symlink(target, d)
elif os.path.isdir(link_target):
mkdirp(d)
else:
shutil.copyfile(s, d)
os.symlink(target, d)
elif os.path.isdir(link_target):
mkdirp(d)
else:
if os.path.isdir(s):
mkdirp(d)
else:
shutil.copy2(s, d)
shutil.copyfile(s, d)
else:
if os.path.isdir(s):
mkdirp(d)
else:
shutil.copy2(s, d)
if _permissions:
set_install_permissions(d)
copy_mode(s, d)
if _permissions:
set_install_permissions(d)
copy_mode(s, d)
def install_tree(src, dest, symlinks=True, ignore=None):
@@ -517,10 +474,6 @@ def install_tree(src, dest, symlinks=True, ignore=None):
dest (str): the destination directory
symlinks (bool): whether or not to preserve symlinks
ignore (function): function indicating which files to ignore
Raises:
IOError: if *src* does not match any files or directories
ValueError: if *src* is a parent directory of *dest*
"""
copy_tree(src, dest, symlinks=symlinks, ignore=ignore, _permissions=True)
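A quick sketch of how the three helpers differ in practice (the paths are hypothetical):

copy('build/libexample.so', '/prefix/lib')     # plain copy into a directory
install('build/libexample.so', '/prefix/lib')  # same, plus install permissions
install_tree('build/share', '/prefix/share')   # recursive, permission-setting copy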
@@ -690,7 +643,7 @@ def replace_directory_transaction(directory_name, tmp_root=None):
try:
yield tmp_dir
except (Exception, KeyboardInterrupt, SystemExit) as e:
except (Exception, KeyboardInterrupt, SystemExit):
# Delete what was there, before copying back the original content
if os.path.exists(directory_name):
shutil.rmtree(directory_name)
@@ -701,7 +654,6 @@ def replace_directory_transaction(directory_name, tmp_root=None):
tty.debug('DIRECTORY RECOVERED [{0}]'.format(directory_name))
msg = 'the transactional move of "{0}" failed.'
msg += '\n ' + str(e)
raise RuntimeError(msg.format(directory_name))
else:
# Otherwise delete the temporary directory
@@ -930,15 +882,6 @@ def set_executable(path):
os.chmod(path, mode)
def last_modification_time_recursive(path):
path = os.path.abspath(path)
times = [os.stat(path).st_mtime]
times.extend(os.stat(os.path.join(root, name)).st_mtime
for root, dirs, files in os.walk(path)
for name in dirs + files)
return max(times)
def remove_empty_directories(root):
"""Ascend up from the leaves accessible from `root` and remove empty
directories.
@@ -994,53 +937,6 @@ def remove_linked_tree(path):
shutil.rmtree(path, True)
@contextmanager
def safe_remove(*files_or_dirs):
"""Context manager to remove the files passed as input, but restore
them in case any exception is raised in the context block.
Args:
*files_or_dirs: glob expressions for files or directories
to be removed
Returns:
Dictionary that maps deleted files to their temporary copy
within the context block.
"""
# Find all the files or directories that match
glob_matches = [glob.glob(x) for x in files_or_dirs]
# Sort them so that shorter paths like "/foo/bar" come before
# nested paths like "/foo/bar/baz.yaml". This simplifies the
# handling of temporary copies below
sorted_matches = sorted([
os.path.abspath(x) for x in itertools.chain(*glob_matches)
], key=len)
# Copy files and directories in a temporary location
removed, dst_root = {}, tempfile.mkdtemp()
try:
for id, file_or_dir in enumerate(sorted_matches):
# The glob expression at the top ensures that the file/dir exists
# at the time we enter the loop. Double check here since it might
# happen that a previous iteration of the loop already removed it.
# This is the case, for instance, if we remove the directory
# "/foo/bar" before the file "/foo/bar/baz.yaml".
if not os.path.exists(file_or_dir):
continue
# The monotonic ID is a simple way to make the filename
# or directory name unique in the temporary folder
basename = os.path.basename(file_or_dir) + '-{0}'.format(id)
temporary_path = os.path.join(dst_root, basename)
shutil.move(file_or_dir, temporary_path)
removed[file_or_dir] = temporary_path
yield removed
except BaseException:
# Restore the files that were removed
for original_path, temporary_path in removed.items():
shutil.move(temporary_path, original_path)
raise
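Typical use wraps a risky update so the originals come back on failure; a sketch with hypothetical paths:

with safe_remove('/tmp/demo/config.yaml', '/tmp/demo/cache'):
    # Regenerate the files here. If this block raises, every removed
    # file or directory is moved back from its temporary copy.
    rebuild_configuration()  # hypothetical helper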
def fix_darwin_install_name(path):
"""Fix install name of dynamic libraries on Darwin to have full path.
@@ -1111,7 +1007,7 @@ def find(root, files, recursive=True):
Parameters:
root (str): The root directory to start searching from
files (str or Sequence): Library name(s) to search for
files (str or collections.Sequence): Library name(s) to search for
recursive (bool, optional): if False, search only the root folder;
if True descends top-down from the root. Defaults to True.
@@ -1174,7 +1070,7 @@ def _find_non_recursive(root, search_files):
# Utilities for libraries and headers
class FileList(Sequence):
class FileList(collections.Sequence):
"""Sequence of absolute paths to files.
Provides a few convenience methods to manipulate file paths.
@@ -1417,7 +1313,7 @@ def find_headers(headers, root, recursive=False):
"""
if isinstance(headers, six.string_types):
headers = [headers]
elif not isinstance(headers, Sequence):
elif not isinstance(headers, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_headers.__name__, type(headers))
@@ -1572,7 +1468,7 @@ def find_system_libraries(libraries, shared=True):
"""
if isinstance(libraries, six.string_types):
libraries = [libraries]
elif not isinstance(libraries, Sequence):
elif not isinstance(libraries, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_system_libraries.__name__,
@@ -1626,7 +1522,7 @@ def find_libraries(libraries, root, shared=True, recursive=False):
"""
if isinstance(libraries, six.string_types):
libraries = [libraries]
elif not isinstance(libraries, Sequence):
elif not isinstance(libraries, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_libraries.__name__, type(libraries))
@@ -1674,19 +1570,6 @@ def can_access_dir(path):
return os.path.isdir(path) and os.access(path, os.R_OK | os.X_OK)
@memoized
def can_write_to_dir(path):
"""Return True if the argument is a directory in which we can write.
Args:
path: path to be tested
Returns:
True if ``path`` is an writeable directory, else False
"""
return os.path.isdir(path) and os.access(path, os.R_OK | os.X_OK | os.W_OK)
@memoized
def files_in(*search_paths):
"""Returns all the files in paths passed as arguments.
@@ -1800,28 +1683,3 @@ def prefixes(path):
pass
return paths
def md5sum(file):
"""Compute the MD5 sum of a file.
Args:
file (str): file to be checksummed
Returns:
MD5 sum of the file's content
"""
md5 = hashlib.md5()
with open(file, "rb") as f:
md5.update(f.read())
return md5.digest()
def remove_directory_contents(dir):
"""Remove all contents of a directory."""
if os.path.exists(dir):
for entry in [os.path.join(dir, entry) for entry in os.listdir(dir)]:
if os.path.isfile(entry) or os.path.islink(entry):
os.unlink(entry)
else:
shutil.rmtree(entry)


@@ -5,87 +5,63 @@
from __future__ import division
import multiprocessing
import os
import re
import functools
import collections
import inspect
from datetime import datetime, timedelta
from six import string_types
import sys
if sys.version_info >= (3, 3):
from collections.abc import Hashable, MutableMapping # novm
else:
from collections import Hashable, MutableMapping
# Ignore emacs backups when listing modules
ignore_modules = [r'^\.#', '~$']
# On macOS, Python 3.8 multiprocessing now defaults to the 'spawn' start
# method. Spack cannot currently handle this, so force the process to start
# using the 'fork' start method.
#
# TODO: This solution is not ideal, as the 'fork' start method can lead to
# crashes of the subprocess. Figure out how to make 'spawn' work.
#
# See:
# * https://github.com/spack/spack/pull/18124
# * https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods # noqa: E501
# * https://bugs.python.org/issue33725
if sys.version_info >= (3,): # novm
fork_context = multiprocessing.get_context('fork')
else:
fork_context = multiprocessing
def index_by(objects, *funcs):
"""Create a hierarchy of dictionaries by splitting the supplied
set of objects on unique values of the supplied functions.
set of objects on unique values of the supplied functions.
Values are used as keys. For example, suppose you have four
objects with attributes that look like this::
Values are used as keys. For example, suppose you have four
objects with attributes that look like this::
a = Spec(name="boost", compiler="gcc", arch="bgqos_0")
b = Spec(name="mrnet", compiler="intel", arch="chaos_5_x86_64_ib")
c = Spec(name="libelf", compiler="xlc", arch="bgqos_0")
d = Spec(name="libdwarf", compiler="intel", arch="chaos_5_x86_64_ib")
a = Spec("boost %gcc target=skylake")
b = Spec("mrnet %intel target=zen2")
c = Spec("libelf %xlc target=skylake")
d = Spec("libdwarf %intel target=zen2")
list_of_specs = [a,b,c,d]
index1 = index_by(list_of_specs, lambda s: s.arch,
lambda s: s.compiler)
index2 = index_by(list_of_specs, lambda s: s.compiler)
list_of_specs = [a,b,c,d]
index1 = index_by(list_of_specs, lambda s: str(s.target),
lambda s: s.compiler)
index2 = index_by(list_of_specs, lambda s: s.compiler)
``index1`` now has two levels of dicts, with lists at the
leaves, like this::
``index1`` now has two levels of dicts, with lists at the
leaves, like this::
{ 'bgqos_0' : { 'gcc' : [a], 'xlc' : [c] },
'chaos_5_x86_64_ib' : { 'intel' : [b, d] }
}
{ 'zen2' : { 'gcc' : [a], 'xlc' : [c] },
'skylake' : { 'intel' : [b, d] }
}
And ``index2`` is a single level dictionary of lists that looks
like this::
And ``index2`` is a single level dictionary of lists that looks
like this::
{ 'gcc' : [a],
'intel' : [b,d],
'xlc' : [c]
}
{ 'gcc' : [a],
'intel' : [b,d],
'xlc' : [c]
}
If any elemnts in funcs is a string, it is treated as the name
of an attribute, and acts like getattr(object, name). So
shorthand for the above two indexes would be::
If any elements in funcs is a string, it is treated as the name
of an attribute, and acts like getattr(object, name). So
shorthand for the above two indexes would be::
index1 = index_by(list_of_specs, 'arch', 'compiler')
index2 = index_by(list_of_specs, 'compiler')
index1 = index_by(list_of_specs, 'arch', 'compiler')
index2 = index_by(list_of_specs, 'compiler')
You can also index by tuples by passing tuples::
You can also index by tuples by passing tuples::
index1 = index_by(list_of_specs, ('arch', 'compiler'))
index1 = index_by(list_of_specs, ('target', 'compiler'))
Keys in the resulting dict will look like ('gcc', 'skylake').
Keys in the resulting dict will look like ('gcc', 'bgqos_0').
"""
if not funcs:
return objects
@@ -194,7 +170,7 @@ def memoized(func):
@functools.wraps(func)
def _memoized_function(*args):
if not isinstance(args, Hashable):
if not isinstance(args, collections.Hashable):
# Not hashable, so just call the function.
return func(*args)
@@ -269,7 +245,7 @@ def setter(name, value):
@key_ordering
class HashableMap(MutableMapping):
class HashableMap(collections.MutableMapping):
"""This is a hashable, comparable dictionary. Hash is performed on
a tuple of the values in the dictionary."""
@@ -573,12 +549,6 @@ def instance(self):
return self._instance
def __getattr__(self, name):
# When unpickling Singleton objects, the 'instance' attribute may be
# requested but not yet set. The final 'getattr' line here requires
# 'instance'/'_instance' to be defined or it will enter an infinite
# loop, so protect against that here.
if name in ['_instance', 'instance']:
raise AttributeError()
return getattr(self.instance, name)
def __getitem__(self, name):
@@ -607,8 +577,6 @@ def __init__(self, ref_function):
self.ref_function = ref_function
def __getattr__(self, name):
if name == 'ref_function':
raise AttributeError()
return getattr(self.ref_function(), name)
def __getitem__(self, name):
@@ -676,19 +644,3 @@ def uniq(sequence):
uniq_list.append(element)
last = element
return uniq_list
def star(func):
"""Unpacks arguments for use with Multiprocessing mapping functions"""
def _wrapper(args):
return func(*args)
return _wrapper
class Devnull(object):
"""Null stream with less overhead than ``os.devnull``.
See https://stackoverflow.com/a/2929954.
"""
def write(self, *_):
pass


@@ -26,126 +26,6 @@
true_fn = lambda: True
class OpenFile(object):
"""Record for keeping track of open lockfiles (with reference counting).
There's really only one ``OpenFile`` per inode, per process, but we record the
filehandle here as it's the thing we end up using in python code. You can get
the file descriptor from the file handle if needed -- or we could make this track
file descriptors as well in the future.
"""
def __init__(self, fh):
self.fh = fh
self.refs = 0
class OpenFileTracker(object):
"""Track open lockfiles, to minimize number of open file descriptors.
The ``fcntl`` locks that Spack uses are associated with an inode and a process.
This is convenient, because if a process exits, it releases its locks.
Unfortunately, this also means that if you close a file, *all* locks associated
with that file's inode are released, regardless of whether the process has any
other open file descriptors on it.
Because of this, we need to track open lock files so that we only close them when
a process no longer needs them. We do this by tracking each lockfile by its
inode and process id. This has several nice properties:
1. Tracking by pid ensures that, if we fork, we don't inadvertently track the parent
process's lockfiles. ``fcntl`` locks are not inherited across forks, so we'll
just track new lockfiles in the child.
2. Tracking by inode ensures that references are counted per inode, and that we don't
inadvertently close a file whose inode still has open locks.
3. Tracking by both pid and inode ensures that we only open lockfiles the minimum
number of times necessary for the locks we have.
Note: as mentioned elsewhere, these locks aren't thread safe -- they're designed to
work in Python and assume the GIL.
"""
def __init__(self):
"""Create a new ``OpenFileTracker``."""
self._descriptors = {}
def get_fh(self, path):
"""Get a filehandle for a lockfile.
This routine will open writable files for read/write even if you're asking
for a shared (read-only) lock. This is so that we can upgrade to an exclusive
(write) lock later if requested.
Arguments:
path (str): path to lock file we want a filehandle for
"""
# Open writable files as 'r+' so we can upgrade to write later
os_mode, fh_mode = (os.O_RDWR | os.O_CREAT), 'r+'
pid = os.getpid()
open_file = None # OpenFile object, if there is one
stat = None # stat result for the lockfile, if it exists
try:
# see whether we've seen this inode/pid before
stat = os.stat(path)
key = (stat.st_ino, pid)
open_file = self._descriptors.get(key)
except OSError as e:
if e.errno != errno.ENOENT: # only handle file not found
raise
# path does not exist -- fail if we won't be able to create it
parent = os.path.dirname(path) or '.'
if not os.access(parent, os.W_OK):
raise CantCreateLockError(path)
# if there was no already open file, we'll need to open one
if not open_file:
if stat and not os.access(path, os.W_OK):
# we know path exists but not if it's writable. If it's read-only,
# only open the file for reading (and fail if we're trying to get
# an exclusive (write) lock on it)
os_mode, fh_mode = os.O_RDONLY, 'r'
fd = os.open(path, os_mode)
fh = os.fdopen(fd, fh_mode)
open_file = OpenFile(fh)
# if we just created the file, we'll need to get its inode here
if not stat:
inode = os.fstat(fd).st_ino
key = (inode, pid)
self._descriptors[key] = open_file
open_file.refs += 1
return open_file.fh
def release_fh(self, path):
"""Release a filehandle, only closing it if there are no more references."""
try:
inode = os.stat(path).st_ino
except OSError as e:
if e.errno != errno.ENOENT: # only handle file not found
raise
inode = None # this will not be in self._descriptors
key = (inode, os.getpid())
open_file = self._descriptors.get(key)
assert open_file, "Attempted to close non-existing lock path: %s" % path
open_file.refs -= 1
if not open_file.refs:
del self._descriptors[key]
open_file.fh.close()
#: Open file descriptors for locks in this process. Used to prevent one process
#: from opening the same file many times for different byte range locks
file_tracker = OpenFileTracker()
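A sketch of the reference-counted lifecycle described above (the path is hypothetical):

fh = file_tracker.get_fh('/tmp/demo.lock')    # opens the file, refcount -> 1
fh2 = file_tracker.get_fh('/tmp/demo.lock')   # same (inode, pid), refcount -> 2
assert fh is fh2                              # one handle per inode per process
file_tracker.release_fh('/tmp/demo.lock')     # refcount -> 1, handle stays open
file_tracker.release_fh('/tmp/demo.lock')     # refcount -> 0, handle closed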
def _attempts_str(wait_time, nattempts):
# Don't print anything if we succeeded on the first try
if nattempts <= 1:
@@ -166,8 +46,7 @@ class Lock(object):
Note that this is for managing contention over resources *between*
processes and not for managing contention between threads in a process: the
functions of this object are not thread-safe. A process also must not
maintain multiple locks on the same file (or, more specifically, on
overlapping byte ranges in the same file).
maintain multiple locks on the same file.
"""
def __init__(self, path, start=0, length=0, default_timeout=None,
@@ -272,17 +151,31 @@ def _lock(self, op, timeout=None):
# Create file and parent directories if they don't exist.
if self._file is None:
self._ensure_parent_directory()
self._file = file_tracker.get_fh(self.path)
parent = self._ensure_parent_directory()
if op == fcntl.LOCK_EX and self._file.mode == 'r':
# Open writable files as 'r+' so we can upgrade to write later
os_mode, fd_mode = (os.O_RDWR | os.O_CREAT), 'r+'
if os.path.exists(self.path):
if not os.access(self.path, os.W_OK):
if op == fcntl.LOCK_SH:
# can still lock read-only files if we open 'r'
os_mode, fd_mode = os.O_RDONLY, 'r'
else:
raise LockROFileError(self.path)
elif not os.access(parent, os.W_OK):
raise CantCreateLockError(self.path)
fd = os.open(self.path, os_mode)
self._file = os.fdopen(fd, fd_mode)
elif op == fcntl.LOCK_EX and self._file.mode == 'r':
# Attempt to upgrade to write lock w/a read-only file.
# If the file were writable, we'd have opened it 'r+'
raise LockROFileError(self.path)
self._log_debug("{0} locking [{1}:{2}]: timeout {3} sec"
.format(lock_type[op], self._start, self._length,
timeout))
tty.debug("{0} locking [{1}:{2}]: timeout {3} sec"
.format(lock_type[op], self._start, self._length, timeout))
poll_intervals = iter(Lock._poll_interval_generator())
start_time = time.time()
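# The 'r+' trick above is what makes in-place upgrades possible; a minimal
# sketch of the underlying fcntl behavior (path illustrative):
import fcntl
import os

fd = os.open('/tmp/upgrade-demo.lock', os.O_RDWR | os.O_CREAT)
fh = os.fdopen(fd, 'r+')          # writable even though we start shared
fcntl.lockf(fh, fcntl.LOCK_SH)    # shared (read) lock
fcntl.lockf(fh, fcntl.LOCK_EX)    # converted in place to exclusive
fcntl.lockf(fh, fcntl.LOCK_UN)    # release
fh.close()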
@@ -318,14 +211,14 @@ def _poll_lock(self, op):
# help for debugging distributed locking
if self.debug:
# All locks read the owner PID and host
self._read_log_debug_data()
self._log_debug('{0} locked {1} [{2}:{3}] (owner={4})'
.format(lock_type[op], self.path,
self._start, self._length, self.pid))
self._read_debug_data()
tty.debug('{0} locked {1} [{2}:{3}] (owner={4})'
.format(lock_type[op], self.path,
self._start, self._length, self.pid))
# Exclusive locks write their PID/host
if op == fcntl.LOCK_EX:
self._write_log_debug_data()
self._write_debug_data()
return True
@@ -352,7 +245,7 @@ def _ensure_parent_directory(self):
raise
return parent
def _read_log_debug_data(self):
def _read_debug_data(self):
"""Read PID and host data out of the file if it is there."""
self.old_pid = self.pid
self.old_host = self.host
@@ -364,7 +257,7 @@ def _read_log_debug_data(self):
_, _, self.host = host.rpartition('=')
self.pid = int(self.pid)
def _write_log_debug_data(self):
def _write_debug_data(self):
"""Write PID and host data to the file, recording old values."""
self.old_pid = self.pid
self.old_host = self.host
@@ -388,8 +281,7 @@ def _unlock(self):
"""
fcntl.lockf(self._file, fcntl.LOCK_UN,
self._length, self._start, os.SEEK_SET)
file_tracker.release_fh(self.path)
self._file.close()
self._file = None
self._reads = 0
self._writes = 0
@@ -581,6 +473,9 @@ def release_write(self, release_fn=None):
else:
return False
def _debug(self, *args):
tty.debug(*args)
def _get_counts_desc(self):
return '(reads {0}, writes {1})'.format(self._reads, self._writes) \
if tty.is_verbose() else ''
@@ -589,50 +484,58 @@ def _log_acquired(self, locktype, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = 'Acquired at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg(locktype, '{0}{1}'
.format(desc, attempts_part)))
self._debug(self._status_msg(locktype, '{0}{1}'.
format(desc, attempts_part)))
def _log_acquiring(self, locktype):
self._log_debug(self._status_msg(locktype, 'Acquiring'), level=3)
def _log_debug(self, *args, **kwargs):
"""Output lock debug messages."""
kwargs['level'] = kwargs.get('level', 2)
tty.debug(*args, **kwargs)
self._debug2(self._status_msg(locktype, 'Acquiring'))
def _log_downgraded(self, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = 'Downgraded at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg('READ LOCK', '{0}{1}'
.format(desc, attempts_part)))
self._debug(self._status_msg('READ LOCK', '{0}{1}'
.format(desc, attempts_part)))
def _log_downgrading(self):
self._log_debug(self._status_msg('WRITE LOCK', 'Downgrading'), level=3)
self._debug2(self._status_msg('WRITE LOCK', 'Downgrading'))
def _log_released(self, locktype):
now = datetime.now()
desc = 'Released at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg(locktype, desc))
self._debug(self._status_msg(locktype, desc))
def _log_releasing(self, locktype):
self._log_debug(self._status_msg(locktype, 'Releasing'), level=3)
self._debug2(self._status_msg(locktype, 'Releasing'))
def _log_upgraded(self, wait_time, nattempts):
attempts_part = _attempts_str(wait_time, nattempts)
now = datetime.now()
desc = 'Upgraded at %s' % now.strftime("%H:%M:%S.%f")
self._log_debug(self._status_msg('WRITE LOCK', '{0}{1}'.
format(desc, attempts_part)))
self._debug(self._status_msg('WRITE LOCK', '{0}{1}'.
format(desc, attempts_part)))
def _log_upgrading(self):
self._log_debug(self._status_msg('READ LOCK', 'Upgrading'), level=3)
self._debug2(self._status_msg('READ LOCK', 'Upgrading'))
def _status_msg(self, locktype, status):
status_desc = '[{0}] {1}'.format(status, self._get_counts_desc())
return '{0}{1.desc}: {1.path}[{1._start}:{1._length}] {2}'.format(
locktype, self, status_desc)
def _debug2(self, *args):
# TODO: Easy place to make a single, temporary change to the
# TODO: debug level associated with the more detailed messages.
# TODO:
# TODO: Someday it would be great if we could switch this to
# TODO: another level, perhaps _between_ debug and verbose, or
# TODO: some other form of filtering so the first level of
# TODO: debugging doesn't have to generate these messages. Using
# TODO: verbose here did not work as expected because tests like
# TODO: test_spec_json will write the verbose messages to the
# TODO: output that is used to check test correctness.
tty.debug(*args)
class LockTransaction(object):
"""Simple nested transaction context manager that uses a file lock.


@@ -19,8 +19,7 @@
from llnl.util.tty.color import cprint, cwrite, cescape, clen
# Globals
_debug = 0
_debug = False
_verbose = False
_stacktrace = False
_timestamp = False
@@ -30,26 +29,21 @@
indent = " "
def debug_level():
return _debug
def is_verbose():
return _verbose
def is_debug(level=1):
return _debug >= level
def is_debug():
return _debug
def is_stacktrace():
return _stacktrace
def set_debug(level=0):
def set_debug(flag):
global _debug
assert level >= 0, 'Debug level must be a non-negative value'
_debug = level
_debug = flag
def set_verbose(flag):
@@ -138,17 +132,12 @@ def process_stacktrace(countback):
return st_text
def show_pid():
return is_debug(2)
def get_timestamp(force=False):
"""Get a string timestamp"""
if _debug or _timestamp or force:
# Note inclusion of the PID is useful for parallel builds.
pid = ', {0}'.format(os.getpid()) if show_pid() else ''
return '[{0}{1}] '.format(
datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), pid)
return '[{0}, {1}] '.format(
datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), os.getpid())
else:
return ''
@@ -208,8 +197,7 @@ def verbose(message, *args, **kwargs):
def debug(message, *args, **kwargs):
level = kwargs.get('level', 1)
if is_debug(level):
if _debug:
kwargs.setdefault('format', 'g')
kwargs.setdefault('stream', sys.stderr)
info(message, *args, **kwargs)
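# A sketch of the leveled calls on the removed side of this hunk, assuming
# the module is importable as llnl.util.tty:
import llnl.util.tty as tty

tty.set_debug(2)                         # enable debug levels 1 and 2
tty.debug('high-level progress')         # level defaults to 1: printed
tty.debug('lock details', level=2)       # printed at level 2
tty.debug('very chatty trace', level=3)  # suppressed until set_debug(3)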


@@ -236,8 +236,6 @@ def __exit__(self, exc_type, exception, traceback):
"""If termios was available, restore old settings."""
if self.old_cfg:
self._restore_default_terminal_settings()
if sys.version_info >= (3,):
atexit.unregister(self._restore_default_terminal_settings)
# restore SIGSTP and SIGCONT handlers
if self.old_handlers:
@@ -289,109 +287,6 @@ def _file_descriptors_work(*streams):
return False
class FileWrapper(object):
"""Represents a file. Can be an open stream, a path to a file (not opened
yet), or neither. When unwrapped, it returns an open file (or file-like)
object.
"""
def __init__(self, file_like):
# This records whether the file-like object returned by "unwrap" is
# purely in-memory. In that case a subprocess will need to explicitly
# transmit the contents to the parent.
self.write_in_parent = False
self.file_like = file_like
if isinstance(file_like, string_types):
self.open = True
elif _file_descriptors_work(file_like):
self.open = False
else:
self.file_like = None
self.open = True
self.write_in_parent = True
self.file = None
def unwrap(self):
if self.open:
if self.file_like:
self.file = open(self.file_like, 'w')
else:
self.file = StringIO()
return self.file
else:
# We were handed an already-open file object. In this case we also
# will not actually close the object when requested to.
return self.file_like
def close(self):
if self.file:
self.file.close()
class MultiProcessFd(object):
"""Return an object which stores a file descriptor and can be passed as an
argument to a function run with ``multiprocessing.Process``, such that
the file descriptor is available in the subprocess."""
def __init__(self, fd):
self._connection = None
self._fd = None
if sys.version_info >= (3, 8):
self._connection = multiprocessing.connection.Connection(fd)
else:
self._fd = fd
@property
def fd(self):
if self._connection:
return self._connection._handle
else:
return self._fd
def close(self):
if self._connection:
self._connection.close()
else:
os.close(self._fd)
def close_connection_and_file(multiprocess_fd, file):
# MultiProcessFd is intended to transmit an FD to a child process,
# where the FD is then opened as a Python file object (using fdopen).
# In Python >= 3.8, MultiProcessFd wraps a
# multiprocessing.connection.Connection; a Connection closes its FD
# when it is deleted, and prints a warning about duplicate closure if
# it is not explicitly closed first. In Python < 3.8, MultiProcessFd
# wraps a plain FD, and closing that FD here would conflict with
# closing the file object built on top of it. Therefore this helper
# chooses whether to close the file or the Connection.
if sys.version_info >= (3, 8):
multiprocess_fd.close()
else:
file.close()
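# A hypothetical round trip with the helpers above (worker and the
# duplicated stdin descriptor are illustrative; assumes a fork-based
# multiprocessing start method so the descriptor survives into the child):
import multiprocessing
import os
import sys

def worker(mp_fd):
    # child: wrap the transmitted descriptor in a file object
    stream = os.fdopen(mp_fd.fd)
    sys.stdout.write(stream.readline())
    close_connection_and_file(mp_fd, stream)

if __name__ == '__main__':
    mp_fd = MultiProcessFd(os.dup(sys.stdin.fileno()))
    proc = multiprocessing.Process(target=worker, args=(mp_fd,))
    proc.start()
    proc.join()
    mp_fd.close()   # the parent closes its copy separately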
@contextmanager
def replace_environment(env):
"""Replace the current environment (`os.environ`) with `env`.
If `env` is empty (or None), this unsets all current environment
variables.
"""
env = env or {}
old_env = os.environ.copy()
try:
os.environ.clear()
for name, val in env.items():
os.environ[name] = val
yield
finally:
os.environ.clear()
for name, val in old_env.items():
os.environ[name] = val
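# For example (values illustrative), the context manager above swaps the
# whole environment and guarantees restoration on exit:
import os

os.environ['SPAM'] = 'eggs'
with replace_environment({'PATH': '/usr/bin'}):
    assert os.environ.get('SPAM') is None   # previous variables are gone
    assert os.environ['PATH'] == '/usr/bin'
assert os.environ['SPAM'] == 'eggs'         # restored afterwards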
class log_output(object):
"""Context manager that logs its output to a file.
@@ -428,15 +323,14 @@ class log_output(object):
work within test frameworks like nose and pytest.
"""
def __init__(self, file_like=None, echo=False, debug=0, buffer=False,
env=None):
def __init__(self, file_like=None, echo=False, debug=False, buffer=False):
"""Create a new output log context manager.
Args:
file_like (str or stream): open file object or name of file where
output should be logged
echo (bool): whether to echo output in addition to logging it
debug (int): positive to enable tty debug mode during logging
debug (bool): whether to enable tty debug mode during logging
buffer (bool): pass buffer=True to skip unbuffering output; note
this doesn't set up any *new* buffering
@@ -457,12 +351,11 @@ def __init__(self, file_like=None, echo=False, debug=0, buffer=False,
self.echo = echo
self.debug = debug
self.buffer = buffer
self.env = env # the environment to use for _writer_daemon
self._active = False # used to prevent re-entry
def __call__(self, file_like=None, echo=None, debug=None, buffer=None):
"""This behaves the same as init. It allows a logger to be reused.
"""Thie behaves the same as init. It allows a logger to be reused.
Arguments are the same as for ``__init__()``. Args here take
precedence over those passed to ``__init__()``.
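# A short sketch of the context-manager usage described above (file name
# illustrative):
with log_output('build.log', echo=True):
    print('captured in build.log and echoed to the terminal')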
@@ -499,7 +392,18 @@ def __enter__(self):
"file argument must be set by either __init__ or __call__")
# set up a stream for the daemon to write to
self.log_file = FileWrapper(self.file_like)
self.close_log_in_parent = True
self.write_log_in_parent = False
if isinstance(self.file_like, string_types):
self.log_file = open(self.file_like, 'w')
elif _file_descriptors_work(self.file_like):
self.log_file = self.file_like
self.close_log_in_parent = False
else:
self.log_file = StringIO()
self.write_log_in_parent = True
# record parent color settings before redirecting. We do this
# because color output depends on whether the *original* stdout
@@ -514,8 +418,6 @@ def __enter__(self):
# OS-level pipe for redirecting output to logger
read_fd, write_fd = os.pipe()
read_multiprocess_fd = MultiProcessFd(read_fd)
# Multiprocessing pipe for communication back from the daemon
# Currently only used to save echo value between uses
self.parent_pipe, child_pipe = multiprocessing.Pipe()
@@ -524,28 +426,24 @@ def __enter__(self):
try:
# need to pass this b/c multiprocessing closes stdin in child.
try:
input_multiprocess_fd = MultiProcessFd(
os.dup(sys.stdin.fileno())
)
input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
except BaseException:
# just don't forward input if this fails
input_multiprocess_fd = None
input_stream = None # just don't forward input if this fails
with replace_environment(self.env):
self.process = multiprocessing.Process(
target=_writer_daemon,
args=(
input_multiprocess_fd, read_multiprocess_fd, write_fd,
self.echo, self.log_file, child_pipe
)
self.process = multiprocessing.Process(
target=_writer_daemon,
args=(
input_stream, read_fd, write_fd, self.echo, self.log_file,
child_pipe
)
self.process.daemon = True # must set before start()
self.process.start()
)
self.process.daemon = True # must set before start()
self.process.start()
os.close(read_fd) # close in the parent process
finally:
if input_multiprocess_fd:
input_multiprocess_fd.close()
read_multiprocess_fd.close()
if input_stream:
input_stream.close()
# Flush immediately before redirecting so that anything buffered
# goes to the original stream
@@ -616,21 +514,18 @@ def __exit__(self, exc_type, exc_val, exc_tb):
sys.stderr = self._saved_stderr
# print log contents in parent if needed.
if self.log_file.write_in_parent:
if self.write_log_in_parent:
string = self.parent_pipe.recv()
self.file_like.write(string)
# recover and store echo settings from the child before it dies
try:
self.echo = self.parent_pipe.recv()
except EOFError:
# This may occur if some exception prematurely terminates the
# _writer_daemon. An exception will have already been generated.
pass
if self.close_log_in_parent:
self.log_file.close()
# now that the write pipe is closed (in this __exit__, when we restore
# stdout with dup2), the logger daemon process loop will terminate. We
# wait for that here.
# recover and store echo settings from the child before it dies
self.echo = self.parent_pipe.recv()
# join the daemon process. The daemon will quit automatically
# when the write pipe is closed; we just wait for it here.
self.process.join()
# restore old color and debug settings
@@ -659,8 +554,7 @@ def force_echo(self):
sys.stdout.flush()
def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
log_file_wrapper, control_pipe):
def _writer_daemon(stdin, read_fd, write_fd, echo, log_file, control_pipe):
"""Daemon used by ``log_output`` to write to a log file and to ``stdout``.
The daemon receives output from the parent process and writes it both
@@ -697,39 +591,26 @@ def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
``StringIO`` in the parent. This is mainly for testing.
Arguments:
stdin_multiprocess_fd (int): input from the terminal
read_multiprocess_fd (int): pipe for reading from parent's redirected
stdout
stdin (stream): input from the terminal
read_fd (int): pipe for reading from parent's redirected stdout
write_fd (int): parent's end of the pipe will write to (will be
immediately closed by the writer daemon)
echo (bool): initial echo setting -- controlled by user and
preserved across multiple writer daemons
log_file_wrapper (FileWrapper): file to log all output
log_file (file-like): file to log all output
control_pipe (Pipe): multiprocessing pipe on which to send control
information to the parent
"""
# If this process was forked, then it will inherit file descriptors from
# the parent process. This process depends on closing all instances of
# write_fd to terminate the reading loop, so we close the file descriptor
# here. Forking is the process-spawning method everywhere except on
# Windows and on macOS for Python >= 3.8.
if sys.version_info < (3, 8) or sys.platform != 'darwin':
os.close(write_fd)
# Use line buffering (3rd param = 1) since Python 3 does not support
# unbuffered text I/O.
in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1)
if stdin_multiprocess_fd:
stdin = os.fdopen(stdin_multiprocess_fd.fd)
else:
stdin = None
in_pipe = os.fdopen(read_fd, 'r', 1)
os.close(write_fd)
# list of streams to select from
istreams = [in_pipe, stdin] if stdin else [in_pipe]
force_echo = False # parent can force echo for certain output
log_file = log_file_wrapper.unwrap()
try:
with keyboard_input(stdin) as kb:
while True:
@@ -790,13 +671,10 @@ def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
# send written data back to parent if we used a StringIO
if isinstance(log_file, StringIO):
control_pipe.send(log_file.getvalue())
log_file_wrapper.close()
close_connection_and_file(read_multiprocess_fd, in_pipe)
if stdin_multiprocess_fd:
close_connection_and_file(stdin_multiprocess_fd, stdin)
log_file.close()
# send echo value back to the parent so it can be preserved.
control_pipe.send(echo)
# send echo value back to the parent so it can be preserved.
control_pipe.send(echo)
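# An abridged, self-contained analogue of the loop described above (names
# illustrative): drain a pipe line by line, appending to a log file and
# optionally echoing to stdout, until the writer closes the pipe.
import os
import select
import sys

def toy_writer_daemon(read_fd, log_path, echo=True):
    in_pipe = os.fdopen(read_fd, 'r', 1)   # line buffered
    with open(log_path, 'w') as log_file:
        while True:
            rlist, _, _ = select.select([in_pipe], [], [])
            line = in_pipe.readline()
            if not line:                    # writer end closed: we're done
                break
            log_file.write(line)
            if echo:
                sys.stdout.write(line)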
def _retry(function):


@@ -31,17 +31,17 @@
class ProcessController(object):
"""Wrapper around some fundamental process control operations.
This allows one process (the controller) to drive another (the
minion) similar to the way a shell would, by sending signals and I/O.
This allows one process to drive another similar to the way a shell
would, by sending signals and I/O.
"""
def __init__(self, pid, controller_fd,
def __init__(self, pid, master_fd,
timeout=1, sleep_time=1e-1, debug=False):
"""Create a controller to manipulate the process with id ``pid``
Args:
pid (int): id of process to control
controller_fd (int): controller fd attached to pid's stdin
master_fd (int): master file descriptor attached to pid's stdin
timeout (int): time in seconds for wait operations to time out
(default 1 second)
sleep_time (int): time to sleep after signals, to control the
@@ -58,7 +58,7 @@ def __init__(self, pid, controller_fd,
"""
self.pid = pid
self.pgid = os.getpgid(pid)
self.controller_fd = controller_fd
self.master_fd = master_fd
self.timeout = timeout
self.sleep_time = sleep_time
self.debug = debug
@@ -67,8 +67,8 @@ def __init__(self, pid, controller_fd,
self.ps = which("ps", required=True)
def get_canon_echo_attrs(self):
"""Get echo and canon attributes of the terminal of controller_fd."""
cfg = termios.tcgetattr(self.controller_fd)
"""Get echo and canon attributes of the terminal of master_fd."""
cfg = termios.tcgetattr(self.master_fd)
return (
bool(cfg[3] & termios.ICANON),
bool(cfg[3] & termios.ECHO),
@@ -82,7 +82,7 @@ def horizontal_line(self, name):
)
def status(self):
"""Print debug message with status info for the minion."""
"""Print debug message with status info for the child."""
if self.debug:
canon, echo = self.get_canon_echo_attrs()
sys.stderr.write("canon: %s, echo: %s\n" % (
@@ -94,12 +94,12 @@ def status(self):
sys.stderr.write("\n")
def input_on(self):
"""True if keyboard input is enabled on the controller_fd pty."""
"""True if keyboard input is enabled on the master_fd pty."""
return self.get_canon_echo_attrs() == (False, False)
def background(self):
"""True if pgid is in a background pgroup of controller_fd's tty."""
return self.pgid != os.tcgetpgrp(self.controller_fd)
"""True if pgid is in a background pgroup of master_fd's terminal."""
return self.pgid != os.tcgetpgrp(self.master_fd)
def tstp(self):
"""Send SIGTSTP to the controlled process."""
@@ -115,18 +115,18 @@ def cont(self):
def fg(self):
self.horizontal_line("fg")
with log.ignore_signal(signal.SIGTTOU):
os.tcsetpgrp(self.controller_fd, os.getpgid(self.pid))
os.tcsetpgrp(self.master_fd, os.getpgid(self.pid))
time.sleep(self.sleep_time)
def bg(self):
self.horizontal_line("bg")
with log.ignore_signal(signal.SIGTTOU):
os.tcsetpgrp(self.controller_fd, os.getpgrp())
os.tcsetpgrp(self.master_fd, os.getpgrp())
time.sleep(self.sleep_time)
def write(self, byte_string):
self.horizontal_line("write '%s'" % byte_string.decode("utf-8"))
os.write(self.controller_fd, byte_string)
os.write(self.master_fd, byte_string)
def wait(self, condition):
start = time.time()
@@ -156,51 +156,50 @@ def wait_running(self):
class PseudoShell(object):
"""Sets up controller and minion processes with a PTY.
"""Sets up master and child processes with a PTY.
You can create a ``PseudoShell`` if you want to test how some
function responds to terminal input. This is a pseudo-shell from a
job control perspective; ``controller_function`` and ``minion_function``
are set up with a pseudoterminal (pty) so that the controller can drive
the minion through process control signals and I/O.
job control perspective; ``master_function`` and ``child_function``
are set up with a pseudoterminal (pty) so that the master can drive
the child through process control signals and I/O.
The two functions should have signatures like this::
def controller_function(proc, ctl, **kwargs)
def minion_function(**kwargs)
def master_function(proc, ctl, **kwargs)
def child_function(**kwargs)
``controller_function`` is spawned in its own process and passed three
``master_function`` is spawned in its own process and passed three
arguments:
proc
the ``multiprocessing.Process`` object representing the minion
the ``multiprocessing.Process`` object representing the child
ctl
a ``ProcessController`` object tied to the minion
a ``ProcessController`` object tied to the child
kwargs
keyword arguments passed from ``PseudoShell.start()``.
``minion_function`` is only passed ``kwargs`` delegated from
``child_function`` is only passed ``kwargs`` delegated from
``PseudoShell.start()``.
The ``ctl.controller_fd`` will be connected to ``sys.stdin``
in the minion process. Both processes will share the
The ``ctl.master_fd`` will be connected to ``sys.stdin``
in the child process. Both processes will share the
same ``sys.stdout`` and ``sys.stderr`` as the process instantiating
``PseudoShell``.
Here are the relationships between processes created::
._________________________________________________________.
| Minion Process | pid 2
| - runs minion_function | pgroup 2
| Child Process | pid 2
| - runs child_function | pgroup 2
|_________________________________________________________| session 1
^
| create process with controller_fd connected to stdin
| create process with master_fd connected to stdin
| stdout, stderr are the same as caller
._________________________________________________________.
| Controller Process | pid 1
| - runs controller_function | pgroup 1
| - uses ProcessController and controller_fd to | session 1
| control minion |
| Master Process | pid 1
| - runs master_function | pgroup 1
| - uses ProcessController and master_fd to control child | session 1
|_________________________________________________________|
^
| create process
@@ -208,51 +207,51 @@ def minion_function(**kwargs)
._________________________________________________________.
| Caller | pid 0
| - Constructs, starts, joins PseudoShell | pgroup 0
| - provides controller_function, minion_function | session 0
| - provides master_function, child_function | session 0
|_________________________________________________________|
"""
def __init__(self, controller_function, minion_function):
def __init__(self, master_function, child_function):
self.proc = None
self.controller_function = controller_function
self.minion_function = minion_function
self.master_function = master_function
self.child_function = child_function
# these can be optionally set to change defaults
self.controller_timeout = 1
self.sleep_time = 0
def start(self, **kwargs):
"""Start the controller and minion processes.
"""Start the master and child processes.
Arguments:
kwargs (dict): arbitrary keyword arguments that will be
passed to controller and minion functions
passed to master and child functions
The controller process will create the minion, then call
``controller_function``. The minion process will call
``minion_function``.
The master process will create the child, then call
``master_function``. The child process will call
``child_function``.
"""
self.proc = multiprocessing.Process(
target=PseudoShell._set_up_and_run_controller_function,
args=(self.controller_function, self.minion_function,
target=PseudoShell._set_up_and_run_master_function,
args=(self.master_function, self.child_function,
self.controller_timeout, self.sleep_time),
kwargs=kwargs,
)
self.proc.start()
def join(self):
"""Wait for the minion process to finish, and return its exit code."""
"""Wait for the child process to finish, and return its exit code."""
self.proc.join()
return self.proc.exitcode
@staticmethod
def _set_up_and_run_minion_function(
tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs):
"""Minion process wrapper for PseudoShell.
def _set_up_and_run_child_function(
tty_name, stdout_fd, stderr_fd, ready, child_function, **kwargs):
"""Child process wrapper for PseudoShell.
Handles the mechanics of setting up a PTY, then calls
``minion_function``.
``child_function``.
"""
# new process group, like a command or pipeline launched by a shell
@@ -267,45 +266,45 @@ def _set_up_and_run_minion_function(
if kwargs.get("debug"):
sys.stderr.write(
"minion: stdin.isatty(): %s\n" % sys.stdin.isatty())
"child: stdin.isatty(): %s\n" % sys.stdin.isatty())
# tell the parent that we're really running
if kwargs.get("debug"):
sys.stderr.write("minion: ready!\n")
sys.stderr.write("child: ready!\n")
ready.value = True
try:
minion_function(**kwargs)
child_function(**kwargs)
except BaseException:
traceback.print_exc()
@staticmethod
def _set_up_and_run_controller_function(
controller_function, minion_function, controller_timeout,
sleep_time, **kwargs):
"""Set up a pty, spawn a minion process, execute controller_function.
def _set_up_and_run_master_function(
master_function, child_function, controller_timeout, sleep_time,
**kwargs):
"""Set up a pty, spawn a child process, and execute master_function.
Handles the mechanics of setting up a PTY, then calls
``controller_function``.
``master_function``.
"""
os.setsid() # new session; this process is the controller
controller_fd, minion_fd = os.openpty()
pty_name = os.ttyname(minion_fd)
master_fd, child_fd = os.openpty()
pty_name = os.ttyname(child_fd)
# take controlling terminal
pty_fd = os.open(pty_name, os.O_RDWR)
os.close(pty_fd)
ready = multiprocessing.Value('i', False)
minion_process = multiprocessing.Process(
target=PseudoShell._set_up_and_run_minion_function,
child_process = multiprocessing.Process(
target=PseudoShell._set_up_and_run_child_function,
args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(),
ready, minion_function),
ready, child_function),
kwargs=kwargs,
)
minion_process.start()
child_process.start()
# wait for subprocess to be running and connected.
while not ready.value:
@@ -316,31 +315,30 @@ def _set_up_and_run_controller_function(
sys.stderr.write("pid: %d\n" % os.getpid())
sys.stderr.write("pgid: %d\n" % os.getpgrp())
sys.stderr.write("sid: %d\n" % os.getsid(0))
sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(controller_fd))
sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(master_fd))
sys.stderr.write("\n")
minion_pgid = os.getpgid(minion_process.pid)
sys.stderr.write("minion pid: %d\n" % minion_process.pid)
sys.stderr.write("minion pgid: %d\n" % minion_pgid)
sys.stderr.write(
"minion sid: %d\n" % os.getsid(minion_process.pid))
child_pgid = os.getpgid(child_process.pid)
sys.stderr.write("child pid: %d\n" % child_process.pid)
sys.stderr.write("child pgid: %d\n" % child_pgid)
sys.stderr.write("child sid: %d\n" % os.getsid(child_process.pid))
sys.stderr.write("\n")
sys.stderr.flush()
# set up controller to ignore SIGTSTP, like a shell
# set up master to ignore SIGTSTP, like a shell
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
# call the controller function once the minion is ready
# call the master function once the child is ready
try:
controller = ProcessController(
minion_process.pid, controller_fd, debug=kwargs.get("debug"))
child_process.pid, master_fd, debug=kwargs.get("debug"))
controller.timeout = controller_timeout
controller.sleep_time = sleep_time
error = controller_function(minion_process, controller, **kwargs)
error = master_function(child_process, controller, **kwargs)
except BaseException:
error = 1
traceback.print_exc()
minion_process.join()
child_process.join()
# return whether either the parent or minion failed
return error or minion_process.exitcode
# return whether either the parent or child failed
return error or child_process.exitcode
