Compare commits

382 commits · `bugfix-spa...v0.15.3-sh`

Commits (SHA1):

```text
008cf2ee15 474a077143 d386c59de9 dedadcd2ea 983aeea850 0c44a9a504 b81339cf80 6c69b8a4d4
ec1237479e 40e2a41477 f168d63586 78a84efb4b c6891376f4 83b281f36b 86ec698a33 897e80e596
ab32799b52 bd236918dd 3949a85f9a bab1852340 ef814b7a32 96fa6f0c1b e4ba1c1daf bbbf0466dc
dc18b3e3d4 b5f82696e2 a5aa150a98 ae03782032 c729c6b93c 324c383d8e 35b7a69456 d69c32d7ef
27aaff3dc2 fc8847cf4e 1fcc00df96 697c2183d3 b320be70cb 3f24188d19 3449087284 8e9f4d0078
d7794540b2 ae44b1d7b9 df12b2bd15 148a6a8860 efba3731e5 6eb332a984 d0a83f318b 0f67b97065
d2c2e000a7 08926b5b12 aaee0bcb7e f2a35a767a 2b05d2bf5c cc00619929 188a371595 dce7be9932
4ac1a532f3 f42dc4fa4d d25c7ddd6f 48a9ad3652 d55541919d 0d4740d1b1 d56711f799 99a47e407e
7efb0e541e 7340be98f6 c281eaf69f 2110b98829 88537d02e4 a2729fcd7f bcd41cec71 6f6e896795
28a25080ca 14f3f230c1 d32bbae431 710ff8d7ce 683881f912 11d8aed6cd ab68410c4c fa614404e6
a6abd530bd 2b809a5374 3e13137f6e 6aa6e19d34 c2d8d8acbd 299dcdd3eb e0f13b298d d2ac26f844
fae57d1422 c84a05b809 05e8918076 929cb9e62e 7d1f2abd56 ab5f28aceb 4450377794 45eaa442c3
4fa519134f 815f62ce0c b3b5ea4064 573489db71 9c42f246ed dbdd2cb92f 406596af70 73f02b10de
9629f571bc 5e50dc5acb 59bfc22d40 1a8a147fe5 0612a9e8e9 f2889e698a ea546425e8 7269a5bf51
00d7e817c6 ed7d485b58 38d387c9a5 02dd90ebf9 e72e2568dd d9923a05e0 8c6fa66b2a 84eae97f91
12099ed55e d0f5b69a19 ce9d30f80f e02d955aed b3fff20d1f 8c41173678 0bed621d0c 1d2754c3f6
ae2a867a7f 207e496162 f0391db096 084994db9c f85da868ac f1f31e3dfe 7f8e827db8 a63761f875
3ce16c89b7 f4ac3770b4 b0506a722e 650ab563f4 90285c7d61 6c300ab717 05d8ba170b 51f65152a5
1113357e35 d65a076c0d 845139740f 1f87b07689 cbaa1bca1c 5fb6a06c37 6e38fc56f6 c00a05bfba
9ec9327f5a 11088df402 4ea76dc95c f0275d7e1b 516c3e659f e62ddcb582 b3bc538df6 29fc94e29e
466f7fd996 58cfe4e078 00f7577273 4e6d189a94 9abadd4985 66d4bc3f3c 52cafe6c96 8d5aa46765
e5ec89ad5b 7bba9cd2a5 cce629e791 bb15addad5 e9e3e88f63 c797a0611c 04f3000646 f3eba3c482
02fa7b680f 8fcd917e51 9c85d87b90 b45fc97564 986f68f7ed 2cd9e1eb62 61804f201a cf104b0f10
17106a131d cc0dda95c4 7679e20e83 4349c091e7 ff60f51a7a 06da1f195c 3d98ad3f4c f1bb8999ab
1e75dde7b2 f780839b87 204f15b4c1 a4fff39d7e 10016a34e0 e133b44da6 5732d8de50 509b3c3016
8a9fa9bd18 a5eabfad91 6a77f1ff45 60283775b3 4433e4de2d aaf6f80d4c 4bb26802ed 59fb789290
1965e1e606 6ed3db6c14 a8fbc96271 d8956a3bbe a807b95081 1b608d6041 be143d7dff 05fe92e086
cd54fb95b8 8b63c4555c ec78160569 e1379f132d cdcd3dcedd 7c1083916a c07bbe1a25 85032c6224
7b7898a69c 84c5d76eae bcd47f0bd6 cb6a959cdb 32cd12bff7 7021965159 5c5743ca33 034a7662ac
e6b6ac5898 35037bf088 d14c245411 6e2ad01f20 ef9b5a8f74 4921ed29d5 f4c720e902 0a71b1d5ac
3593a7be6a e4d2cf4441 911e51bd89 6ec8aea6f7 5b11f7aa4c 97e46981b9 873ac5e890 4d7dae5716
b19f0fafcc 11b1bdd119 f749821dc2 5abb20dcab 0c233bdd0f 0f171c7ded b4c7520dd8 9ab7d8f01d
a7ad344c2a deb2d3745c ff96ec430b d4a959736a 5ba51a0be0 27e1140df7 7ab6af8a3b 0e6e93eaac
38f8bdd2bb 8e45a3fc2f c22af99b04 fc3a909fbc 9665754eae 0f9f9f3a85 777a5682a6 8994b4aab6
98ec366470 c61f4d7c82 811b304230 8f0c9ad409 6a423a5d8a 23c37063bd 478f3a5a99 02afb30990
06e3f15e47 f13ce3540d 7ae34087e3 f0fea97e88 54893197ed 80da1d50d1 944c5d75cd 9ef4bc9d50
a2af432833 aefed311af 6ffacddcf4 e17824f82f 57ca47f035 4532a56b4e 86e69a48a2 2508295d81
1a041c051a 2262ca2e67 2269771a91 7f32574dd8 d15ac30f62 1f41347ab8 1f4f01103b 8f46fcb512
2d3b973ebc 7e62e0f27f ea0db4c0f9 0afc68e60b 8ad25d5013 e90db68321 9e96b89f02 b4dae1b7fd
9e9adf1d2f de9255247a de5d3e3229 e621aafc77 c53427c98d 7a75148d1b 4210520c9d 4f3fb50ae7
7660659107 fcca2a518b 23e1cd7775 58e794e95a 7ed59ed835 512726ae5b 20851a6e6c 92bbbb9659
5f2f2bfb84 9b63f72d6b 4c60f01bae cd08308463 fe69997043 1584a6e3c6 c393880852 bbe9e6bf54
d7a00b71d4 6775d2546a 8a154333f2 5e637a04fd 0213869439 22e9a9792a 4f23da9d26 f9430e2fd4
a2f86d5d18 0efab6637c 2b11694b94 088798a727 bddbb1c22e 92f447cf1c 96f266c3e3 d5093c20c5
2064241c37 721742b764 c45bf153d8 b98e5e66e7 3d18bf345f f8e9cf4081 98e0f8b89b 263275b7ea
3e13002d7f 654e5cc924 04a72c1834 53cf6eb194 5a7f186176 987adfa9c9 e476bb1400 dc12233610
29d21a0a5d 762f505da5 8e1c326174 0bac5d527d 79256eeb5c de760942f2 860641bfab 673e55f14d
54777a4f3e db36e66592 0d36e94407 92c3b5b8b2 71220a3656 09bd29d816
```
**.github/workflows/install_spack.sh** (vendored, 23 changes)

```diff
@@ -1,5 +1,20 @@
 #!/usr/bin/env sh
-git clone https://github.com/spack/spack.git
-echo -e "config:\n build_jobs: 2" > spack/etc/spack/config.yaml
-. spack/share/spack/setup-env.sh
-spack compilers
+. share/spack/setup-env.sh
+echo -e "config:\n build_jobs: 2" > etc/spack/config.yaml
+spack config add "packages:all:target:[x86_64]"
+# TODO: remove this explicit setting once apple-clang detection is fixed
+cat <<EOF > etc/spack/compilers.yaml
+compilers:
+- compiler:
+    spec: apple-clang@11.0.3
+    paths:
+      cc: /usr/bin/clang
+      cxx: /usr/bin/clang++
+      f77: /usr/local/bin/gfortran-9
+      fc: /usr/local/bin/gfortran-9
+    modules: []
+    operating_system: catalina
+    target: x86_64
+EOF
+spack compiler info apple-clang
+spack debug report
```
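The macOS jobs further down dot-source this script from the repository root before installing anything. A minimal local sketch of the same flow (the `zlib` spec is only an example):

```sh
# From the root of a spack checkout, as the workflows do:
. .github/workflows/install_spack.sh
# Build something small with the pinned apple-clang compiler:
spack install -v --fail-fast zlib %apple-clang
```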
**.github/workflows/linux_build_tests.yaml** (vendored, 3 changes)

```diff
@@ -3,13 +3,12 @@ name: linux builds
 on:
   push:
     branches:
-      - master
       - develop
       - releases/**
   pull_request:
     branches:
-      - master
       - develop
+      - releases/**
     paths-ignore:
       # Don't run if we only modified packages in the built-in repository
       - 'var/spack/repos/builtin/**'
```
**.github/workflows/linux_unit_tests.yaml** (vendored, 80 changes)

```diff
@@ -60,3 +60,83 @@ jobs:
       uses: codecov/codecov-action@v1
       with:
         flags: unittests,linux
+  flake8:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - uses: actions/setup-python@v2
+      with:
+        python-version: 3.8
+    - name: Install Python packages
+      run: |
+        pip install --upgrade pip six setuptools flake8
+    - name: Setup git configuration
+      run: |
+        # Need this for the git tests to succeed.
+        git --version
+        git config --global user.email "spack@example.com"
+        git config --global user.name "Test User"
+        git fetch -u origin develop:develop
+    - name: Run flake8 tests
+      run: |
+        share/spack/qa/run-flake8-tests
+  shell:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - uses: actions/setup-python@v2
+      with:
+        python-version: 3.8
+    - name: Install System packages
+      run: |
+        sudo apt-get -y update
+        sudo apt-get install -y coreutils gfortran gnupg2 mercurial ninja-build patchelf zsh fish
+        # Needed for kcov
+        sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev zlib1g-dev libdw-dev libiberty-dev
+    - name: Install Python packages
+      run: |
+        pip install --upgrade pip six setuptools codecov coverage
+    - name: Setup git configuration
+      run: |
+        # Need this for the git tests to succeed.
+        git --version
+        git config --global user.email "spack@example.com"
+        git config --global user.name "Test User"
+        git fetch -u origin develop:develop
+    - name: Install kcov for bash script coverage
+      env:
+        KCOV_VERSION: 38
+      run: |
+        KCOV_ROOT=$(mktemp -d)
+        wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
+        tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
+        mkdir -p ${KCOV_ROOT}/build
+        cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
+        make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
+    - name: Run shell tests
+      env:
+        COVERAGE: true
+      run: |
+        share/spack/qa/run-shell-tests
+    - name: Upload to codecov.io
+      uses: codecov/codecov-action@v1
+      with:
+        flags: shelltests,linux
+  documentation:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - uses: actions/setup-python@v2
+      with:
+        python-version: 3.8
+    - name: Install System packages
+      run: |
+        sudo apt-get -y update
+        sudo apt-get install -y coreutils ninja-build graphviz
+    - name: Install Python packages
+      run: |
+        pip install --upgrade pip six setuptools
+        pip install --upgrade -r lib/spack/docs/requirements.txt
+    - name: Build documentation
+      run: |
+        share/spack/qa/run-doc-tests
```
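The new `shell` job can be approximated locally; a sketch of its essential steps, assuming a spack checkout on a Debian-like system (kcov is only needed for coverage):

```sh
sudo apt-get install -y zsh fish mercurial ninja-build   # tools the shell tests exercise
export COVERAGE=true                                     # same switch the job sets
share/spack/qa/run-shell-tests
```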
**.github/workflows/macos_python.yml** (vendored, 23 changes)

```diff
@@ -8,6 +8,13 @@ on:
   schedule:
     # nightly at 1 AM
     - cron: '0 1 * * *'
+  pull_request:
+    branches:
+      - develop
+    paths:
+      # Run if we modify this yaml file
+      - '.github/workflows/macos_python.yml'
+      # TODO: run if we touch any of the recipes involved in this

 # GitHub Action Limits
 # https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions
@@ -21,7 +28,8 @@ jobs:
     - name: spack install
       run: |
         . .github/workflows/install_spack.sh
-        spack install -v gcc
+        # 9.2.0 is the latest version on which we apply homebrew patch
+        spack install -v --fail-fast gcc@9.2.0 %apple-clang

   install_jupyter_clang:
     name: jupyter
@@ -32,7 +40,8 @@
     - name: spack install
       run: |
         . .github/workflows/install_spack.sh
-        spack install -v py-jupyter %clang
+        spack config add packages:opengl:paths:opengl@4.1:/usr/X11R6
+        spack install -v --fail-fast py-jupyter %apple-clang

   install_scipy_clang:
     name: scipy, mpl, pd
@@ -42,9 +51,9 @@
     - name: spack install
       run: |
         . .github/workflows/install_spack.sh
-        spack install -v py-scipy %clang
-        spack install -v py-matplotlib %clang
-        spack install -v py-pandas %clang
+        spack install -v --fail-fast py-scipy %apple-clang
+        spack install -v --fail-fast py-matplotlib %apple-clang
+        spack install -v --fail-fast py-pandas %apple-clang

   install_mpi4py_clang:
     name: mpi4py, petsc4py
@@ -54,5 +63,5 @@
     - name: spack install
       run: |
         . .github/workflows/install_spack.sh
-        spack install -v py-mpi4py %clang
-        spack install -v py-petsc4py %clang
+        spack install -v --fail-fast py-mpi4py %apple-clang
+        spack install -v --fail-fast py-petsc4py %apple-clang
```
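The `%apple-clang` suffix in these specs pins the compiler. To preview what a spec resolves to without installing anything (package choice is illustrative):

```sh
spack spec py-scipy %apple-clang   # print the concretized dependency tree
```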
**.github/workflows/macos_unit_tests.yaml** (vendored, 3 changes)

```diff
@@ -3,13 +3,12 @@ name: macos tests
 on:
   push:
     branches:
-      - master
       - develop
       - releases/**
   pull_request:
     branches:
-      - master
       - develop
+      - releases/**
 jobs:
   build:
```
**(python version check workflow)** — the same trigger change:

```diff
@@ -3,13 +3,12 @@ name: python version check
 on:
   push:
     branches:
-      - master
       - develop
       - releases/**
   pull_request:
     branches:
-      - master
       - develop
+      - releases/**
 jobs:
   validate:
```
**.travis.yml** (124 changes)

```diff
@@ -1,101 +1,32 @@
 #=============================================================================
 # Project settings
 #=============================================================================
-# Only build master and develop on push; do not build every branch.
+# Only build releases and develop on push; do not build every branch.
 branches:
   only:
-    - master
     - develop
+    - /^releases\/.*$/

 #=============================================================================
 # Build matrix
 #=============================================================================

-dist: bionic
-
-jobs:
-  fast_finish: true
-  include:
-    - stage: 'style checks'
-      python: '3.8'
-      os: linux
-      language: python
-      env: TEST_SUITE=flake8
-    - stage: 'unit tests + documentation'
-      python: '2.6'
-      dist: trusty
-      os: linux
-      language: python
-      addons:
-        apt:
-          # Everything but patchelf, that is not available for trusty
-          packages:
-            - ccache
-            - gfortran
-            - graphviz
-            - gnupg2
-            - kcov
-            - mercurial
-            - ninja-build
-            - realpath
-            - zsh
-            - fish
-      env: [ TEST_SUITE=unit, COVERAGE=true ]
-    - python: '3.8'
-      os: linux
-      language: python
-      env: [ TEST_SUITE=shell, COVERAGE=true, KCOV_VERSION=38 ]
-    - python: '3.8'
-      os: linux
-      language: python
-      env: TEST_SUITE=doc
-
-stages:
-  - 'style checks'
-  - 'unit tests + documentation'

 #=============================================================================
 # Environment
 #=============================================================================

 # Docs need graphviz to build
+language: python
+python: '2.6'
+dist: trusty
+os: linux
 addons:
   # for Linux builds, we use APT
   apt:
     packages:
       - ccache
       - coreutils
       - gfortran
       - graphviz
       - gnupg2
       - kcov
       - mercurial
       - ninja-build
       - patchelf
       - realpath
       - zsh
       - fish
     update: true

 # ~/.ccache needs to be cached directly as Travis is not taking care of it
 # (possibly because we use 'language: python' and not 'language: c')
 cache:
   pip: true
   ccache: true
   directories:
     - ~/.ccache

 before_install:
 - ccache -M 2G && ccache -z
-# Install kcov manually, since it's not packaged for bionic beaver
-- if [[ "$KCOV_VERSION" ]]; then
-    sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev zlib1g-dev libdw-dev libiberty-dev;
-    KCOV_ROOT=$(mktemp -d);
-    wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz;
-    tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz;
-    mkdir -p ${KCOV_ROOT}/build;
-    cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd - ;
-    make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install;
+- if [[ "$TRAVIS_DIST" == "trusty" ]]; then
+    share/spack/qa/install_patchelf.sh;
+  else
+    sudo apt-get update;
+    sudo apt-get -y install patchelf;
   fi

 # Install various dependencies
@@ -103,12 +34,8 @@ install:
 - pip install --upgrade pip
 - pip install --upgrade six
 - pip install --upgrade setuptools
 - pip install --upgrade codecov coverage==4.5.4
-- pip install --upgrade flake8
-- pip install --upgrade pep8-naming
-- if [[ "$TEST_SUITE" == "doc" ]]; then
-    pip install --upgrade -r lib/spack/docs/requirements.txt;
-  fi

 before_script:
 # Need this for the git tests to succeed.
@@ -118,31 +45,12 @@ before_script:
 # Need this to be able to compute the list of changed files
 - git fetch origin ${TRAVIS_BRANCH}:${TRAVIS_BRANCH}

 #=============================================================================
 # Building
 #=============================================================================
 script:
 - share/spack/qa/run-$TEST_SUITE-tests
-- python bin/spack -h
-- python bin/spack help -a
-- python bin/spack -p --lines 20 spec mpileaks%gcc ^elfutils@0.170
-- python bin/spack test -x --verbose
-
-after_success:
-- ccache -s
-- case "$TEST_SUITE" in
-    unit)
-      if [[ "$COVERAGE" == "true" ]]; then
-        codecov --env PYTHON_VERSION
-                --required
-                --flags "${TEST_SUITE}${TRAVIS_OS_NAME}";
-      fi
-      ;;
-    shell)
-      codecov --env PYTHON_VERSION
-              --required
-              --flags "${TEST_SUITE}${TRAVIS_OS_NAME}";
-  esac

 #=============================================================================
 # Notifications
 #=============================================================================
 notifications:
   email:
     recipients:
```
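After this reduction, the Travis job drives a single suite through the same `run-$TEST_SUITE-tests` entry point shown above. Running the unit suite locally is just (a sketch, from a spack checkout):

```sh
share/spack/qa/run-unit-tests   # what run-$TEST_SUITE-tests expands to for TEST_SUITE=unit
```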
**README.md** (28 changes)

```diff
@@ -78,11 +78,29 @@ these guidelines with [Travis CI](https://travis-ci.org/spack/spack). To
 run these tests locally, and for helpful tips on git, see our
 [Contribution Guide](http://spack.readthedocs.io/en/latest/contribution_guide.html).

-Spack uses a rough approximation of the
-[Git Flow](http://nvie.com/posts/a-successful-git-branching-model/)
-branching model. The ``develop`` branch contains the latest
-contributions, and ``master`` is always tagged and points to the latest
-stable release.
+Spack's `develop` branch has the latest contributions. Pull requests
+should target `develop`, and users who want the latest package versions,
+features, etc. can use `develop`.
+
+Releases
+--------
+
+For multi-user site deployments or other use cases that need very stable
+software installations, we recommend using Spack's
+[stable releases](https://github.com/spack/spack/releases).
+
+Each Spack release series also has a corresponding branch, e.g.
+`releases/v0.14` has `0.14.x` versions of Spack, and `releases/v0.13` has
+`0.13.x` versions. We backport important bug fixes to these branches but
+we do not advance the package versions or make other changes that would
+change the way Spack concretizes dependencies within a release branch.
+So, you can base your Spack deployment on a release branch and `git pull`
+to get fixes, without the package churn that comes with `develop`.
+
+The latest release is always available with the `releases/latest` tag.
+
+See the [docs on releases](https://spack.readthedocs.io/en/latest/developer_guide.html#releases)
+for more details.

 Code of Conduct
 ------------------------
```
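A minimal sketch of the deployment pattern the new README text describes (the clone path is illustrative):

```sh
git clone -b releases/v0.14 https://github.com/spack/spack.git /opt/spack
cd /opt/spack && git pull   # later pulls pick up backported fixes only
```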
**config.yaml**

```diff
@@ -16,7 +16,7 @@
 config:
   # This is the path to the root of the Spack install tree.
   # You can use $spack here to refer to the root of the spack instance.
-  install_tree: $spack/opt/spack
+  install_tree: ~/.spack/opt/spack


   # Locations where templates should be found
@@ -30,8 +30,8 @@ config:

   # Locations where different types of modules should be installed.
   module_roots:
-    tcl: $spack/share/spack/modules
-    lmod: $spack/share/spack/lmod
+    tcl: ~/.spack/share/spack/modules
+    lmod: ~/.spack/share/spack/lmod


   # Temporary locations Spack can try to use for builds.
@@ -67,7 +67,7 @@ config:

   # Cache directory for already downloaded source tarballs and archived
   # repositories. This can be purged with `spack clean --downloads`.
-  source_cache: $spack/var/spack/cache
+  source_cache: ~/.spack/var/spack/cache


   # Cache directory for miscellaneous files, like the package index.
```
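One way to verify edits like these is to print the merged section rather than hunting through files (a sketch):

```sh
spack config get config   # shows the effective install_tree, module_roots, etc.
```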
**packages.yaml**

```diff
@@ -23,8 +23,12 @@ packages:
     daal: [intel-daal]
     elf: [elfutils]
     fftw-api: [fftw]
-    gl: [mesa+opengl, opengl]
-    glx: [mesa+glx, opengl]
+    gl: [libglvnd-fe, mesa+opengl~glvnd, opengl~glvnd]
+    glx: [libglvnd-fe+glx, mesa+glx~glvnd, opengl+glx~glvnd]
+    egl: [libglvnd-fe+egl, opengl+egl~glvnd]
+    libglvnd-be-gl: [mesa+glvnd, opengl+glvnd]
+    libglvnd-be-glx: [mesa+glx+glvnd, opengl+glx+glvnd]
+    libglvnd-be-egl: [opengl+egl+glvnd]
     glu: [mesa-glu, openglu]
     golang: [gcc]
     iconv: [libiconv]
```
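Which concrete packages can satisfy a virtual under these defaults can be listed directly; a quick check (sketch):

```sh
spack providers gl    # lists packages that can provide the gl virtual
```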
**etc/spack/defaults/upstreams.yaml** (new file, 7 lines)

```diff
@@ -0,0 +1,7 @@
+upstreams:
+  global:
+    install_tree: $spack/opt/spack
+    modules:
+      tcl: $spack/share/spack/modules
+      lmod: $spack/share/spack/lmod
+      dotkit: $spack/share/spack/dotkit
```
```diff
@@ -45,7 +45,7 @@ Environments:
         && echo "  view: /opt/view") > /opt/spack-environment/spack.yaml

     # Install the software, remove unnecessary deps
-    RUN cd /opt/spack-environment && spack install && spack gc -y
+    RUN cd /opt/spack-environment && spack env activate . && spack install && spack gc -y

     # Strip all the binaries
     RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
@@ -267,7 +267,7 @@ following ``Dockerfile``:
         && echo "  view: /opt/view") > /opt/spack-environment/spack.yaml

     # Install the software, remove unnecessary deps
-    RUN cd /opt/spack-environment && spack install && spack gc -y
+    RUN cd /opt/spack-environment && spack env activate . && spack install && spack gc -y

     # Strip all the binaries
     RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
```
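The added `spack env activate .` is what scopes the install to the environment described by `spack.yaml` instead of the global store; the same sequence works interactively, a minimal sketch:

```sh
cd /opt/spack-environment
spack env activate .          # make spack.yaml the active environment
spack install && spack gc -y  # install it, then prune build-only dependencies
```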
```diff
@@ -27,11 +27,22 @@ correspond to one feature/bugfix/extension/etc. One can create PRs with
 changes relevant to different ideas, however reviewing such PRs becomes tedious
 and error prone. If possible, try to follow the **one-PR-one-package/feature** rule.

-Spack uses a rough approximation of the `Git Flow <http://nvie.com/posts/a-successful-git-branching-model/>`_
-branching model. The develop branch contains the latest contributions, and
-master is always tagged and points to the latest stable release. Therefore, when
-you send your request, make ``develop`` the destination branch on the
-`Spack repository <https://github.com/spack/spack>`_.
+--------
+Branches
+--------
+
+Spack's ``develop`` branch has the latest contributions. Nearly all pull
+requests should start from ``develop`` and target ``develop``.
+
+There is a branch for each major release series. Release branches
+originate from ``develop`` and have tags for each point release in the
+series. For example, ``releases/v0.14`` has tags for ``0.14.0``,
+``0.14.1``, ``0.14.2``, etc. versions of Spack. We backport important bug
+fixes to these branches, but we do not advance the package versions or
+make other changes that would change the way Spack concretizes
+dependencies. Currently, the maintainers manage these branches by
+cherry-picking from ``develop``. See :ref:`releases` for more
+information.

 ----------------------
 Continuous Integration
```
@@ -495,3 +495,393 @@ (developer guide — everything below the first three context lines is added)

slowest on top. The profiling support is from Python's built-in tool,
`cProfile
<https://docs.python.org/2/library/profile.html#module-cProfile>`_.

.. _releases:

--------
Releases
--------

This section documents Spack's release process. It is intended for
project maintainers, as the tasks described here require maintainer
privileges on the Spack repository. For others, we hope this section at
least provides some insight into how the Spack project works.

.. _release-branches:

^^^^^^^^^^^^^^^^
Release branches
^^^^^^^^^^^^^^^^

There are currently two types of Spack releases: :ref:`major releases
<major-releases>` (``0.13.0``, ``0.14.0``, etc.) and :ref:`point releases
<point-releases>` (``0.13.1``, ``0.13.2``, ``0.13.3``, etc.). Here is a
diagram of how Spack release branches work::

    o    branch: develop  (latest version)
    |
    o    merge v0.14.1 into develop
    |\
    | o  branch: releases/v0.14, tag: v0.14.1
    o |  merge v0.14.0 into develop
    |\|
    | o  tag: v0.14.0
    |/
    o    merge v0.13.2 into develop
    |\
    | o  branch: releases/v0.13, tag: v0.13.2
    o |  merge v0.13.1 into develop
    |\|
    | o  tag: v0.13.1
    o |  merge v0.13.0 into develop
    |\|
    | o  tag: v0.13.0
    o |
    | o
    |/
    o

The ``develop`` branch has the latest contributions, and nearly all pull
requests target ``develop``.

Each Spack release series also has a corresponding branch, e.g.
``releases/v0.14`` has ``0.14.x`` versions of Spack, and
``releases/v0.13`` has ``0.13.x`` versions. A major release is the first
tagged version on a release branch. Minor releases are back-ported from
develop onto release branches. This is typically done by cherry-picking
bugfix commits off of ``develop``.

To avoid version churn for users of a release series, minor releases
should **not** make changes that would change the concretization of
packages. They should generally only contain fixes to the Spack core.

Both major and minor releases are tagged. After each release, we merge
the release branch back into ``develop`` so that the version bump and any
other release-specific changes are visible in the mainline. As a
convenience, we also tag the latest release as ``releases/latest``,
so that users can easily check it out to get the latest
stable version. See :ref:`merging-releases` for more details.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Scheduling work for releases
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

We schedule work for releases by creating `GitHub projects
<https://github.com/spack/spack/projects>`_. At any time, there may be
several open release projects. For example, here are two releases (from
some past version of the page linked above):

.. image:: images/projects.png

Here, there's one release in progress for ``0.15.1`` and another for
``0.16.0``. Each of these releases has a project board containing issues
and pull requests. GitHub shows a status bar with completed work in
green, work in progress in purple, and work not started yet in gray, so
it's fairly easy to see progress.

Spack's project boards are not firm commitments, and we move work between
releases frequently. If we need to make a release and some tasks are not
yet done, we will simply move them to the next minor or major release,
rather than delaying the release to complete them.

For more on using GitHub project boards, see `GitHub's documentation
<https://docs.github.com/en/github/managing-your-work-on-github/about-project-boards>`_.
.. _major-releases:

^^^^^^^^^^^^^^^^^^^^^
Making Major Releases
^^^^^^^^^^^^^^^^^^^^^

Assuming you've already created a project board and completed the work
for a major release, the steps to make the release are as follows:

#. Create two new project boards:

   * One for the next major release
   * One for the next point release

#. Move any tasks that aren't done yet to one of the new project boards.
   Small bugfixes should go to the next point release. Major features,
   refactors, and changes that could affect concretization should go in
   the next major release.

#. Create a branch for the release, based on ``develop``:

   .. code-block:: console

      $ git checkout -b releases/v0.15 develop

   For a version ``vX.Y.Z``, the branch's name should be
   ``releases/vX.Y``. That is, you should create a ``releases/vX.Y``
   branch if you are preparing the ``X.Y.0`` release.

#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.13.0
   <https://github.com/spack/spack/commit/8eeb64096c98b8a43d1c587f13ece743c864fba9>`_.

#. Update the release version lists in these files to include the new version:

   * ``lib/spack/spack/schema/container.py``
   * ``lib/spack/spack/container/images.json``

   **TODO**: We should get rid of this step in some future release.

#. Update ``CHANGELOG.md`` with major highlights in bullet form. Use
   proper markdown formatting, like `this example from 0.15.0
   <https://github.com/spack/spack/commit/d4bf70d9882fcfe88507e9cb444331d7dd7ba71c>`_.

#. Push the release branch to GitHub.

#. Make sure CI passes on the release branch, including:

   * Regular unit tests
   * Build tests
   * The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_

   If CI is not passing, submit pull requests to ``develop`` as normal
   and keep rebasing the release branch on ``develop`` until CI passes.

#. Follow the steps in :ref:`publishing-releases`.

#. Follow the steps in :ref:`merging-releases`.

#. Follow the steps in :ref:`announcing-releases`.
.. _point-releases:

^^^^^^^^^^^^^^^^^^^^^
Making Point Releases
^^^^^^^^^^^^^^^^^^^^^

This assumes you've already created a project board for a point release
and completed the work to be done for the release. To make a point
release:

#. Create one new project board for the next point release.

#. Move any cards that aren't done yet to the next project board.

#. Check out the release branch (it should already exist). For the
   ``X.Y.Z`` release, the release branch is called ``releases/vX.Y``. For
   ``v0.15.1``, you would check out ``releases/v0.15``:

   .. code-block:: console

      $ git checkout releases/v0.15

#. Cherry-pick each pull request in the ``Done`` column of the release
   project onto the release branch.

   This is **usually** fairly simple since we squash the commits from the
   vast majority of pull requests, which means there is only one commit
   per pull request to cherry-pick. For example, `this pull request
   <https://github.com/spack/spack/pull/15777>`_ has three commits, but
   they were squashed into a single commit on merge. You can see the
   commit that was created here:

   .. image:: images/pr-commit.png

   You can easily cherry-pick it like this (assuming you already have the
   release branch checked out):

   .. code-block:: console

      $ git cherry-pick 7e46da7

   For pull requests that were rebased, you'll need to cherry-pick each
   rebased commit individually. There have not been any rebased PRs like
   this in recent point releases.

   .. warning::

      It is important to cherry-pick commits in the order they happened,
      otherwise you can get conflicts while cherry-picking. When
      cherry-picking onto a point release, look at the merge date,
      **not** the number of the pull request or the date it was opened.

      Sometimes you may **still** get merge conflicts even if you have
      cherry-picked all the commits in order. This generally means there
      is some other intervening pull request that the one you're trying
      to pick depends on. In these cases, you'll need to make a judgment
      call:

      1. If the dependency is small, you might just cherry-pick it, too.
         If you do this, add it to the release board.

      2. If it is large, then you may decide that this fix is not worth
         including in a point release, in which case you should remove it
         from the release project.

      3. You can always decide to manually back-port the fix to the release
         branch if neither of the above options makes sense, but this can
         require a lot of work. It's seldom the right choice.
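   One way to get candidate commits in the order they landed is to ask
   ``git`` for the history since the last tag, oldest first (a sketch;
   the ``v0.15.0..develop`` range is illustrative):

   .. code-block:: console

      $ git log --oneline --reverse v0.15.0..develop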
#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.14.1
   <https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.

#. Update the release version lists in these files to include the new version:

   * ``lib/spack/spack/schema/container.py``
   * ``lib/spack/spack/container/images.json``

   **TODO**: We should get rid of this step in some future release.

#. Update ``CHANGELOG.md`` with a list of bugfixes. This is typically just a
   summary of the commits you cherry-picked onto the release branch. See
   `the changelog from 0.14.1
   <https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.

#. Push the release branch to GitHub.

#. Make sure CI passes on the release branch, including:

   * Regular unit tests
   * Build tests
   * The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_

   If CI does not pass, you'll need to figure out why, and make changes
   to the release branch until it does. You can make more commits, modify
   or remove cherry-picked commits, or cherry-pick **more** from
   ``develop`` to make this happen.

#. Follow the steps in :ref:`publishing-releases`.

#. Follow the steps in :ref:`merging-releases`.

#. Follow the steps in :ref:`announcing-releases`.
.. _publishing-releases:

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Publishing a release on GitHub
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

#. Go to `github.com/spack/spack/releases
   <https://github.com/spack/spack/releases>`_ and click ``Draft a new
   release``. Set the following:

   * ``Tag version`` should start with ``v`` and contain *all three*
     parts of the version, e.g. ``v0.15.1``. This is the name of the tag
     that will be created.

   * ``Target`` should be the ``releases/vX.Y`` branch (e.g., ``releases/v0.15``).

   * ``Release title`` should be ``vX.Y.Z`` (to match the tag, e.g., ``v0.15.1``).

   * For the text, paste the latest release markdown from your ``CHANGELOG.md``.

   You can save the draft and keep coming back to this as you prepare the release.

#. When you are done, click ``Publish release``.

#. Immediately after publishing, go back to
   `github.com/spack/spack/releases
   <https://github.com/spack/spack/releases>`_ and download the
   auto-generated ``.tar.gz`` file for the release. It's the ``Source
   code (tar.gz)`` link.

#. Click ``Edit`` on the release you just did and attach the downloaded
   release tarball as a binary. This does two things:

   #. Makes sure that the hash of our releases doesn't change over time.
      GitHub sometimes annoyingly changes the way they generate
      tarballs, and then hashes can change if you rely on the
      auto-generated tarball links.

   #. Gets us download counts on releases visible through the GitHub
      API. GitHub tracks downloads of artifacts, but *not* the source
      links. See the `releases
      page <https://api.github.com/repos/spack/spack/releases>`_ and search
      for ``download_count`` to see this.
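Because the attached tarball is what pins the release hash, it can help to
record its checksum right away (a sketch; the file name is illustrative):

.. code-block:: console

   $ sha256sum spack-0.15.1.tar.gz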
.. _merging-releases:

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Updating ``releases/latest`` and ``develop``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If the new release is the **highest** Spack release yet, you should
also tag it as ``releases/latest``. For example, suppose the highest
release is currently ``0.15.3``:

* If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
  it with ``releases/latest``, as these are higher than ``0.15.3``.

* If you are making a new release of an **older** major version of
  Spack, e.g. ``0.14.4``, then you should not tag it as
  ``releases/latest`` (as there are newer major versions).

To tag ``releases/latest``, do this:

.. code-block:: console

   $ git checkout releases/vX.Y     # vX.Y is the new release's branch
   $ git tag --force releases/latest
   $ git push --tags

The ``--force`` argument makes ``git`` overwrite the existing
``releases/latest`` tag with the new one.

We also merge each release that we tag as ``releases/latest`` into ``develop``.
Make sure to do this with a merge commit:

.. code-block:: console

   $ git checkout develop
   $ git merge --no-ff vX.Y.Z       # vX.Y.Z is the new release's tag
   $ git push

We merge back to ``develop`` because it:

* updates the version and ``CHANGELOG.md`` on ``develop``.
* ensures that your release tag is reachable from the head of
  ``develop``.

We *must* use a real merge commit (via the ``--no-ff`` option) because it
ensures that the release tag is reachable from the tip of ``develop``.
This is necessary for ``spack -V`` to work properly -- it uses ``git
describe --tags`` to find the last reachable tag in the repository and
reports how far we are from it. For example:

.. code-block:: console

   $ spack -V
   0.14.2-1486-b80d5e74e5

This says that we are at commit ``b80d5e74e5``, which is 1,486 commits
ahead of the ``0.14.2`` release.

We put this step last in the process because it's best to do it only once
the release is complete and tagged. If you do it before you've tagged the
release and later decide you want to tag some later commit, you'll need
to merge again.
.. _announcing-releases:

^^^^^^^^^^^^^^^^^^^^
Announcing a release
^^^^^^^^^^^^^^^^^^^^

We announce releases in all of the major Spack communication channels.
Publishing the release takes care of GitHub. The remaining channels are
Twitter, Slack, and the mailing list. Here are the steps:

#. Make a tweet to announce the release. It should link to the release's
   page on GitHub. You can base it on `this example tweet
   <https://twitter.com/spackpm/status/1231761858182307840>`_.

#. Ping ``@channel`` in ``#general`` on Slack (`spackpm.slack.com
   <https://spackpm.slack.com>`_) with a link to the tweet. The tweet
   will be shown inline so that you do not have to retype your release
   announcement.

#. Email the Spack mailing list to let them know about the release. As
   with the tweet, you likely want to link to the release's page on
   GitHub. It's also helpful to include some information directly in the
   email. You can base yours on this `example email
   <https://groups.google.com/forum/#!topic/spack/WT4CT9i_X4s>`_.

Once you've announced the release, congratulations, you're done! You've
finished making the release!
@@ -811,6 +811,100 @@ to add the following to ``packages.yaml``: (everything below the first three context lines is added)

present in PATH, however it will have lower precedence compared to paths
from other dependencies. This ensures that binaries in Spack dependencies
are preferred over system binaries.

^^^^^^
OpenGL
^^^^^^

To use hardware-accelerated rendering from a system-supplied OpenGL driver,
add something like the following to your ``packages`` configuration:

.. code-block:: yaml

   packages:
     opengl:
       paths:
         opengl+glx@4.5: /usr
       buildable: False
     all:
       providers:
         gl: [opengl]
         glx: [opengl]

For `EGL <https://www.khronos.org/egl>`_ support, or for certain modern drivers,
OpenGL calls are dispatched dynamically at run time to the hardware graphics
implementation. This dynamic dispatch is performed using `libglvnd
<https://github.com/NVIDIA/libglvnd>`_. In this mode, the graphics library
(e.g.: opengl) must be built to work with libglvnd. Applications then link
against libglvnd instead of the underlying implementation. Environment
variables set at run time govern the process by which libglvnd loads the
underlying implementation and dispatches calls to it. See `this comment
<https://github.com/NVIDIA/libglvnd/issues/177#issuecomment-496562769>`_
for details on loading a specific GLX implementation and `this page
<https://github.com/NVIDIA/libglvnd/blob/master/src/EGL/icd_enumeration.md>`_
for information about EGL ICD enumeration.

This codependency between libglvnd and the underlying implementation is modeled
in Spack with two packages for libglvnd: libglvnd, which provides libglvnd
proper; and libglvnd-fe, a bundle package that depends on libglvnd and an
implementation. Implementations that work through libglvnd are no longer
providers for graphics virtual dependencies, like "gl" or "glx", but instead
provide libglvnd versions of these dependencies ("libglvnd-be-gl",
"libglvnd-be-glx", etc.). The libglvnd-fe package depends on these
"libglvnd-be-..." virtual packages, which provide the actual implementation.
It also depends on libglvnd, itself, and exposes its libraries to downstream
applications. For correct operation, the Spack package for the underlying
implementation has to set the runtime environment to ensure that it is loaded
when an application linked against libglvnd runs. This last detail is
important for users who want to set up an external OpenGL implementation that
requires libglvnd to work. This setup requires modifying the ``modules``
configuration so that modules generated for the external OpenGL implementation
set the necessary environment variables.

.. code-block:: yaml

   packages:
     opengl:
       paths:
         opengl@4.5+glx+egl+glvnd: /does/not/exist
       buildable: False
       variants: +glx+egl+glvnd
     libglvnd-fe:
       variants: +gl+glx+egl
     all:
       providers:
         glvnd-be-gl: [opengl]
         glvnd-be-glx: [opengl]
         glvnd-be-egl: [opengl]
         gl: [libglvnd-fe]
         glx: [libglvnd-fe]
         egl: [libglvnd-fe]

.. code-block:: yaml

   modules:
     tcl:
       opengl@4.5+glx+glvnd:
         environment:
           set:
             __GLX_VENDOR_LIBRARY_NAME: nvidia
       opengl@4.5+egl+glvnd:
         environment:
           set:
             __EGL_VENDOR_LIBRARY_FILENAMES: /usr/share/glvnd/egl_vendor.d/10_nvidia.json

One final detail about the above example is that it avoids setting the true
root of the external OpenGL implementation, instead opting to set it to a path
that is not expected to exist on the system. This is done for two reasons.
First, Spack would add directories under this root to environment variables
that would affect the process of building and installing other packages, such
as ``PATH`` and ``PKG_CONFIG_PATH``. These additions may potentially prevent
those packages from installing successfully, and this risk is especially great
for paths that house many libraries and applications, like ``/usr``. Second,
providing the true root of the external implementation in the ``packages``
configuration is not necessary because libglvnd needs only the environment
variables set above in the ``modules`` configuration to determine what OpenGL
implementation to dispatch calls to at run time.
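Module files generated before this configuration exists will not contain the
variables, so they need to be regenerated; a minimal sketch (flags may vary
by Spack version):

.. code-block:: console

   $ spack module tcl refresh -y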
^^^
Git
^^^

```diff
@@ -818,7 +912,7 @@ Git

 Some Spack packages use ``git`` to download, which might not work on
 some computers. For example, the following error was
-encountered on a Macintosh during ``spack install julia-master``:
+encountered on a Macintosh during ``spack install julia@master``:

 .. code-block:: console
```
**lib/spack/docs/images/pr-commit.png** — new binary file (44 KiB, not shown)

**lib/spack/docs/images/projects.png** — new binary file (68 KiB, not shown)
```diff
@@ -82,9 +82,9 @@ or Amazon Elastic Kubernetes Service (`EKS <https://aws.amazon.com/eks>`_), though
 topics are outside the scope of this document.

 Spack's pipelines are now making use of the
-`trigger <https://docs.gitlab.com/12.9/ee/ci/yaml/README.html#trigger>` syntax to run
+`trigger <https://docs.gitlab.com/12.9/ee/ci/yaml/README.html#trigger>`_ syntax to run
 dynamically generated
-`child pipelines <https://docs.gitlab.com/12.9/ee/ci/parent_child_pipelines.html>`.
+`child pipelines <https://docs.gitlab.com/12.9/ee/ci/parent_child_pipelines.html>`_.
 Note that the use of dynamic child pipelines requires running Gitlab version
 ``>= 12.9``.
```
```diff
@@ -1405,11 +1405,12 @@ The main points that are implemented below:
     - export CXXFLAGS="-std=c++11"

    install:
-    - if ! which spack >/dev/null; then
+    - |
+      if ! which spack >/dev/null; then
        mkdir -p $SPACK_ROOT &&
        git clone --depth 50 https://github.com/spack/spack.git $SPACK_ROOT &&
-       echo -e "config:""\n build_jobs:"" 2" > $SPACK_ROOT/etc/spack/config.yaml &&
-       echo -e "packages:""\n all:""\n target:"" ['x86_64']"
+       printf "config:\n  build_jobs: 2\n" > $SPACK_ROOT/etc/spack/config.yaml &&
+       printf "packages:\n  all:\n    target: ['x86_64']\n" \
+         > $SPACK_ROOT/etc/spack/packages.yaml;
       fi
    - travis_wait spack install cmake@3.7.2~openssl~ncurses
```
```diff
@@ -31,17 +31,17 @@
 class ProcessController(object):
     """Wrapper around some fundamental process control operations.

-    This allows one process to drive another similar to the way a shell
-    would, by sending signals and I/O.
+    This allows one process (the controller) to drive another (the
+    minion) similar to the way a shell would, by sending signals and I/O.

     """
-    def __init__(self, pid, master_fd,
+    def __init__(self, pid, controller_fd,
                  timeout=1, sleep_time=1e-1, debug=False):
         """Create a controller to manipulate the process with id ``pid``

         Args:
             pid (int): id of process to control
-            master_fd (int): master file descriptor attached to pid's stdin
+            controller_fd (int): controller fd attached to pid's stdin
             timeout (int): time in seconds for wait operations to time out
                 (default 1 second)
             sleep_time (int): time to sleep after signals, to control the
```
```diff
@@ -58,7 +58,7 @@ def __init__(self, pid, master_fd,
         """
         self.pid = pid
         self.pgid = os.getpgid(pid)
-        self.master_fd = master_fd
+        self.controller_fd = controller_fd
         self.timeout = timeout
         self.sleep_time = sleep_time
         self.debug = debug
```
```diff
@@ -67,8 +67,8 @@ def __init__(self, pid, master_fd,
         self.ps = which("ps", required=True)

     def get_canon_echo_attrs(self):
-        """Get echo and canon attributes of the terminal of master_fd."""
-        cfg = termios.tcgetattr(self.master_fd)
+        """Get echo and canon attributes of the terminal of controller_fd."""
+        cfg = termios.tcgetattr(self.controller_fd)
         return (
             bool(cfg[3] & termios.ICANON),
             bool(cfg[3] & termios.ECHO),
```
```diff
@@ -82,7 +82,7 @@ def horizontal_line(self, name):
         )

     def status(self):
-        """Print debug message with status info for the child."""
+        """Print debug message with status info for the minion."""
         if self.debug:
             canon, echo = self.get_canon_echo_attrs()
             sys.stderr.write("canon: %s, echo: %s\n" % (
```
```diff
@@ -94,12 +94,12 @@ def status(self):
             sys.stderr.write("\n")

     def input_on(self):
-        """True if keyboard input is enabled on the master_fd pty."""
+        """True if keyboard input is enabled on the controller_fd pty."""
         return self.get_canon_echo_attrs() == (False, False)

     def background(self):
-        """True if pgid is in a background pgroup of master_fd's terminal."""
-        return self.pgid != os.tcgetpgrp(self.master_fd)
+        """True if pgid is in a background pgroup of controller_fd's tty."""
+        return self.pgid != os.tcgetpgrp(self.controller_fd)

     def tstp(self):
         """Send SIGTSTP to the controlled process."""
```
```diff
@@ -115,18 +115,18 @@ def cont(self):
     def fg(self):
         self.horizontal_line("fg")
         with log.ignore_signal(signal.SIGTTOU):
-            os.tcsetpgrp(self.master_fd, os.getpgid(self.pid))
+            os.tcsetpgrp(self.controller_fd, os.getpgid(self.pid))
         time.sleep(self.sleep_time)

     def bg(self):
         self.horizontal_line("bg")
         with log.ignore_signal(signal.SIGTTOU):
-            os.tcsetpgrp(self.master_fd, os.getpgrp())
+            os.tcsetpgrp(self.controller_fd, os.getpgrp())
         time.sleep(self.sleep_time)

     def write(self, byte_string):
         self.horizontal_line("write '%s'" % byte_string.decode("utf-8"))
-        os.write(self.master_fd, byte_string)
+        os.write(self.controller_fd, byte_string)

     def wait(self, condition):
         start = time.time()
```
```diff
@@ -156,50 +156,51 @@ def wait_running(self):


 class PseudoShell(object):
-    """Sets up master and child processes with a PTY.
+    """Sets up controller and minion processes with a PTY.

     You can create a ``PseudoShell`` if you want to test how some
     function responds to terminal input. This is a pseudo-shell from a
-    job control perspective; ``master_function`` and ``child_function``
-    are set up with a pseudoterminal (pty) so that the master can drive
-    the child through process control signals and I/O.
+    job control perspective; ``controller_function`` and ``minion_function``
+    are set up with a pseudoterminal (pty) so that the controller can drive
+    the minion through process control signals and I/O.

     The two functions should have signatures like this::

-        def master_function(proc, ctl, **kwargs)
-        def child_function(**kwargs)
+        def controller_function(proc, ctl, **kwargs)
+        def minion_function(**kwargs)

-    ``master_function`` is spawned in its own process and passed three
+    ``controller_function`` is spawned in its own process and passed three
     arguments:

     proc
-        the ``multiprocessing.Process`` object representing the child
+        the ``multiprocessing.Process`` object representing the minion
     ctl
-        a ``ProcessController`` object tied to the child
+        a ``ProcessController`` object tied to the minion
     kwargs
         keyword arguments passed from ``PseudoShell.start()``.

-    ``child_function`` is only passed ``kwargs`` delegated from
+    ``minion_function`` is only passed ``kwargs`` delegated from
     ``PseudoShell.start()``.

-    The ``ctl.master_fd`` will have its ``master_fd`` connected to
-    ``sys.stdin`` in the child process. Both processes will share the
+    The ``ctl.controller_fd`` will have its ``controller_fd`` connected to
+    ``sys.stdin`` in the minion process. Both processes will share the
     same ``sys.stdout`` and ``sys.stderr`` as the process instantiating
     ``PseudoShell``.

     Here are the relationships between processes created::

         ._________________________________________________________.
-        | Child Process                                           | pid     2
-        | - runs child_function                                   | pgroup  2
+        | Minion Process                                          | pid     2
+        | - runs minion_function                                  | pgroup  2
         |_________________________________________________________| session 1
             ^
-            | create process with master_fd connected to stdin
+            | create process with controller_fd connected to stdin
             | stdout, stderr are the same as caller
         ._________________________________________________________.
-        | Master Process                                          | pid     1
-        | - runs master_function                                  | pgroup  1
-        | - uses ProcessController and master_fd to control child | session 1
+        | Controller Process                                      | pid     1
+        | - runs controller_function                              | pgroup  1
+        | - uses ProcessController and controller_fd to           | session 1
+        |   control minion                                        |
         |_________________________________________________________|
             ^
             | create process
```
@@ -207,51 +208,51 @@ def child_function(**kwargs)
|
||||
._________________________________________________________.
|
||||
| Caller | pid 0
|
||||
| - Constructs, starts, joins PseudoShell | pgroup 0
|
||||
| - provides master_function, child_function | session 0
|
||||
| - provides controller_function, minion_function | session 0
|
||||
|_________________________________________________________|
|
||||
|
||||
"""
|
||||
def __init__(self, master_function, child_function):
|
||||
def __init__(self, controller_function, minion_function):
|
||||
self.proc = None
|
||||
self.master_function = master_function
|
||||
self.child_function = child_function
|
||||
self.controller_function = controller_function
|
||||
self.minion_function = minion_function
|
||||
|
||||
# these can be optionally set to change defaults
|
||||
self.controller_timeout = 1
|
||||
self.sleep_time = 0
|
||||
|
||||
def start(self, **kwargs):
|
||||
"""Start the master and child processes.
|
||||
"""Start the controller and minion processes.
|
||||
|
||||
Arguments:
|
||||
kwargs (dict): arbitrary keyword arguments that will be
|
||||
passed to master and child functions
|
||||
passed to controller and minion functions
|
||||
|
||||
The master process will create the child, then call
|
||||
``master_function``. The child process will call
|
||||
``child_function``.
|
||||
The controller process will create the minion, then call
|
||||
``controller_function``. The minion process will call
|
||||
``minion_function``.
|
||||
|
||||
"""
|
||||
self.proc = multiprocessing.Process(
|
||||
target=PseudoShell._set_up_and_run_master_function,
|
||||
args=(self.master_function, self.child_function,
|
||||
target=PseudoShell._set_up_and_run_controller_function,
|
||||
args=(self.controller_function, self.minion_function,
|
||||
self.controller_timeout, self.sleep_time),
|
||||
kwargs=kwargs,
|
||||
)
|
||||
self.proc.start()
|
||||
|
||||
def join(self):
|
||||
"""Wait for the child process to finish, and return its exit code."""
|
||||
"""Wait for the minion process to finish, and return its exit code."""
|
||||
self.proc.join()
|
||||
return self.proc.exitcode
|
||||
|
||||
@staticmethod
|
||||
def _set_up_and_run_child_function(
|
||||
tty_name, stdout_fd, stderr_fd, ready, child_function, **kwargs):
|
||||
"""Child process wrapper for PseudoShell.
|
||||
def _set_up_and_run_minion_function(
|
||||
tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs):
|
||||
"""Minion process wrapper for PseudoShell.
|
||||
|
||||
Handles the mechanics of setting up a PTY, then calls
|
||||
``child_function``.
|
||||
``minion_function``.
|
||||
|
||||
"""
|
||||
# new process group, like a command or pipeline launched by a shell
|
||||
@@ -266,45 +267,45 @@ def _set_up_and_run_child_function(
|
||||
|
||||
if kwargs.get("debug"):
|
||||
sys.stderr.write(
|
||||
"child: stdin.isatty(): %s\n" % sys.stdin.isatty())
|
||||
"minion: stdin.isatty(): %s\n" % sys.stdin.isatty())
|
||||
|
||||
# tell the parent that we're really running
|
||||
if kwargs.get("debug"):
|
||||
sys.stderr.write("child: ready!\n")
|
||||
sys.stderr.write("minion: ready!\n")
|
||||
ready.value = True
|
||||
|
||||
try:
|
||||
child_function(**kwargs)
|
||||
minion_function(**kwargs)
|
||||
except BaseException:
|
||||
traceback.print_exc()
|
||||
|
||||
@staticmethod
|
||||
def _set_up_and_run_master_function(
|
||||
master_function, child_function, controller_timeout, sleep_time,
|
||||
**kwargs):
|
||||
"""Set up a pty, spawn a child process, and execute master_function.
|
||||
def _set_up_and_run_controller_function(
|
||||
controller_function, minion_function, controller_timeout,
|
||||
sleep_time, **kwargs):
|
||||
"""Set up a pty, spawn a minion process, execute controller_function.
|
||||
|
||||
Handles the mechanics of setting up a PTY, then calls
|
||||
``master_function``.
|
||||
``controller_function``.
|
||||
|
||||
"""
|
||||
os.setsid() # new session; this process is the controller
|
||||
|
||||
master_fd, child_fd = os.openpty()
|
||||
pty_name = os.ttyname(child_fd)
|
||||
controller_fd, minion_fd = os.openpty()
|
||||
pty_name = os.ttyname(minion_fd)
|
||||
|
||||
# take controlling terminal
|
||||
pty_fd = os.open(pty_name, os.O_RDWR)
|
||||
os.close(pty_fd)
|
||||
|
||||
ready = multiprocessing.Value('i', False)
|
||||
child_process = multiprocessing.Process(
|
||||
target=PseudoShell._set_up_and_run_child_function,
|
||||
minion_process = multiprocessing.Process(
|
||||
target=PseudoShell._set_up_and_run_minion_function,
|
||||
args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(),
|
||||
ready, child_function),
|
||||
ready, minion_function),
|
||||
kwargs=kwargs,
|
||||
)
|
||||
child_process.start()
|
||||
minion_process.start()
|
||||
|
||||
# wait for subprocess to be running and connected.
|
||||
while not ready.value:
|
||||
@@ -315,30 +316,31 @@ def _set_up_and_run_master_function(
|
||||
sys.stderr.write("pid: %d\n" % os.getpid())
|
||||
sys.stderr.write("pgid: %d\n" % os.getpgrp())
|
||||
sys.stderr.write("sid: %d\n" % os.getsid(0))
|
||||
sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(master_fd))
|
||||
sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(controller_fd))
|
||||
sys.stderr.write("\n")
|
||||
|
||||
child_pgid = os.getpgid(child_process.pid)
|
||||
sys.stderr.write("child pid: %d\n" % child_process.pid)
|
||||
sys.stderr.write("child pgid: %d\n" % child_pgid)
|
||||
sys.stderr.write("child sid: %d\n" % os.getsid(child_process.pid))
|
||||
minion_pgid = os.getpgid(minion_process.pid)
|
||||
sys.stderr.write("minion pid: %d\n" % minion_process.pid)
|
||||
sys.stderr.write("minion pgid: %d\n" % minion_pgid)
|
||||
sys.stderr.write(
|
||||
"minion sid: %d\n" % os.getsid(minion_process.pid))
|
||||
sys.stderr.write("\n")
|
||||
sys.stderr.flush()
|
||||
# set up master to ignore SIGTSTP, like a shell
|
||||
# set up controller to ignore SIGTSTP, like a shell
|
||||
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
|
||||
|
||||
# call the master function once the child is ready
|
||||
# call the controller function once the minion is ready
|
||||
try:
|
||||
controller = ProcessController(
|
||||
child_process.pid, master_fd, debug=kwargs.get("debug"))
|
||||
minion_process.pid, controller_fd, debug=kwargs.get("debug"))
|
||||
controller.timeout = controller_timeout
|
||||
controller.sleep_time = sleep_time
|
||||
error = master_function(child_process, controller, **kwargs)
|
||||
error = controller_function(minion_process, controller, **kwargs)
|
||||
except BaseException:
|
||||
error = 1
|
||||
traceback.print_exc()
|
||||
|
||||
child_process.join()
|
||||
minion_process.join()
|
||||
|
||||
# return whether either the parent or child failed
|
||||
return error or child_process.exitcode
|
||||
# return whether either the parent or minion failed
|
||||
return error or minion_process.exitcode
|
||||
|
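For orientation, a minimal usage sketch of the renamed API follows. The toy controller and minion functions are assumptions for illustration, not part of the diff; the module path is assumed from the tests further below.

    # Hypothetical smoke test for the controller/minion API; the import
    # path `llnl.util.tty.pty` is an assumption, as are both functions.
    import sys
    from llnl.util.tty.pty import PseudoShell

    def controller_function(proc, ctl, **kwargs):
        # `proc` is the minion's multiprocessing.Process; `ctl` is the
        # ProcessController bound to the minion's pty.
        ctl.status()            # prints canon/echo state when debug=True
        ctl.write(b"hello\n")   # type into the minion's stdin
        ctl.fg()                # move the minion to the foreground pgroup

    def minion_function(**kwargs):
        # stdin is the minion end of the pty, so this sees ctl.write()
        print("minion read: %s" % sys.stdin.readline().strip())

    shell = PseudoShell(controller_function, minion_function)
    shell.start(debug=True)
    assert shell.join() == 0    # exit code of the minion process
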
@@ -498,6 +498,7 @@ def download_tarball(spec):

    # stage the tarball into standard place
    stage = Stage(url, name="build_cache", keep=True)
    stage.create()
    try:
        stage.fetch()
        return stage.save_filename

@@ -12,8 +12,9 @@
class CudaPackage(PackageBase):
    """Auxiliary class which contains CUDA variant, dependencies and conflicts
    and is meant to unify and facilitate its usage.

    Maintainers: ax3l, svenevs
    """
+   maintainers = ['ax3l', 'svenevs']

    # https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list
    # https://developer.nvidia.com/cuda-gpus
@@ -25,6 +26,7 @@ class CudaPackage(PackageBase):
        '50', '52', '53',
        '60', '61', '62',
        '70', '72', '75',
+       '80',
    ]

    # FIXME: keep cuda and cuda_arch separate to make usage easier until
@@ -48,6 +50,7 @@ def cuda_flags(arch_list):

    # CUDA version vs Architecture
    # https://en.wikipedia.org/wiki/CUDA#GPUs_supported
+   # https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#deprecated-features
    depends_on('cuda@:6.0', when='cuda_arch=10')
    depends_on('cuda@:6.5', when='cuda_arch=11')
    depends_on('cuda@2.1:6.5', when='cuda_arch=12')
@@ -58,8 +61,8 @@ def cuda_flags(arch_list):

    depends_on('cuda@5.0:10.2', when='cuda_arch=30')
    depends_on('cuda@5.0:10.2', when='cuda_arch=32')
-   depends_on('cuda@5.0:10.2', when='cuda_arch=35')
-   depends_on('cuda@6.5:10.2', when='cuda_arch=37')
+   depends_on('cuda@5.0:', when='cuda_arch=35')
+   depends_on('cuda@6.5:', when='cuda_arch=37')

    depends_on('cuda@6.0:', when='cuda_arch=50')
    depends_on('cuda@6.5:', when='cuda_arch=52')
@@ -73,6 +76,8 @@ def cuda_flags(arch_list):
    depends_on('cuda@9.0:', when='cuda_arch=72')
    depends_on('cuda@10.0:', when='cuda_arch=75')

+   depends_on('cuda@11.0:', when='cuda_arch=80')
+
    # There are at least three cases to be aware of for compiler conflicts
    # 1. Linux x86_64
    # 2. Linux ppc64le
@@ -88,12 +93,15 @@ def cuda_flags(arch_list):
    conflicts('%gcc@7:', when='+cuda ^cuda@:9.1' + arch_platform)
    conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130' + arch_platform)
    conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89' + arch_platform)
+   conflicts('%gcc@:4,10:', when='+cuda ^cuda@:11.0.2' + arch_platform)
    conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27' + arch_platform)
    conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5' + arch_platform)
    conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8' + arch_platform)
    conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1' + arch_platform)
-   conflicts('%pgi@:16', when='+cuda ^cuda@9.2.88:10' + arch_platform)
-   conflicts('%pgi@:17', when='+cuda ^cuda@10.2.89' + arch_platform)
+   conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10' + arch_platform)
+   conflicts('%pgi@:17,20:',
+             when='+cuda ^cuda@10.1.105:10.2.89' + arch_platform)
+   conflicts('%pgi@:17,20.2:', when='+cuda ^cuda@11.0.2' + arch_platform)
    conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5' + arch_platform)
    conflicts('%clang@:3.7,4:',
              when='+cuda ^cuda@8.0:9.0' + arch_platform)
@@ -104,7 +112,8 @@ def cuda_flags(arch_list):
    conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105' + arch_platform)
    conflicts('%clang@:3.7,8.1:',
              when='+cuda ^cuda@10.1.105:10.1.243' + arch_platform)
-   conflicts('%clang@:3.2,9.0:', when='+cuda ^cuda@10.2.89' + arch_platform)
+   conflicts('%clang@:3.2,9:', when='+cuda ^cuda@10.2.89' + arch_platform)
+   conflicts('%clang@:5,10:', when='+cuda ^cuda@11.0.2' + arch_platform)

    # x86_64 vs. ppc64le differ according to NVidia docs
    # Linux ppc64le compiler conflicts from Table from the docs below:
@@ -119,6 +128,8 @@ def cuda_flags(arch_list):
    conflicts('%gcc@6:', when='+cuda ^cuda@:9' + arch_platform)
    conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130' + arch_platform)
    conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243' + arch_platform)
+   # officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le
+   conflicts('%gcc@:4,10:', when='+cuda ^cuda@:11.0.2' + arch_platform)
    conflicts('%pgi', when='+cuda ^cuda@:8' + arch_platform)
    conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185' + arch_platform)
    conflicts('%pgi@:17', when='+cuda ^cuda@:10' + arch_platform)
@@ -128,6 +139,7 @@ def cuda_flags(arch_list):
    conflicts('%clang@7:', when='+cuda ^cuda@10.0.130' + arch_platform)
    conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105' + arch_platform)
    conflicts('%clang@8.1:', when='+cuda ^cuda@:10.2.89' + arch_platform)
+   conflicts('%clang@:5,10.0:', when='+cuda ^cuda@11.0.2' + arch_platform)

    # Intel is mostly relevant for x86_64 Linux, even though it also
    # exists for Mac OS X. No information prior to CUDA 3.2 or Intel 11.1
@@ -141,11 +153,13 @@ def cuda_flags(arch_list):
    conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60')
    conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9')
    conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0')
    conflicts('%intel@19.1:', when='+cuda ^cuda@:10.1')
+   conflicts('%intel@19.2:', when='+cuda ^cuda@:11.0.2')

    # XL is mostly relevant for ppc64le Linux
    conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1')
    conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2')
    conflicts('%xl@17:', when='+cuda ^cuda@:10.2.89')
+   conflicts('%xl@:12,17:', when='+cuda ^cuda@:11.0.2')

    # Mac OS X
    # platform = ' platform=darwin'
@@ -156,7 +170,7 @@ def cuda_flags(arch_list):
    # `clang-apple@x.y.z as a possible fix.
    # Compiler conflicts will be eventual taken from here:
    # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#abstract
-   conflicts('platform=darwin', when='+cuda ^cuda@11.0:')
+   conflicts('platform=darwin', when='+cuda ^cuda@11.0.2:')

    # Make sure cuda_arch can not be used without +cuda
    for value in cuda_arch_values:

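For context, this is how a package consumes the mixin. The sketch below is hypothetical (the package name, URL, and placeholder checksum are invented), but the `+cuda` variant, `cuda_arch` values, and `cuda_flags()` helper are the ones defined by the class above.

    # Hypothetical consumer of CudaPackage; only cuda_flags() and the
    # cuda_arch variant come from the class above, the rest is made up.
    from spack import *

    class Myapp(CMakePackage, CudaPackage):
        """Example package picking up the CUDA variant and conflicts."""
        homepage = "https://example.com/myapp"
        url = "https://example.com/myapp-1.0.tar.gz"

        version('1.0', sha256='0' * 64)  # placeholder checksum

        def cmake_args(self):
            args = []
            if '+cuda' in self.spec:
                # cuda_flags() expands cuda_arch values into nvcc
                # --generate-code options
                arch = self.spec.variants['cuda_arch'].value
                args.append('-DCMAKE_CUDA_FLAGS=' +
                            ' '.join(self.cuda_flags(arch)))
            return args

A spec like `myapp +cuda cuda_arch=80` would then be forced onto `cuda@11.0:` and away from the conflicting compilers listed above.
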
@@ -42,6 +42,7 @@ def _fetch_cache():
    building the same package different ways or multiple times.
    """
    path = spack.config.get('config:source_cache')

    if not path:
        path = os.path.join(spack.paths.var_path, "cache")
    path = spack.util.path.canonicalize_path(path)

@@ -493,7 +493,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
    after_script = None
    if custom_spack_repo:
        if not custom_spack_ref:
-           custom_spack_ref = 'master'
+           custom_spack_ref = 'develop'
        before_script = [
            ('git clone "{0}"'.format(custom_spack_repo)),
            'pushd ./spack && git checkout "{0}" && popd'.format(

@@ -65,7 +65,7 @@ def checksum(parser, args):

    version_lines = spack.stage.get_checksums_for_versions(
        url_dict, pkg.name, keep_stage=args.keep_stage,
-       batch=(args.batch or len(args.versions) > 0),
+       batch=(args.batch or len(args.versions) > 0 or len(url_dict) == 1),
        fetch_options=pkg.fetch_options)

    print()

@@ -445,6 +445,9 @@ def setup_parser(subparser):
    subparser.add_argument(
        '--skip-editor', action='store_true',
        help="skip the edit session for the package (e.g., automation)")
+   subparser.add_argument(
+       '-b', '--batch', action='store_true',
+       help="don't ask which versions to checksum")


class BuildSystemGuesser:
@@ -511,7 +514,7 @@ def __call__(self, stage, url):
        # Determine the build system based on the files contained
        # in the archive.
        for pattern, bs in clues:
-           if any(re.search(pattern, l) for l in lines):
+           if any(re.search(pattern, line) for line in lines):
                self.build_system = bs
                break

@@ -629,7 +632,8 @@ def get_versions(args, name):

        versions = spack.stage.get_checksums_for_versions(
            url_dict, name, first_stage_function=guesser,
-           keep_stage=args.keep_stage, batch=True)
+           keep_stage=args.keep_stage,
+           batch=(args.batch or len(url_dict) == 1))
    else:
        versions = unhashed_versions

@@ -41,6 +41,8 @@ def update_kwargs_from_args(args, kwargs):
        'fake': args.fake,
        'dirty': args.dirty,
        'use_cache': args.use_cache,
+       'install_global': args.install_global,
+       'upstream': args.upstream,
        'cache_only': args.cache_only,
        'explicit': True,  # Always true for install command
        'stop_at': args.until,
@@ -127,6 +129,14 @@ def setup_parser(subparser):
        '-f', '--file', action='append', default=[],
        dest='specfiles', metavar='SPEC_YAML_FILE',
        help="install from file. Read specs to install from .yaml files")
+   subparser.add_argument(
+       '--upstream', action='store', default=None,
+       dest='upstream', metavar='UPSTREAM_NAME',
+       help='specify which upstream spack to install to')
+   subparser.add_argument(
+       '-g', '--global', action='store_true', default=False,
+       dest='install_global',
+       help='install package to globally accessible location')

    cd_group = subparser.add_mutually_exclusive_group()
    arguments.add_common_arguments(cd_group, ['clean', 'dirty'])
@@ -220,7 +230,10 @@ def default_log_file(spec):
    """
    fmt = 'test-{x.name}-{x.version}-{hash}.xml'
    basename = fmt.format(x=spec, hash=spec.dag_hash())
-   dirname = fs.os.path.join(spack.paths.var_path, 'junit-report')
+
+   dirname = fs.os.path.join(spack.paths.user_config_path,
+                             'var/spack',
+                             'junit-report')
    fs.mkdirp(dirname)
    return fs.os.path.join(dirname, basename)

@@ -231,6 +244,7 @@ def install_spec(cli_args, kwargs, abstract_spec, spec):
    try:
        # handle active environment, if any
        env = ev.get_env(cli_args, 'install')

        if env:
            with env.write_transaction():
                concrete = env.concretize_and_add(
@@ -241,6 +255,10 @@ def install_spec(cli_args, kwargs, abstract_spec, spec):
                env.regenerate_views()
        else:
            spec.package.do_install(**kwargs)
+           spack.config.set('config:active_tree', '~/.spack/opt/spack',
+                            scope='user')
+           spack.config.set('config:active_upstream', None,
+                            scope='user')

    except spack.build_environment.InstallError as e:
        if cli_args.show_log_on_error:
@@ -255,6 +273,30 @@ def install_spec(cli_args, kwargs, abstract_spec, spec):


def install(parser, args, **kwargs):
+   # Install package to global upstream for multi-user use
+   if args.install_global:
+       spack.config.set('config:active_upstream', 'global',
+                        scope='user')
+       global_root = spack.config.get('upstreams')
+       global_root = global_root['global']['install_tree']
+       global_root = spack.util.path.canonicalize_path(global_root)
+       spack.config.set('config:active_tree', global_root,
+                        scope='user')
+   elif args.upstream:
+       if args.upstream not in spack.config.get('upstreams'):
+           tty.die("specified upstream does not exist")
+       spack.config.set('config:active_upstream', args.upstream,
+                        scope='user')
+       root = spack.config.get('upstreams')
+       root = root[args.upstream]['install_tree']
+       root = spack.util.path.canonicalize_path(root)
+       spack.config.set('config:active_tree', root, scope='user')
+   else:
+       spack.config.set('config:active_upstream', None,
+                        scope='user')
+       spack.config.set('config:active_tree',
+                        spack.config.get('config:install_tree'),
+                        scope='user')
    if args.help_cdash:
        parser = argparse.ArgumentParser(
            formatter_class=argparse.RawDescriptionHelpFormatter,

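The `--global` and `--upstream` branches above repeat one pattern: resolve the named upstream's `install_tree`, canonicalize it, and record it as the active tree. A hedged refactoring sketch of that shared logic (the helper name is invented; the config keys are the ones used in the diff):

    # Hypothetical helper equivalent to the branches in install() above.
    import spack.config
    import spack.util.path
    from llnl.util import tty

    def activate_upstream(name):
        """Point config:active_tree at the named upstream's install_tree."""
        upstreams = spack.config.get('upstreams') or {}
        if name not in upstreams:
            tty.die("specified upstream does not exist")
        spack.config.set('config:active_upstream', name, scope='user')
        root = spack.util.path.canonicalize_path(
            upstreams[name]['install_tree'])
        spack.config.set('config:active_tree', root, scope='user')
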
@@ -5,6 +5,8 @@

from __future__ import print_function

import argparse
import copy
import sys
import itertools

@@ -15,6 +17,7 @@
import spack.cmd.common.arguments as arguments
import spack.repo
import spack.store
+import spack.spec
from spack.database import InstallStatuses

from llnl.util import tty
@@ -63,8 +66,24 @@ def setup_parser(subparser):
        help="remove ALL installed packages that match each supplied spec"
    )

    subparser.add_argument(
        'packages',
        nargs=argparse.REMAINDER,
        help="specs of packages to uninstall")

+   subparser.add_argument(
+       '-u', '--upstream', action='store', default=None,
+       dest='upstream', metavar='UPSTREAM_NAME',
+       help='specify which upstream spack to uninstall from')
+
+   subparser.add_argument(
+       '-g', '--global', action='store_true',
+       dest='global_uninstall',
+       help='uninstall packages installed to global upstream')


-def find_matching_specs(env, specs, allow_multiple_matches=False, force=False):
+def find_matching_specs(env, specs, allow_multiple_matches=False, force=False,
+                        upstream=None, global_uninstall=False):
    """Returns a list of specs matching the not necessarily
    concretized specs given from cli

@@ -76,6 +95,35 @@ def find_matching_specs(env, specs, allow_multiple_matches=False, force=False):
    Return:
        list of specs
    """
+   if global_uninstall:
+       spack.config.set('config:active_upstream', 'global',
+                        scope='user')
+       global_root = spack.config.get('upstreams')
+       global_root = global_root['global']['install_tree']
+       global_root = spack.util.path.canonicalize_path(global_root)
+       spack.config.set('config:active_tree', global_root,
+                        scope='user')
+   elif upstream:
+       if upstream not in spack.config.get('upstreams'):
+           tty.die("specified upstream does not exist")
+       spack.config.set('config:active_upstream', upstream,
+                        scope='user')
+       root = spack.config.get('upstreams')
+       root = root[upstream]['install_tree']
+       root = spack.util.path.canonicalize_path(root)
+       spack.config.set('config:active_tree', root, scope='user')
+   else:
+       spack.config.set('config:active_upstream', None,
+                        scope='user')
+   for spec in specs:
+       if isinstance(spec, spack.spec.Spec):
+           spec_name = str(spec)
+           spec_copy = (copy.deepcopy(spec))
+           spec_copy.concretize()
+           if spec_copy.package.installed_upstream:
+               tty.warn("{0} is installed upstream".format(spec_name))
+               tty.die("Use 'spack uninstall [--upstream upstream_name]'")
+
    # constrain uninstall resolution to current environment if one is active
    hashes = env.all_hashes() if env else None

@@ -233,11 +281,25 @@ def do_uninstall(env, specs, force):
    for item in ready:
        item.do_uninstall(force=force)

    # write any changes made to the active environment
    if env:
        env.write()

+   spack.config.set('config:active_tree',
+                    '~/.spack/opt/spack',
+                    scope='user')
+
+   spack.config.set('config:active_upstream', None,
+                    scope='user')


def get_uninstall_list(args, specs, env):
    # Gets the list of installed specs that match the ones given via cli
    # args.all takes care of the case where '-a' is given in the cli
-   uninstall_list = find_matching_specs(env, specs, args.all, args.force)
+   uninstall_list = find_matching_specs(env, specs, args.all, args.force,
+                                        upstream=args.upstream,
+                                        global_uninstall=args.global_uninstall
+                                        )

    # Takes care of '-R'
    active_dpts, inactive_dpts = installed_dependents(uninstall_list, env)
@@ -314,7 +376,7 @@ def uninstall_specs(args, specs):
    anything_to_do = set(uninstall_list).union(set(remove_list))

    if not anything_to_do:
-       tty.warn('There are no package to uninstall.')
+       tty.warn('There are no packages to uninstall.')
        return

    if not args.yes_to_all:

@@ -681,6 +681,7 @@ def _default_make_compilers(cmp_id, paths):
    sort_fn = lambda variation: (
        'cc' not in by_compiler_id[variation],   # None last
        'cxx' not in by_compiler_id[variation],  # None last

        getattr(variation, 'prefix', None),
        getattr(variation, 'suffix', None),
    )

@@ -365,7 +365,26 @@ def __init__(self, root, db_dir=None, upstream_dbs=None,
        tty.debug('PACKAGE LOCK TIMEOUT: {0}'.format(
            str(timeout_format_str)))

+       # Create .spack-db/index.json for the global upstream if it
+       # doesn't exist
+       global_install_tree = spack.config.get(
+           'upstreams')['global']['install_tree']
+       global_install_tree = global_install_tree.replace(
+           '$spack', spack.paths.prefix)
        if self.is_upstream:
+           if global_install_tree in self._db_dir:
+               if not os.path.isfile(self._index_path):
+                   f = open(self._index_path, "w+")
+                   database = {
+                       'database': {
+                           'installs': {},
+                           'version': str(_db_version)
+                       }
+                   }
+                   try:
+                       sjson.dump(database, f)
+                   except Exception as e:
+                       raise Exception(
+                           "error writing YAML database:", str(e))
            self.lock = ForbiddenLock()
        else:
            self.lock = lk.Lock(self._lock_path,
@@ -1182,6 +1201,9 @@ def _remove(self, spec):
            rec.installed = False
            return rec.spec

+       if self.is_upstream:
+           return rec.spec
+
        del self._data[key]
        for dep in rec.spec.dependencies(_tracked_deps):
            # FIXME: the two lines below needs to be updated once #11983 is

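For reference, the bootstrap index written above is just an empty installs map. A sketch of its shape, as the Python dict before `sjson.dump()`; the version string is whatever `str(_db_version)` evaluates to at runtime, assumed to be '5' here for illustration:

    # Shape of the freshly created .spack-db/index.json; the version
    # value is an assumption, not taken from the diff.
    empty_index = {
        'database': {
            'installs': {},   # no specs recorded yet
            'version': '5',   # str(_db_version); assumed for illustration
        }
    }
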
@@ -645,6 +645,7 @@ def shell_set(var, value):

    other_spack_instances = spack.config.get(
        'upstreams') or {}

    for install_properties in other_spack_instances.values():
        upstream_module_roots = install_properties.get('modules', {})
        upstream_module_roots = dict(
@@ -702,16 +703,16 @@ def main(argv=None):
        if stored_var_name in os.environ:
            os.environ[var] = os.environ[stored_var_name]

+   # make spack.config aware of any command line configuration scopes
+   if args.config_scopes:
+       spack.config.command_line_scopes = args.config_scopes
+
    # activate an environment if one was specified on the command line
    if not args.no_env:
        env = ev.find_environment(args)
        if env:
            ev.activate(env, args.use_env_repo, add_view=False)

-   # make spack.config aware of any command line configuration scopes
-   if args.config_scopes:
-       spack.config.command_line_scopes = args.config_scopes
-
    if args.print_shell_vars:
        print_setup_info(*args.print_shell_vars.split(','))
        return 0

@@ -215,9 +215,18 @@ def root_path(name):
    Returns:
        root folder for module file installation
    """
-   # Root folders where the various module files should be written
-   roots = spack.config.get('config:module_roots', {})
-   path = roots.get(name, os.path.join(spack.paths.share_path, name))
+   active_upstream = spack.config.get('config:active_upstream')
+   if active_upstream is not None:
+       # Installs module files to upstream share directory.
+       # Extra logic is needed for determining this location.
+       roots = spack.config.get('upstreams')[active_upstream]['modules']
+       path = roots.get(name, os.path.join(spack.paths.user_share_path, name))
+   else:
+       # If no upstream is active, install module file to user share
+       # directory.
+       roots = spack.config.get('config:module_roots', {})
+       path = roots.get(name, os.path.join(spack.paths.user_share_path, name))
    return spack.util.path.canonicalize_path(path)


@@ -288,6 +297,7 @@ def read_module_indices():
    module_type_to_index = {}
    module_type_to_root = install_properties.get('modules', {})
    for module_type, root in module_type_to_root.items():
+       root = spack.util.path.canonicalize_path(root)
        module_type_to_index[module_type] = read_module_index(root)
    module_indices.append(module_type_to_index)

@@ -22,6 +22,7 @@
import sys
import textwrap
import time
+import traceback
from six import StringIO
from six import string_types
from six import with_metaclass
@@ -1747,7 +1748,23 @@ def uninstall_by_spec(spec, force=False, deprecator=None):
    with spack.store.db.prefix_write_lock(spec):

        if pkg is not None:
-           spack.hooks.pre_uninstall(spec)
+           try:
+               spack.hooks.pre_uninstall(spec)
+           except Exception as error:
+               if force:
+                   error_msg = (
+                       "One or more pre_uninstall hooks have failed"
+                       " for {0}, but Spack is continuing with the"
+                       " uninstall".format(str(spec)))
+                   if isinstance(error, spack.error.SpackError):
+                       error_msg += (
+                           "\n\nError message: {0}".format(str(error)))
+                   tty.warn(error_msg)
+                   # Note that if the uninstall succeeds then we won't be
+                   # seeing this error again and won't have another chance
+                   # to run the hook.
+               else:
+                   raise

        # Uninstalling in Spack only requires removing the prefix.
        if not spec.external:
@@ -1768,7 +1785,20 @@ def uninstall_by_spec(spec, force=False, deprecator=None):
        spack.store.db.remove(spec)

    if pkg is not None:
-       spack.hooks.post_uninstall(spec)
+       try:
+           spack.hooks.post_uninstall(spec)
+       except Exception:
+           # If there is a failure here, this is our only chance to do
+           # something about it: at this point the Spec has been removed
+           # from the DB and prefix, so the post-uninstallation hooks
+           # will not have another chance to run.
+           error_msg = (
+               "One or more post-uninstallation hooks failed for"
+               " {0}, but the prefix has been removed (if it is not"
+               " external).".format(str(spec)))
+           tb_msg = traceback.format_exc()
+           error_msg += "\n\nThe error:\n\n{0}".format(tb_msg)
+           tty.warn(error_msg)

    tty.msg('Successfully uninstalled {0}'.format(spec.short_spec))

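Both guards above follow the same shape: warn and continue when the failure is tolerable, otherwise re-raise. A generic sketch of that pattern (the helper and its parameters are invented for this sketch):

    # Hypothetical distillation of the hook guards in uninstall_by_spec().
    import traceback
    from llnl.util import tty

    def run_hook_guarded(hook, spec, tolerate_failure):
        try:
            hook(spec)
        except Exception:
            if not tolerate_failure:
                raise
            tty.warn("One or more hooks failed for {0}, but Spack is "
                     "continuing.\n\n{1}".format(str(spec),
                                                 traceback.format_exc()))
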
@@ -16,6 +16,9 @@
#: This file lives in $prefix/lib/spack/spack/__file__
prefix = ancestor(__file__, 4)

+#: User configuration location
+user_config_path = os.path.expanduser('~/.spack')
+
#: synonym for prefix
spack_root = prefix

@@ -38,16 +41,16 @@
test_path = os.path.join(module_path, "test")
hooks_path = os.path.join(module_path, "hooks")
var_path = os.path.join(prefix, "var", "spack")
+user_var_path = os.path.join(user_config_path, "var", "spack")
+stage_path = os.path.join(user_var_path, "stage")
repos_path = os.path.join(var_path, "repos")
share_path = os.path.join(prefix, "share", "spack")
+user_share_path = os.path.join(user_config_path, "share", "spack")

# Paths to built-in Spack repositories.
packages_path = os.path.join(repos_path, "builtin")
mock_packages_path = os.path.join(repos_path, "builtin.mock")

-#: User configuration location
-user_config_path = os.path.expanduser('~/.spack')
-

opt_path = os.path.join(prefix, "opt")
etc_path = os.path.join(prefix, "etc")

@@ -154,6 +154,7 @@ def get_stage_root():

    if _stage_root is None:
        candidates = spack.config.get('config:build_stage')

        if isinstance(candidates, string_types):
            candidates = [candidates]

@@ -34,7 +34,7 @@
import spack.directory_layout

#: default installation root, relative to the Spack install path
-default_root = os.path.join(spack.paths.opt_path, 'spack')
+default_root = os.path.join(spack.paths.user_config_path, 'opt/spack')


class Store(object):
@@ -70,9 +70,10 @@ def reindex(self):

def _store():
    """Get the singleton store instance."""
-   root = spack.config.get('config:install_tree', default_root)
-   root = spack.util.path.canonicalize_path(root)
+   root = spack.config.get('config:active_tree', default_root)
+
+   # Canonicalize path for root regardless of origin
+   root = spack.util.path.canonicalize_path(root)
    return Store(root,
                 spack.config.get('config:install_path_scheme'),
                 spack.config.get('config:install_hash_length'))
@@ -88,11 +89,19 @@ def _store():


def retrieve_upstream_dbs():
-   other_spack_instances = spack.config.get('upstreams', {})
+   global_fallback = {'global': {'install_tree': '$spack/opt/spack',
+                                 'modules':
+                                 {'tcl': '$spack/share/spack/modules',
+                                  'lmod': '$spack/share/spack/lmod',
+                                  'dotkit': '$spack/share/spack/dotkit'}}}
+
+   other_spack_instances = spack.config.get('upstreams',
+                                            global_fallback)
    install_roots = []
    for install_properties in other_spack_instances.values():
-       install_roots.append(install_properties['install_tree'])
+       install_roots.append(spack.util.path.canonicalize_path(
+           install_properties['install_tree']))

    return _construct_upstream_dbs_from_install_roots(install_roots)

lib/spack/spack/test/bindist.py (new file, 480 lines)
@@ -0,0 +1,480 @@
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

"""
This test checks creating and installing buildcaches
"""
import os
import py
import pytest
import argparse
import platform
import spack.repo
import spack.store
import spack.binary_distribution as bindist
import spack.cmd.buildcache as buildcache
import spack.cmd.install as install
import spack.cmd.uninstall as uninstall
import spack.cmd.mirror as mirror
from spack.spec import Spec
from spack.directory_layout import YamlDirectoryLayout


def_install_path_scheme = '${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}'  # noqa: E501
ndef_install_path_scheme = '${PACKAGE}/${VERSION}/${ARCHITECTURE}-${COMPILERNAME}-${COMPILERVER}-${HASH}'  # noqa: E501

mirror_path_def = None
mirror_path_rel = None


@pytest.fixture(scope='function')
def cache_directory(tmpdir):
    old_cache_path = spack.caches.fetch_cache
    tmpdir.ensure('fetch_cache', dir=True)
    fsc = spack.fetch_strategy.FsCache(str(tmpdir.join('fetch_cache')))
    spack.config.caches = fsc
    yield spack.config.caches
    tmpdir.join('fetch_cache').remove()
    spack.config.caches = old_cache_path


@pytest.fixture(scope='session')
def session_mirror_def(tmpdir_factory):
    dir = tmpdir_factory.mktemp('mirror')
    global mirror_path_def
    mirror_path_def = dir
    dir.ensure('build_cache', dir=True)
    yield dir
    dir.join('build_cache').remove()


@pytest.fixture(scope='function')
def mirror_directory_def(session_mirror_def):
    yield str(session_mirror_def)


@pytest.fixture(scope='session')
def session_mirror_rel(tmpdir_factory):
    dir = tmpdir_factory.mktemp('mirror')
    global mirror_path_rel
    mirror_path_rel = dir
    dir.ensure('build_cache', dir=True)
    yield dir
    dir.join('build_cache').remove()


@pytest.fixture(scope='function')
def mirror_directory_rel(session_mirror_rel):
    yield str(session_mirror_rel)


@pytest.fixture(scope='session')
def config_directory(tmpdir_factory):
    tmpdir = tmpdir_factory.mktemp('test_configs')
    # restore some sane defaults for packages and config
    config_path = py.path.local(spack.paths.etc_path)
    modules_yaml = config_path.join('spack', 'defaults', 'modules.yaml')
    os_modules_yaml = config_path.join('spack', 'defaults', '%s' %
                                       platform.system().lower(),
                                       'modules.yaml')
    packages_yaml = config_path.join('spack', 'defaults', 'packages.yaml')
    config_yaml = config_path.join('spack', 'defaults', 'config.yaml')
    repos_yaml = config_path.join('spack', 'defaults', 'repos.yaml')
    tmpdir.ensure('site', dir=True)
    tmpdir.ensure('user', dir=True)
    tmpdir.ensure('site/%s' % platform.system().lower(), dir=True)
    modules_yaml.copy(tmpdir.join('site', 'modules.yaml'))
    os_modules_yaml.copy(tmpdir.join('site/%s' % platform.system().lower(),
                         'modules.yaml'))
    packages_yaml.copy(tmpdir.join('site', 'packages.yaml'))
    config_yaml.copy(tmpdir.join('site', 'config.yaml'))
    repos_yaml.copy(tmpdir.join('site', 'repos.yaml'))
    yield tmpdir
    tmpdir.remove()


@pytest.fixture(scope='function')
def default_config(tmpdir_factory, config_directory, monkeypatch):

    # Global upstream is not registered in these tests.
    global_upstream = spack.config.get('upstreams')

    mutable_dir = tmpdir_factory.mktemp('mutable_config').join('tmp')
    config_directory.copy(mutable_dir)

    cfg = spack.config.Configuration(
        *[spack.config.ConfigScope(name, str(mutable_dir))
          for name in ['site/%s' % platform.system().lower(),
                       'site', 'user']])

    monkeypatch.setattr(spack.config, 'config', cfg)

    # Set global upstream
    upstreams = spack.config.get('upstreams')
    if not upstreams:
        spack.config.set('upstreams', global_upstream, scope='user')

    # This is essential, otherwise the cache will create weird side effects
    # that will compromise subsequent tests if compilers.yaml is modified
    monkeypatch.setattr(spack.compilers, '_cache_config_file', [])
    njobs = spack.config.get('config:build_jobs')
    if not njobs:
        spack.config.set('config:build_jobs', 4, scope='user')
    extensions = spack.config.get('config:template_dirs')
    if not extensions:
        spack.config.set('config:template_dirs',
                         [os.path.join(spack.paths.share_path, 'templates')],
                         scope='user')

    mutable_dir.ensure('build_stage', dir=True)
    build_stage = spack.config.get('config:build_stage')
    if not build_stage:
        spack.config.set('config:build_stage',
                         [str(mutable_dir.join('build_stage'))], scope='user')
    timeout = spack.config.get('config:connect_timeout')
    if not timeout:
        spack.config.set('config:connect_timeout', 10, scope='user')

    yield spack.config.config
    mutable_dir.remove()


@pytest.fixture(scope='function')
def install_dir_default_layout(tmpdir):
    """Hooks a fake install directory with a default layout"""
    real_store = spack.store.store
    real_layout = spack.store.layout
    spack.store.store = spack.store.Store(str(tmpdir.join('opt')))
    spack.store.layout = YamlDirectoryLayout(str(tmpdir.join('opt')),
                                             path_scheme=def_install_path_scheme)  # noqa: E501
    yield spack.store
    spack.store.store = real_store
    spack.store.layout = real_layout


@pytest.fixture(scope='function')
def install_dir_non_default_layout(tmpdir):
    """Hooks a fake install directory with a non-default layout"""
    real_store = spack.store.store
    real_layout = spack.store.layout
    spack.store.store = spack.store.Store(str(tmpdir.join('opt')))
    spack.store.layout = YamlDirectoryLayout(str(tmpdir.join('opt')),
                                             path_scheme=ndef_install_path_scheme)  # noqa: E501
    yield spack.store
    spack.store.store = real_store
    spack.store.layout = real_layout


@pytest.mark.requires_executables(
    '/usr/bin/gcc', 'patchelf', 'strings', 'file')
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.usefixtures('default_config', 'cache_directory',
                         'install_dir_default_layout')
def test_default_rpaths_create_install_default_layout(tmpdir,
                                                      mirror_directory_def,
                                                      install_mockery):
    """
    Test the creation and installation of buildcaches with default rpaths
    into the default directory layout scheme.
    """

    gspec = Spec('garply')
    gspec.concretize()
    cspec = Spec('corge')
    cspec.concretize()

    # Install patchelf needed for relocate in linux test environment
    iparser = argparse.ArgumentParser()
    install.setup_parser(iparser)
    # Install some packages with dependent packages
    iargs = iparser.parse_args(['--no-cache', cspec.name])
    install.install(iparser, iargs)

    global mirror_path_def
    mirror_path_def = mirror_directory_def
    mparser = argparse.ArgumentParser()
    mirror.setup_parser(mparser)
    margs = mparser.parse_args(
        ['add', '--scope', 'site', 'test-mirror-def', 'file://%s' % mirror_path_def])
    mirror.mirror(mparser, margs)
    margs = mparser.parse_args(['list'])
    mirror.mirror(mparser, margs)

    # setup argument parser
    parser = argparse.ArgumentParser()
    buildcache.setup_parser(parser)

    # Set default buildcache args
    create_args = ['create', '-a', '-u', '-d', str(mirror_path_def),
                   cspec.name]
    install_args = ['install', '-a', '-u', cspec.name]

    # Create a buildcache
    args = parser.parse_args(create_args)
    buildcache.buildcache(parser, args)
    # Test force overwrite create buildcache
    create_args.insert(create_args.index('-a'), '-f')
    args = parser.parse_args(create_args)
    buildcache.buildcache(parser, args)
    # create mirror index
    args = parser.parse_args(['update-index', '-d', 'file://%s' % str(mirror_path_def)])
    buildcache.buildcache(parser, args)
    # list the buildcaches in the mirror
    args = parser.parse_args(['list', '-a', '-l', '-v'])
    buildcache.buildcache(parser, args)

    # Uninstall the package and deps
    uparser = argparse.ArgumentParser()
    uninstall.setup_parser(uparser)
    uargs = uparser.parse_args(['-y', '--dependents', gspec.name])
    uninstall.uninstall(uparser, uargs)

    # test install
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)

    # This gives a warning that the spec is already installed
    buildcache.buildcache(parser, args)

    # test overwrite install
    install_args.insert(install_args.index('-a'), '-f')
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)

    args = parser.parse_args(['keys', '-f'])
    buildcache.buildcache(parser, args)

    args = parser.parse_args(['list'])
    buildcache.buildcache(parser, args)

    args = parser.parse_args(['list', '-a'])
    buildcache.buildcache(parser, args)

    args = parser.parse_args(['list', '-l', '-v'])
    buildcache.buildcache(parser, args)
    bindist._cached_specs = set()
    spack.stage.purge()
    margs = mparser.parse_args(
        ['rm', '--scope', 'site', 'test-mirror-def'])
    mirror.mirror(mparser, margs)


@pytest.mark.requires_executables(
    '/usr/bin/gcc', 'patchelf', 'strings', 'file')
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@pytest.mark.usefixtures('default_config', 'cache_directory',
                         'install_dir_non_default_layout')
def test_default_rpaths_install_nondefault_layout(tmpdir,
                                                  install_mockery):
    """
    Test the creation and installation of buildcaches with default rpaths
    into the non-default directory layout scheme.
    """

    gspec = Spec('garply')
    gspec.concretize()
    cspec = Spec('corge')
    cspec.concretize()

    global mirror_path_def
    mparser = argparse.ArgumentParser()
    mirror.setup_parser(mparser)
    margs = mparser.parse_args(
        ['add', '--scope', 'site', 'test-mirror-def', 'file://%s' % mirror_path_def])
    mirror.mirror(mparser, margs)

    # setup argument parser
    parser = argparse.ArgumentParser()
    buildcache.setup_parser(parser)

    # Set default buildcache args
    install_args = ['install', '-a', '-u', '%s' % cspec.name]

    # Install some packages with dependent packages
    # test install in non-default install path scheme
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)
    # test force install in non-default install path scheme
    install_args.insert(install_args.index('-a'), '-f')
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)

    bindist._cached_specs = set()
    spack.stage.purge()
    margs = mparser.parse_args(
        ['rm', '--scope', 'site', 'test-mirror-def'])
    mirror.mirror(mparser, margs)


@pytest.mark.requires_executables(
    '/usr/bin/gcc', 'patchelf', 'strings', 'file')
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@pytest.mark.usefixtures('default_config', 'cache_directory',
                         'install_dir_default_layout')
def test_relative_rpaths_create_default_layout(tmpdir,
                                               mirror_directory_rel,
                                               install_mockery):
    """
    Test the creation and installation of buildcaches with relative
    rpaths into the default directory layout scheme.
    """

    gspec = Spec('garply')
    gspec.concretize()
    cspec = Spec('corge')
    cspec.concretize()

    global mirror_path_rel
    mirror_path_rel = mirror_directory_rel
    # Install patchelf needed for relocate in linux test environment
    iparser = argparse.ArgumentParser()
    install.setup_parser(iparser)
    # Install some packages with dependent packages
    iargs = iparser.parse_args(['--no-cache', cspec.name])
    install.install(iparser, iargs)

    # setup argument parser
    parser = argparse.ArgumentParser()
    buildcache.setup_parser(parser)

    # set default buildcache args
    create_args = ['create', '-a', '-u', '-r', '-d',
                   str(mirror_path_rel),
                   cspec.name]

    # create build cache with relativized rpaths
    args = parser.parse_args(create_args)
    buildcache.buildcache(parser, args)
    # create mirror index
    args = parser.parse_args(['update-index', '-d', 'file://%s' % str(mirror_path_rel)])
    buildcache.buildcache(parser, args)
    # Uninstall the package and deps
    uparser = argparse.ArgumentParser()
    uninstall.setup_parser(uparser)
    uargs = uparser.parse_args(['-y', '--dependents', gspec.name])
    uninstall.uninstall(uparser, uargs)

    bindist._cached_specs = set()
    spack.stage.purge()


@pytest.mark.requires_executables(
    '/usr/bin/gcc', 'patchelf', 'strings', 'file')
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@pytest.mark.usefixtures('default_config', 'cache_directory',
                         'install_dir_default_layout')
def test_relative_rpaths_install_default_layout(tmpdir,
                                                install_mockery):
    """
    Test the creation and installation of buildcaches with relative
    rpaths into the default directory layout scheme.
    """

    gspec = Spec('garply')
    gspec.concretize()
    cspec = Spec('corge')
    cspec.concretize()

    global mirror_path_rel
    mparser = argparse.ArgumentParser()
    mirror.setup_parser(mparser)
    margs = mparser.parse_args(
        ['add', '--scope', 'site', 'test-mirror-rel', 'file://%s' % mirror_path_rel])
    mirror.mirror(mparser, margs)

    # Install patchelf needed for relocate in linux test environment
    iparser = argparse.ArgumentParser()
    install.setup_parser(iparser)

    # setup argument parser
    parser = argparse.ArgumentParser()
    buildcache.setup_parser(parser)

    # set default buildcache args
    install_args = ['install', '-a', '-u',
                    cspec.name]

    # install buildcache created with relativized rpaths
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)

    # This gives a warning that the spec is already installed
    buildcache.buildcache(parser, args)

    # Uninstall the package and deps
    uparser = argparse.ArgumentParser()
    uninstall.setup_parser(uparser)
    uargs = uparser.parse_args(['-y', '--dependents', gspec.name])
    uninstall.uninstall(uparser, uargs)

    # install build cache
    buildcache.buildcache(parser, args)

    # test overwrite install
    install_args.insert(install_args.index('-a'), '-f')
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)

    bindist._cached_specs = set()
    spack.stage.purge()
    margs = mparser.parse_args(
        ['rm', '--scope', 'site', 'test-mirror-rel'])
    mirror.mirror(mparser, margs)


@pytest.mark.requires_executables(
    '/usr/bin/gcc', 'patchelf', 'strings', 'file')
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@pytest.mark.usefixtures('default_config', 'cache_directory',
                         'install_dir_non_default_layout')
def test_relative_rpaths_install_nondefault(tmpdir,
                                            install_mockery):
    """
    Test the installation of buildcaches with relativized rpaths
    into the non-default directory layout scheme.
    """

    gspec = Spec('garply')
    gspec.concretize()
    cspec = Spec('corge')
    cspec.concretize()

    global mirror_path_rel

    mparser = argparse.ArgumentParser()
    mirror.setup_parser(mparser)
    margs = mparser.parse_args(
        ['add', '--scope', 'site', 'test-mirror-rel', 'file://%s' % mirror_path_rel])
    mirror.mirror(mparser, margs)

    # Install patchelf needed for relocate in linux test environment
    iparser = argparse.ArgumentParser()
    install.setup_parser(iparser)

    # setup argument parser
    parser = argparse.ArgumentParser()
    buildcache.setup_parser(parser)

    # Set default buildcache args
    install_args = ['install', '-a', '-u', '%s' % cspec.name]

    # test install in non-default install path scheme and relative path
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)

    bindist._cached_specs = set()
    spack.stage.purge()
    margs = mparser.parse_args(
        ['rm', '--scope', 'site', 'test-mirror-rel'])
    mirror.mirror(mparser, margs)

@@ -117,7 +117,7 @@ def test_uninstall_deprecated(mock_packages, mock_archive, mock_fetch,

    non_deprecated = spack.store.db.query()

-   uninstall('-y', 'libelf@0.8.10')
+   uninstall('-y', '-g', 'libelf@0.8.10')

    assert spack.store.db.query() == spack.store.db.query(installed=any)
    assert spack.store.db.query() == non_deprecated

@@ -58,6 +58,46 @@ def test_install_package_and_dependency(
    assert 'errors="0"' in content


+def test_global_install_package_and_dependency(
+       tmpdir, mock_packages, mock_archive, mock_fetch, config,
+       install_mockery):
+
+   with tmpdir.as_cwd():
+       install('--global',
+               '--log-format=junit',
+               '--log-file=test.xml',
+               'libdwarf')
+
+   files = tmpdir.listdir()
+   filename = tmpdir.join('test.xml')
+   assert filename in files
+
+   content = filename.open().read()
+   assert 'tests="2"' in content
+   assert 'failures="0"' in content
+   assert 'errors="0"' in content
+
+
+def test_upstream_install_package_and_dependency(
+       tmpdir, mock_packages, mock_archive, mock_fetch, config,
+       install_mockery):
+
+   with tmpdir.as_cwd():
+       install('--upstream=global',
+               '--log-format=junit',
+               '--log-file=test.xml',
+               'libdwarf')
+
+   files = tmpdir.listdir()
+   filename = tmpdir.join('test.xml')
+   assert filename in files
+
+   content = filename.open().read()
+   assert 'tests="2"' in content
+   assert 'failures="0"' in content
+   assert 'errors="0"' in content
+
+
@pytest.mark.disable_clean_stage_check
def test_install_runtests_notests(monkeypatch, mock_packages, install_mockery):
    def check(pkg):

@@ -81,6 +81,41 @@ def test_force_uninstall_spec_with_ref_count_not_zero(


+@pytest.mark.db
+@pytest.mark.usefixtures('mutable_database')
+def test_global_recursive_uninstall():
+   """Test recursive uninstall from global upstream"""
+   uninstall('-g', '-y', '-a', '--dependents', 'callpath')
+
+   all_specs = spack.store.layout.all_specs()
+   assert len(all_specs) == 8
+   # query specs with multiple configurations
+   mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
+   callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
+   mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
+
+   assert len(mpileaks_specs) == 0
+   assert len(callpath_specs) == 0
+   assert len(mpi_specs) == 3
+
+
+@pytest.mark.db
+@pytest.mark.usefixtures('mutable_database')
+def test_upstream_recursive_uninstall():
+   """Test recursive uninstall from specified upstream"""
+   uninstall('--upstream=global', '-y', '-a', '--dependents', 'callpath')
+
+   all_specs = spack.store.layout.all_specs()
+   assert len(all_specs) == 8
+   # query specs with multiple configurations
+   mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
+   callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
+   mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
+
+   assert len(mpileaks_specs) == 0
+   assert len(callpath_specs) == 0
+   assert len(mpi_specs) == 3
+
+
def test_force_uninstall_and_reinstall_by_hash(mutable_database):
    """Test forced uninstall and reinstall of old specs."""
    # this is the spec to be removed

@@ -1,5 +1,5 @@
config:
-  install_tree: $spack/opt/spack
+  install_tree: ~/.spack/opt/spack
   template_dirs:
     - $spack/share/spack/templates
     - $spack/lib/spack/spack/test/data/templates
@@ -7,7 +7,7 @@ config:
   build_stage:
     - $tempdir/$user/spack-stage
     - ~/.spack/stage
-  source_cache: $spack/var/spack/cache
+  source_cache: ~/.spack/var/spack/cache
   misc_cache: ~/.spack/cache
   verify_ssl: true
   checksum: true

lib/spack/spack/test/data/config/upstreams.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
upstreams:
  global:
    install_tree: $spack/opt/spack
    modules:
      tcl: $spack/share/spack/modules
      lmod: $spack/share/spack/lmod
      dotkit: $spack/share/spack/dotkit
@@ -13,6 +13,7 @@
import os
import pytest
import json
import shutil
try:
    import uuid
    _use_uuid = True
@@ -48,6 +49,19 @@ def test_store(tmpdir):
    spack.store.store = real_store


@pytest.fixture()
def test_global_db_initialization():
    saved_store = spack.store.store
    global_db_path = '$spack/opt/spack'
    global_db_path = spack.util.path.canonicalize_path(global_db_path)
    shutil.rmtree(os.path.join(global_db_path, '.spack-db'))
    spack.store.store = spack.store.Store(str(global_db_path))

    yield

    spack.store.store = saved_store


@pytest.fixture()
def upstream_and_downstream_db(tmpdir_factory, gen_mock_layout):
    mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
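The save/restore shape of the fixture above is easy to get wrong when the saved object and its replacement share one variable name (the submitted version reassigned global_store and so restored the wrong store; the cleanup above keeps them distinct). A generic sketch of the pattern, using plain pytest and no Spack internals:

import pytest

class FakeStore(object):
    """Stand-in for a global store object (illustration only)."""

store = FakeStore()  # module-level global, as spack.store.store is

@pytest.fixture()
def swapped_store():
    global store
    saved = store        # keep the original under a name we never reuse
    store = FakeStore()  # install a fresh store for the duration of a test
    yield store
    store = saved        # put the original back afterwards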
@@ -1143,8 +1143,6 @@ def read():
    assert vals['read'] == 1


@pytest.mark.skipif('macos' in os.environ.get('GITHUB_WORKFLOW', ''),
                    reason="Skip failing test for GA on MacOS")
def test_lock_debug_output(lock_path):
    host = socket.getfqdn()
@@ -111,7 +111,7 @@ def test_log_subproc_and_echo_output_capfd(capfd, tmpdir):
# Tests below use a pseudoterminal to test llnl.util.tty.log
#
def simple_logger(**kwargs):
    """Mock logger (child) process for testing log.keyboard_input."""
    """Mock logger (minion) process for testing log.keyboard_input."""
    def handler(signum, frame):
        running[0] = False
    signal.signal(signal.SIGUSR1, handler)

@@ -125,7 +125,7 @@ def handler(signum, frame):


def mock_shell_fg(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.fg()
    ctl.status()
    ctl.wait_enabled()

@@ -134,7 +134,7 @@ def mock_shell_fg(proc, ctl, **kwargs):


def mock_shell_fg_no_termios(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.fg()
    ctl.status()
    ctl.wait_disabled_fg()

@@ -143,7 +143,7 @@ def mock_shell_fg_no_termios(proc, ctl, **kwargs):


def mock_shell_bg(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.bg()
    ctl.status()
    ctl.wait_disabled()

@@ -152,7 +152,7 @@ def mock_shell_bg(proc, ctl, **kwargs):


def mock_shell_tstp_cont(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.tstp()
    ctl.wait_stopped()

@@ -163,7 +163,7 @@ def mock_shell_tstp_cont(proc, ctl, **kwargs):


def mock_shell_tstp_tstp_cont(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.tstp()
    ctl.wait_stopped()

@@ -177,7 +177,7 @@ def mock_shell_tstp_tstp_cont(proc, ctl, **kwargs):


def mock_shell_tstp_tstp_cont_cont(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.tstp()
    ctl.wait_stopped()

@@ -194,7 +194,7 @@ def mock_shell_tstp_tstp_cont_cont(proc, ctl, **kwargs):


def mock_shell_bg_fg(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.bg()
    ctl.status()
    ctl.wait_disabled()

@@ -207,7 +207,7 @@ def mock_shell_bg_fg(proc, ctl, **kwargs):


def mock_shell_bg_fg_no_termios(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.bg()
    ctl.status()
    ctl.wait_disabled()

@@ -220,7 +220,7 @@ def mock_shell_bg_fg_no_termios(proc, ctl, **kwargs):


def mock_shell_fg_bg(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.fg()
    ctl.status()
    ctl.wait_enabled()

@@ -233,7 +233,7 @@ def mock_shell_fg_bg(proc, ctl, **kwargs):


def mock_shell_fg_bg_no_termios(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.fg()
    ctl.status()
    ctl.wait_disabled_fg()

@@ -299,7 +299,7 @@ def test_foreground_background(test_fn, termios_on_or_off, tmpdir):


def synchronized_logger(**kwargs):
    """Mock logger (child) process for testing log.keyboard_input.
    """Mock logger (minion) process for testing log.keyboard_input.

    This logger synchronizes with the parent process to test that 'v' can
    toggle output. It is used in ``test_foreground_background_output`` below.

@@ -330,7 +330,7 @@ def handler(signum, frame):


def mock_shell_v_v(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background_output."""
    """Controller function for test_foreground_background_output."""
    write_lock = kwargs["write_lock"]
    v_lock = kwargs["v_lock"]

@@ -357,7 +357,7 @@ def mock_shell_v_v(proc, ctl, **kwargs):


def mock_shell_v_v_no_termios(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background_output."""
    """Controller function for test_foreground_background_output."""
    write_lock = kwargs["write_lock"]
    v_lock = kwargs["v_lock"]

@@ -395,9 +395,9 @@ def test_foreground_background_output(
    shell = PseudoShell(test_fn, synchronized_logger)
    log_path = str(tmpdir.join("log.txt"))

    # Locks for synchronizing with child
    write_lock = multiprocessing.Lock()  # must be held by child to write
    v_lock = multiprocessing.Lock()  # held while master is in v mode
    # Locks for synchronizing with minion
    write_lock = multiprocessing.Lock()  # must be held by minion to write
    v_lock = multiprocessing.Lock()  # held while controller is in v mode

    with termios_on_or_off():
        shell.start(
@@ -423,16 +423,16 @@
    with open(log_path) as log:
        log = log.read().strip().split("\n")

    # Master and child process coordinate with locks such that the child
    # Controller and minion process coordinate with locks such that the minion
    # writes "off" when echo is off, and "on" when echo is on. The
    # output should contain mostly "on" lines, but may contain an "off"
    # or two. This is because the master toggles echo by sending "v" on
    # stdin to the child, but this is not synchronized with our locks.
    # or two. This is because the controller toggles echo by sending "v" on
    # stdin to the minion, but this is not synchronized with our locks.
    # It's good enough for a test, though. We allow at most 2 "off"'s in
    # the output to account for the race.
    assert (
        ['forced output', 'on'] == uniq(output) or
        output.count("off") <= 2  # if master_fd is a bit slow
        output.count("off") <= 2  # if controller_fd is a bit slow
    )

    # log should be off for a while, then on, then off
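The comment block above is the heart of the synchronization; the same two-lock handshake can be shown in isolation. A minimal sketch (not Spack's PseudoShell, just multiprocessing primitives): the minion may only write while holding write_lock, and it reports "on" exactly when the controller is holding v_lock.

import multiprocessing

def minion(write_lock, v_lock, out):
    for _ in range(5):
        with write_lock:                   # must be held by minion to write
            got_v = v_lock.acquire(block=False)
            if got_v:                      # controller was NOT in v mode
                v_lock.release()
            out.put('off' if got_v else 'on')

if __name__ == '__main__':
    write_lock = multiprocessing.Lock()
    v_lock = multiprocessing.Lock()        # held while controller is in v mode
    out = multiprocessing.Queue()
    v_lock.acquire()                       # controller enters v mode
    p = multiprocessing.Process(target=minion,
                                args=(write_lock, v_lock, out))
    p.start()
    p.join()
    print([out.get() for _ in range(5)])   # ['on', 'on', 'on', 'on', 'on']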
@@ -14,7 +14,7 @@
NOTAR_EXTS = ["zip", "tgz", "tbz2", "txz"]

# Add PRE_EXTS and EXTS last so that .tar.gz is matched *before* .tar or .gz
ALLOWED_ARCHIVE_TYPES = [".".join(l) for l in product(
ALLOWED_ARCHIVE_TYPES = [".".join(ext) for ext in product(
    PRE_EXTS, EXTS)] + PRE_EXTS + EXTS + NOTAR_EXTS


@@ -36,7 +36,7 @@ def decompressor_for(path, extension=None):
        bunzip2 = which('bunzip2', required=True)
        return bunzip2
    tar = which('tar', required=True)
    tar.add_default_arg('-xf')
    tar.add_default_arg('-oxf')
    return tar
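The only change to the comprehension is the loop variable: a bare l reads like the digit 1 and trips linters; the construction itself is untouched. For reference, a self-contained version with assumed values for PRE_EXTS and EXTS (the real lists are defined earlier in the file and may differ):

from itertools import product

# Assumed values for illustration only.
PRE_EXTS = ["tar"]
EXTS = ["gz", "bz2", "xz"]
NOTAR_EXTS = ["zip", "tgz", "tbz2", "txz"]

# Compound suffixes come first, so '.tar.gz' wins over bare '.tar' or '.gz'.
ALLOWED_ARCHIVE_TYPES = [".".join(ext) for ext in product(
    PRE_EXTS, EXTS)] + PRE_EXTS + EXTS + NOTAR_EXTS

print(ALLOWED_ARCHIVE_TYPES[:3])  # ['tar.gz', 'tar.bz2', 'tar.xz']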
@@ -133,7 +133,7 @@ def __init__(self, hexdigest, **kwargs):
    @property
    def hash_name(self):
        """Get the name of the hash function this Checker is using."""
        return self.hash_fun().name
        return self.hash_fun().name.lower()

    def check(self, filename):
        """Read the file with the specified name and check its checksum
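Normalizing with .lower() makes the reported algorithm name safe to compare against lowercase checksum keys; hashlib's own algorithms already report lowercase names, but externally provided (OpenSSL-backed) ones are not guaranteed to. A standalone sketch of the property:

import hashlib

def hash_name(hash_fun):
    """Name of a hash constructor, normalized to lowercase."""
    # lower() is a no-op for the built-ins ('sha256'); it guards
    # algorithm names supplied by other providers.
    return hash_fun().name.lower()

assert hash_name(hashlib.sha256) == 'sha256'
assert hash_name(hashlib.md5) == 'md5'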
@@ -13,7 +13,7 @@

_gnupg_version_re = r"^gpg \(GnuPG\) (.*)$"

GNUPGHOME = spack.paths.gpg_path
GNUPGHOME = os.getenv('SPACK_GNUPGHOME', spack.paths.gpg_path)


def parse_keys_output(output):
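The change lets an environment variable relocate the GPG home while keeping the packaged path as the fallback. The same os.getenv pattern in isolation (the default below is made up; the real one is spack.paths.gpg_path):

import os

HYPOTHETICAL_DEFAULT = '/opt/spack/opt/spack/gpg'

# The environment wins when SPACK_GNUPGHOME is set; otherwise behavior
# is unchanged for existing installs.
gnupg_home = os.getenv('SPACK_GNUPGHOME', HYPOTHETICAL_DEFAULT)
print(gnupg_home)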
@@ -9,20 +9,6 @@ ENV DOCKERFILE_BASE=centos \
    CURRENTLY_BUILDING_DOCKER_IMAGE=1 \
    container=docker

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

RUN yum update -y \
 && yum install -y epel-release \
 && yum update -y \
@@ -50,6 +36,20 @@ RUN yum update -y \
 && rm -rf /var/cache/yum \
 && yum clean all

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

RUN mkdir -p /root/.spack \
 && cp $SPACK_ROOT/share/spack/docker/modules.yaml \
        /root/.spack/modules.yaml \
@@ -9,20 +9,6 @@ ENV DOCKERFILE_BASE=centos \
    CURRENTLY_BUILDING_DOCKER_IMAGE=1 \
    container=docker

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

RUN yum update -y \
 && yum install -y epel-release \
 && yum update -y \
@@ -50,6 +36,20 @@ RUN yum update -y \
 && rm -rf /var/cache/yum \
 && yum clean all

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

RUN mkdir -p /root/.spack \
 && cp $SPACK_ROOT/share/spack/docker/modules.yaml \
        /root/.spack/modules.yaml \
@@ -9,20 +9,6 @@ ENV DOCKERFILE_BASE=ubuntu:16.04 \
    CURRENTLY_BUILDING_DOCKER_IMAGE=1 \
    container=docker

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

RUN apt-get -yqq update \
 && apt-get -yqq install --no-install-recommends \
        build-essential \
@@ -48,6 +34,20 @@ RUN apt-get -yqq update \
 && pip3 install boto3 \
 && rm -rf /var/lib/apt/lists/*

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

# Add LANG default to en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
ENV LANG en_US.UTF-8
@@ -9,20 +9,6 @@ ENV DOCKERFILE_BASE=ubuntu \
    CURRENTLY_BUILDING_DOCKER_IMAGE=1 \
    container=docker

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

RUN apt-get -yqq update \
 && apt-get -yqq install --no-install-recommends \
        build-essential \
@@ -48,6 +34,20 @@ RUN apt-get -yqq update \
 && pip3 install boto3 \
 && rm -rf /var/lib/apt/lists/*

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

# Add LANG default to en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
ENV LANG en_US.UTF-8
share/spack/qa/install_patchelf.sh (new executable file, 22 lines)
@@ -0,0 +1,22 @@
#!/bin/sh
#
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

#
# Description:
#     Install patchelf for use in buildcache unit tests
#
# Usage:
#     install-patchelf.sh
#
set -ex
if [ "$TRAVIS_OS_NAME" = "linux" ]; then
    olddir=$PWD
    cd /tmp
    wget https://github.com/NixOS/patchelf/archive/0.10.tar.gz
    tar -xvf 0.10.tar.gz
    cd patchelf-0.10 && ./bootstrap.sh && ./configure --prefix=/usr && make && sudo make install && cd $olddir
fi
@@ -18,7 +18,7 @@
ORIGINAL_PATH="$PATH"

. "$(dirname $0)/setup.sh"
check_dependencies $coverage git hg svn
check_dependencies $coverage kcov git hg svn

# Clean the environment by removing Spack from the path and getting rid of
# the spack shell function
@@ -37,11 +37,7 @@ bin/spack -h
bin/spack help -a

# Profile and print top 20 lines for a simple call to spack spec
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
    spack -p --lines 20 spec openmpi
else
    spack -p --lines 20 spec mpileaks%gcc ^elfutils@0.170
fi
spack -p --lines 20 spec mpileaks%gcc ^elfutils@0.170

#-----------------------------------------------------------
# Run unit tests with code coverage
@@ -26,14 +26,11 @@ if [[ "$COVERAGE" == "true" ]]; then
|
||||
coverage=coverage
|
||||
coverage_run="coverage run"
|
||||
|
||||
# bash coverage depends on some other factors -- there are issues with
|
||||
# kcov for Python 2.6, unit tests, and build tests.
|
||||
if [[ $TRAVIS_PYTHON_VERSION != 2.6 ]]; then
|
||||
mkdir -p coverage
|
||||
cc_script="$SPACK_ROOT/lib/spack/env/cc"
|
||||
bashcov=$(realpath ${QA_DIR}/bashcov)
|
||||
sed -i~ "s@#\!/bin/bash@#\!${bashcov}@" "$cc_script"
|
||||
fi
|
||||
# bash coverage depends on some other factors
|
||||
mkdir -p coverage
|
||||
cc_script="$SPACK_ROOT/lib/spack/env/cc"
|
||||
bashcov=$(realpath ${QA_DIR}/bashcov)
|
||||
sed -i~ "s@#\!/bin/bash@#\!${bashcov}@" "$cc_script"
|
||||
fi
|
||||
|
||||
#
|
||||
@@ -74,6 +71,9 @@ check_dependencies() {
                spack_package=mercurial
                pip_package=mercurial
                ;;
            kcov)
                spack_package=kcov
                ;;
            svn)
                spack_package=subversion
                ;;
@@ -639,7 +639,7 @@ _spack_containerize() {
_spack_create() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help --keep-stage -n --name -t --template -r --repo -N --namespace -f --force --skip-editor"
        SPACK_COMPREPLY="-h --help --keep-stage -n --name -t --template -r --repo -N --namespace -f --force --skip-editor -b --batch"
    else
        SPACK_COMPREPLY=""
    fi
@@ -962,7 +962,7 @@ _spack_info() {
_spack_install() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help --only -u --until -j --jobs --overwrite --fail-fast --keep-prefix --keep-stage --dont-restage --use-cache --no-cache --cache-only --no-check-signature --show-log-on-error --source -n --no-checksum -v --verbose --fake --only-concrete -f --file --clean --dirty --test --run-tests --log-format --log-file --help-cdash --cdash-upload-url --cdash-build --cdash-site --cdash-track --cdash-buildstamp -y --yes-to-all"
        SPACK_COMPREPLY="-h --help --only -u --until -j --jobs --overwrite --fail-fast --keep-prefix --keep-stage --dont-restage --use-cache --no-cache --cache-only --no-check-signature --show-log-on-error --source -n --no-checksum -v --verbose --fake --only-concrete -f --file --upstream -g --global --clean --dirty --test --run-tests --log-format --log-file --help-cdash --cdash-upload-url --cdash-build --cdash-site --cdash-track --cdash-buildstamp -y --yes-to-all"
    else
        _all_packages
    fi
@@ -1436,7 +1436,7 @@ _spack_test() {
_spack_uninstall() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help -f --force -R --dependents -y --yes-to-all -a --all"
        SPACK_COMPREPLY="-h --help -f --force -R --dependents -y --yes-to-all -a --all -u --upstream -g --global"
    else
        _installed_packages
    fi
@@ -7,7 +7,7 @@ RUN mkdir {{ paths.environment }} \
    {{ manifest }} > {{ paths.environment }}/spack.yaml

# Install the software, remove unnecessary deps
RUN cd {{ paths.environment }} && spack env activate . && spack install && spack gc -y
RUN cd {{ paths.environment }} && spack env activate . && spack install --fail-fast && spack gc -y
{% if strip %}

# Strip all the binaries
@@ -12,7 +12,7 @@ EOF
# Install all the required software
. /opt/spack/share/spack/setup-env.sh
spack env activate .
spack install
spack install --fail-fast
spack gc -y
spack env deactivate
spack env activate --sh -d . >> {{ paths.environment }}/environment_modifications.sh
var/spack/repos/builtin.mock/packages/corge/package.py (new file, 155 lines)
@@ -0,0 +1,155 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


from spack import *
import os


class Corge(Package):
    """A toy package to test dependencies"""

    homepage = "https://www.example.com"
    url = "https://github.com/gartung/corge/archive/v3.0.0.tar.gz"

    version('3.0.0',
            sha256='5058861c3b887511387c725971984cec665a8307d660158915a04d7786fed6bc')

    depends_on('quux')

    def install(self, spec, prefix):
        corge_cc = '''#include <iostream>
#include <stdexcept>
#include "corge.h"
#include "corge_version.h"
#include "quux/quux.h"

const int Corge::version_major = corge_version_major;
const int Corge::version_minor = corge_version_minor;

Corge::Corge()
{
}

int
Corge::get_version() const
{
    return 10 * version_major + version_minor;
}

int
Corge::corgegate() const
{
    int corge_version = get_version();
    std::cout << "Corge::corgegate version " << corge_version
              << " invoked" << std::endl;
    std::cout << "Corge config directory = %s" << std::endl;
    Quux quux;
    int quux_version = quux.quuxify();

    if(quux_version != corge_version) {
        throw std::runtime_error(
            "Corge found an incompatible version of Quux.");
    }

    return corge_version;
}
'''
        corge_h = '''#ifndef CORGE_H_

class Corge
{
private:
    static const int version_major;
    static const int version_minor;

public:
    Corge();
    int get_version() const;
    int corgegate() const;
};

#endif // CORGE_H_
'''
        corge_version_h = '''
const int corge_version_major = %s;
const int corge_version_minor = %s;
'''
        corgegator_cc = '''
#include <iostream>
#include "corge.h"

int
main(int argc, char* argv[])
{
    std::cout << "corgerator called with ";
    if (argc == 0) {
        std::cout << "no command-line arguments" << std::endl;
    } else {
        std::cout << "command-line arguments:";
        for (int i = 0; i < argc; ++i) {
            std::cout << " \"" << argv[i] << "\"";
        }
        std::cout << std::endl;
    }
    std::cout << "corgegating.." << std::endl;
    Corge corge;
    corge.corgegate();
    std::cout << "done." << std::endl;
    return 0;
}
'''
        mkdirp(prefix.lib64)
        mkdirp('%s/corge' % prefix.include)
        mkdirp('%s/corge' % self.stage.source_path)
        with open('%s/corge_version.h' % self.stage.source_path, 'w') as f:
            f.write(corge_version_h % (self.version[0], self.version[1:]))
        with open('%s/corge/corge.cc' % self.stage.source_path, 'w') as f:
            f.write(corge_cc % prefix.config)
        with open('%s/corge/corge.h' % self.stage.source_path, 'w') as f:
            f.write(corge_h)
        with open('%s/corge/corgegator.cc' % self.stage.source_path, 'w') as f:
            f.write(corgegator_cc)
        gpp = which('/usr/bin/g++')
        gpp('-Dcorge_EXPORTS',
            '-I%s' % self.stage.source_path,
            '-I%s' % spec['quux'].prefix.include,
            '-I%s' % spec['garply'].prefix.include,
            '-O2', '-g', '-DNDEBUG', '-fPIC',
            '-o', 'corge.cc.o',
            '-c', 'corge/corge.cc')
        gpp('-Dcorge_EXPORTS',
            '-I%s' % self.stage.source_path,
            '-I%s' % spec['quux'].prefix.include,
            '-I%s' % spec['garply'].prefix.include,
            '-O2', '-g', '-DNDEBUG', '-fPIC',
            '-o', 'corgegator.cc.o',
            '-c', 'corge/corgegator.cc')
        gpp('-fPIC', '-O2', '-g', '-DNDEBUG', '-shared',
            '-Wl,-soname,libcorge.so', '-o', 'libcorge.so', 'corge.cc.o',
            '-Wl,-rpath,%s:%s::::' %
            (spec['quux'].prefix.lib64, spec['garply'].prefix.lib64),
            '%s/libquux.so' % spec['quux'].prefix.lib64,
            '%s/libgarply.so' % spec['garply'].prefix.lib64)
        gpp('-O2', '-g', '-DNDEBUG', '-rdynamic',
            'corgegator.cc.o', '-o', 'corgegator',
            '-Wl,-rpath,%s:%s:%s:::' % (prefix.lib64,
                                        spec['quux'].prefix.lib64,
                                        spec['garply'].prefix.lib64),
            'libcorge.so',
            '%s/libquux.so' % spec['quux'].prefix.lib64,
            '%s/libgarply.so' % spec['garply'].prefix.lib64)
        copy('corgegator', '%s/corgegator' % prefix.lib64)
        copy('libcorge.so', '%s/libcorge.so' % prefix.lib64)
        copy('%s/corge/corge.h' % self.stage.source_path,
             '%s/corge/corge.h' % prefix.include)
        mkdirp(prefix.bin)
        copy('corge_version.h', '%s/corge_version.h' % prefix.bin)
        os.symlink('%s/corgegator' % prefix.lib64,
                   '%s/corgegator' % prefix.bin)
        os.symlink('%s/quuxifier' % spec['quux'].prefix.lib64,
                   '%s/quuxifier' % prefix.bin)
        os.symlink('%s/garplinator' % spec['garply'].prefix.lib64,
                   '%s/garplinator' % prefix.bin)
var/spack/repos/builtin.mock/packages/garply/package.py (new file, 112 lines)
@@ -0,0 +1,112 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


from spack import *
import os


class Garply(Package):
    """Toy package for testing dependencies"""

    homepage = "https://www.example.com"
    url = "https://github.com/gartung/garply/archive/v3.0.0.tar.gz"

    version('3.0.0',
            sha256='534ac8ba7a6fed7e8bbb543bd43ca04999e65337445a531bd296939f5ac2f33d')

    def install(self, spec, prefix):
        garply_h = '''#ifndef GARPLY_H_

class Garply
{
private:
    static const int version_major;
    static const int version_minor;

public:
    Garply();
    int get_version() const;
    int garplinate() const;
};

#endif // GARPLY_H_
'''
        garply_cc = '''#include "garply.h"
#include "garply_version.h"
#include <iostream>

const int Garply::version_major = garply_version_major;
const int Garply::version_minor = garply_version_minor;

Garply::Garply() {}

int
Garply::get_version() const
{
    return 10 * version_major + version_minor;
}

int
Garply::garplinate() const
{
    std::cout << "Garply::garplinate version " << get_version()
              << " invoked" << std::endl;
    std::cout << "Garply config dir = %s" << std::endl;
    return get_version();
}
'''
        garplinator_cc = '''#include "garply.h"
#include <iostream>

int
main()
{
    Garply garply;
    garply.garplinate();

    return 0;
}
'''
        garply_version_h = '''const int garply_version_major = %s;
const int garply_version_minor = %s;
'''
        mkdirp(prefix.lib64)
        mkdirp('%s/garply' % prefix.include)
        mkdirp('%s/garply' % self.stage.source_path)
        with open('%s/garply_version.h' % self.stage.source_path, 'w') as f:
            f.write(garply_version_h % (self.version[0], self.version[1:]))
        with open('%s/garply/garply.h' % self.stage.source_path, 'w') as f:
            f.write(garply_h)
        with open('%s/garply/garply.cc' % self.stage.source_path, 'w') as f:
            f.write(garply_cc % prefix.config)
        with open('%s/garply/garplinator.cc' %
                  self.stage.source_path, 'w') as f:
            f.write(garplinator_cc)
        gpp = which('/usr/bin/g++')
        gpp('-Dgarply_EXPORTS',
            '-I%s' % self.stage.source_path,
            '-O2', '-g', '-DNDEBUG', '-fPIC',
            '-o', 'garply.cc.o',
            '-c', '%s/garply/garply.cc' % self.stage.source_path)
        gpp('-Dgarply_EXPORTS',
            '-I%s' % self.stage.source_path,
            '-O2', '-g', '-DNDEBUG', '-fPIC',
            '-o', 'garplinator.cc.o',
            '-c', '%s/garply/garplinator.cc' % self.stage.source_path)
        gpp('-fPIC', '-O2', '-g', '-DNDEBUG', '-shared',
            '-Wl,-soname,libgarply.so', '-o', 'libgarply.so', 'garply.cc.o')
        gpp('-O2', '-g', '-DNDEBUG', '-rdynamic',
            'garplinator.cc.o', '-o', 'garplinator',
            '-Wl,-rpath,%s' % prefix.lib64,
            'libgarply.so')
        copy('libgarply.so', '%s/libgarply.so' % prefix.lib64)
        copy('garplinator', '%s/garplinator' % prefix.lib64)
        copy('%s/garply/garply.h' % self.stage.source_path,
             '%s/garply/garply.h' % prefix.include)
        mkdirp(prefix.bin)
        copy('garply_version.h', '%s/garply_version.h' % prefix.bin)
        os.symlink('%s/garplinator' % prefix.lib64,
                   '%s/garplinator' % prefix.bin)
@@ -7,16 +7,17 @@

class Patchelf(AutotoolsPackage):
    """
    PatchELF is a small utility to modify the
    dynamic linker and RPATH of ELF executables.
    """
    """PatchELF is a small utility to modify the dynamic linker and RPATH of
    ELF executables."""

    homepage = "https://nixos.org/patchelf.html"
    url      = "http://nixos.org/releases/patchelf/patchelf-0.8/patchelf-0.8.tar.gz"

    list_url = "http://nixos.org/releases/patchelf/"
    url      = "https://nixos.org/releases/patchelf/patchelf-0.10/patchelf-0.10.tar.gz"
    list_url = "https://nixos.org/releases/patchelf/"
    list_depth = 1

    version('0.9', '3c265508526760f233620f35d79c79fc')
    version('0.8', '407b229e6a681ffb0e2cdd5915cb2d01')
    version('0.10', sha256='b2deabce05c34ce98558c0efb965f209de592197b2c88e930298d740ead09019')
    version('0.9', sha256='f2aa40a6148cb3b0ca807a1bf836b081793e55ec9e5540a5356d800132be7e0a')
    version('0.8', sha256='14af06a2da688d577d64ff8dac065bb8903bbffbe01d30c62df7af9bf4ce72fe')

    def install(self, spec, prefix):
        install_tree(self.stage.source_path, prefix)
var/spack/repos/builtin.mock/packages/quux/package.py (new file, 132 lines)
@@ -0,0 +1,132 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


from spack import *
import os


class Quux(Package):
    """Toy package for testing dependencies"""

    homepage = "https://www.example.com"
    url = "https://github.com/gartung/quux/archive/v3.0.0.tar.gz"

    version('3.0.0',
            sha256='b91bc96fb746495786bddac2c527039177499f2f76d3fa9dcf0b393859e68484')

    depends_on('garply')

    def install(self, spec, prefix):
        quux_cc = '''#include "quux.h"
#include "garply/garply.h"
#include "quux_version.h"
#include <iostream>
#include <stdexcept>

const int Quux::version_major = quux_version_major;
const int Quux::version_minor = quux_version_minor;

Quux::Quux() {}

int
Quux::get_version() const
{
    return 10 * version_major + version_minor;
}

int
Quux::quuxify() const
{
    int quux_version = get_version();
    std::cout << "Quux::quuxify version " << quux_version
              << " invoked" << std::endl;
    std::cout << "Quux config directory is %s" << std::endl;
    Garply garply;
    int garply_version = garply.garplinate();

    if (garply_version != quux_version) {
        throw std::runtime_error(
            "Quux found an incompatible version of Garply.");
    }

    return quux_version;
}
'''
        quux_h = '''#ifndef QUUX_H_

class Quux
{
private:
    static const int version_major;
    static const int version_minor;

public:
    Quux();
    int get_version() const;
    int quuxify() const;
};

#endif // QUUX_H_
'''
        quuxifier_cc = '''
#include "quux.h"
#include <iostream>

int
main()
{
    Quux quux;
    quux.quuxify();

    return 0;
}
'''
        quux_version_h = '''const int quux_version_major = %s;
const int quux_version_minor = %s;
'''
        mkdirp(prefix.lib64)
        mkdirp('%s/quux' % prefix.include)
        with open('%s/quux_version.h' % self.stage.source_path, 'w') as f:
            f.write(quux_version_h % (self.version[0], self.version[1:]))
        with open('%s/quux/quux.cc' % self.stage.source_path, 'w') as f:
            f.write(quux_cc % (prefix.config))
        with open('%s/quux/quux.h' % self.stage.source_path, 'w') as f:
            f.write(quux_h)
        with open('%s/quux/quuxifier.cc' % self.stage.source_path, 'w') as f:
            f.write(quuxifier_cc)
        gpp = which('/usr/bin/g++')
        gpp('-Dquux_EXPORTS',
            '-I%s' % self.stage.source_path,
            '-I%s' % spec['garply'].prefix.include,
            '-O2', '-g', '-DNDEBUG', '-fPIC',
            '-o', 'quux.cc.o',
            '-c', 'quux/quux.cc')
        gpp('-Dquux_EXPORTS',
            '-I%s' % self.stage.source_path,
            '-I%s' % spec['garply'].prefix.include,
            '-O2', '-g', '-DNDEBUG', '-fPIC',
            '-o', 'quuxifier.cc.o',
            '-c', 'quux/quuxifier.cc')
        gpp('-fPIC', '-O2', '-g', '-DNDEBUG', '-shared',
            '-Wl,-soname,libquux.so', '-o', 'libquux.so', 'quux.cc.o',
            '-Wl,-rpath,%s:%s::::' % (prefix.lib64,
                                      spec['garply'].prefix.lib64),
            '%s/libgarply.so' % spec['garply'].prefix.lib64)
        gpp('-O2', '-g', '-DNDEBUG', '-rdynamic',
            'quuxifier.cc.o', '-o', 'quuxifier',
            '-Wl,-rpath,%s:%s::::' % (prefix.lib64,
                                      spec['garply'].prefix.lib64),
            'libquux.so',
            '%s/libgarply.so' % spec['garply'].prefix.lib64)
        copy('libquux.so', '%s/libquux.so' % prefix.lib64)
        copy('quuxifier', '%s/quuxifier' % prefix.lib64)
        copy('%s/quux/quux.h' % self.stage.source_path,
             '%s/quux/quux.h' % prefix.include)
        mkdirp(prefix.bin)
        copy('quux_version.h', '%s/quux_version.h' % prefix.bin)
        os.symlink('%s/quuxifier' % prefix.lib64, '%s/quuxifier' % prefix.bin)
        os.symlink('%s/garplinator' % spec['garply'].prefix.lib64,
                   '%s/garplinator' % prefix.bin)
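All three toy packages (corge, quux, garply) encode a version as 10 * major + minor and refuse to run against a dependency reporting a different value, which is what lets the tests detect a mismatched dependency chain at run time. A compact restatement of that handshake:

def encode(major, minor):
    # Same scheme as the toy packages: version 3.0 encodes to 30.
    return 10 * major + minor

def check_compat(mine, dep, dep_name):
    if mine != dep:
        raise RuntimeError(
            'found an incompatible version of %s' % dep_name)

check_compat(encode(3, 0), encode(3, 0), 'garply')  # passes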
@@ -56,6 +56,9 @@ class Abinit(AutotoolsPackage):
    variant('hdf5', default=False,
            description='Enables HDF5+Netcdf4 with MPI. WARNING: experimental')

    variant('wannier90', default=False,
            description='Enables the Wannier90 library')

    # Add dependencies
    # currently one cannot forward options to virtual packages, see #1712.
    # depends_on('blas', when='~openmp')
@@ -84,6 +87,8 @@ class Abinit(AutotoolsPackage):
    # Cannot ask for +scalapack if it does not depend on MPI
    conflicts('+scalapack', when='~mpi')

    depends_on("wannier90+shared", when='+wannier90')

    # Elpa is a substitute for scalapack and needs mpi
    # conflicts('+elpa', when='~mpi')
    # conflicts('+elpa', when='+scalapack')
@@ -95,12 +100,25 @@ def configure_args(self):
        options = []
        oapp = options.append

        if '+wannier90' in spec:
            oapp('--with-wannier90-libs=-L{0}'
                 .format(spec['wannier90'].prefix.lib + ' -lwannier -lm'))
            oapp('--with-wannier90-incs=-I{0}'
                 .format(spec['wannier90'].prefix.modules))
            oapp('--with-wannier90-bins={0}'
                 .format(spec['wannier90'].prefix.bin))
            oapp('--enable-connectors')
            oapp('--with-dft-flavor=wannier90')

        if '+mpi' in spec:
            # MPI version:
            # let the configure script auto-detect MPI support from mpi_prefix
            oapp('--with-mpi-prefix={0}'.format(spec['mpi'].prefix))
            oapp('--enable-mpi=yes')
            oapp('--enable-mpi-io=yes')
            oapp('MPIFC={0}/mpifc'.format(spec['mpi'].prefix.bin))
        if '~wannier90' in spec:
            oapp('--with-dft-flavor=atompaw+libxc')

        # Activate OpenMP in Abinit Fortran code.
        if '+openmp' in spec:
@@ -129,7 +147,6 @@ def configure_args(self):
            '--with-fft-incs=-I%s' % spec['fftw'].prefix.include,
            '--with-fft-libs=-L%s %s' % (spec['fftw'].prefix.lib, fftlibs),
        ])
        oapp('--with-dft-flavor=atompaw+libxc')

        # LibXC library
        libxc = spec['libxc:fortran']
@@ -10,10 +10,11 @@ class AbseilCpp(CMakePackage):
    """Abseil Common Libraries (C++)"""

    homepage = "https://abseil.io/"
    url = "https://github.com/abseil/abseil-cpp/archive/20180600.tar.gz"
    url = "https://github.com/abseil/abseil-cpp/archive/20200225.2.tar.gz"

    maintainers = ['jcftang']

    version('20200225.2', sha256='f41868f7a938605c92936230081175d1eae87f6ea2c248f41077c8f88316f111')
    version('20200225.1', sha256='0db0d26f43ba6806a8a3338da3e646bb581f0ca5359b3a201d8fb8e4752fd5f8')
    version('20190808', sha256='8100085dada279bf3ee00cd064d43b5f55e5d913be0dfe2906f06f8f28d5b37e')
    version('20181200', sha256='e2b53bfb685f5d4130b84c4f3050c81bf48c497614dc85d91dbd3ed9129bce6d')
@@ -24,6 +24,10 @@ class Acl(AutotoolsPackage):
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('attr')
    depends_on('gettext')

    def setup_build_environment(self, env):
        env.append_flags('LDFLAGS', '-lintl')

    def autoreconf(self, spec, prefix):
        bash = which('bash')
@@ -6,7 +6,7 @@
from spack import *


class Acts(CMakePackage):
class Acts(CMakePackage, CudaPackage):
    """
    A Common Tracking Software (Acts)

@@ -35,6 +35,12 @@ class Acts(CMakePackage):

    # Supported Acts versions
    version('master', branch='master')
    version('0.27.1', commit='8ba3010a532137bc0ab6cf83a38b483cef646a01')
    version('0.27.0', commit='f7b1a1c27d5a95d08bb67236ad0e117fcd1c679f')
    version('0.26.0', commit='cf542b108b31fcc349fc18fb0466f889e4e42aa6')
    version('0.25.2', commit='76bf1f3e4be51d4d27126b473a2caa8d8a72b320')
    version('0.25.1', commit='6e8a1ea6d2c7385a78e3e190efb2a8a0c1fa957f')
    version('0.25.0', commit='0aca171951a214299e8ff573682b1c5ecec63d42')
    version('0.24.0', commit='ef4699c8500bfea59a5fe88bed67fde2f00f0adf')
    version('0.23.0', commit='dc443dd7e663bc4d7fb3c1e3f1f75aaf57ffd4e4')
    version('0.22.1', commit='ca1b8b1645db6b552f44c48d2ff34c8c29618f3a')
@@ -90,10 +96,11 @@ class Acts(CMakePackage):

    # Build dependencies
    depends_on('boost @1.62:1.69.99 +program_options +test', when='@:0.10.3')
    depends_on('boost @1.69: +filesystem +program_options +test', when='@0.10.4:')
    depends_on('boost @1.69: +filesystem +program_options +test', when='@0.10.4:0.25')
    depends_on('boost @1.69: +program_options +test', when='@0.26:')
    depends_on('cmake @3.11:', type='build')
    depends_on('dd4hep @1.10: +xercesc', when='+dd4hep')
    depends_on('dd4hep @1.10: +geant4 +xercesc', when='+dd4hep +geant4')
    depends_on('dd4hep @1.10:', when='+dd4hep')
    depends_on('dd4hep @1.10: +geant4', when='+dd4hep +geant4')
    depends_on('eigen @3.2.9:', type='build')
    depends_on('geant4', when='+geant4')
    depends_on('hepmc3@3.1:', when='+hepmc3')
@@ -141,6 +148,7 @@ def example_cmake_variant(cmake_label, spack_variant):

        args = [
            cmake_variant("BENCHMARKS", "benchmarks"),
            cmake_variant("CUDA_PLUGIN", "cuda"),
            cmake_variant("DD4HEP_PLUGIN", "dd4hep"),
            cmake_variant("DIGITIZATION_PLUGIN", "digitization"),
            cmake_variant("EXAMPLES", "examples"),
@@ -157,6 +165,10 @@ def example_cmake_variant(cmake_label, spack_variant):
            cmake_variant("TGEO_PLUGIN", "tgeo")
        ]

        cuda_arch = spec.variants['cuda_arch'].value
        if cuda_arch != 'none':
            args.append('-DCUDA_FLAGS=-arch=sm_{0}'.format(cuda_arch[0]))

        if 'root' in spec:
            cxxstd = spec['root'].variants['cxxstd'].value
            args.append("-DCMAKE_CXX_STANDARD={0}".format(cxxstd))
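When the cuda variant is on, the package forwards the first requested compute capability to nvcc. A plain restatement of that flag construction (cuda_arch here stands in for the variant's value, assumed to be a sequence of capability strings with 'none' meaning unset):

def cuda_flags(cuda_arch):
    """Mimic the -DCUDA_FLAGS construction in cmake_args above."""
    if cuda_arch and cuda_arch[0] != 'none':
        return '-DCUDA_FLAGS=-arch=sm_{0}'.format(cuda_arch[0])
    return None

print(cuda_flags(['70']))    # -DCUDA_FLAGS=-arch=sm_70
print(cuda_flags(['none']))  # None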
@@ -21,4 +21,7 @@ class Aegean(MakefilePackage):

    def edit(self, spec, prefix):
        makefile = FileFilter('Makefile')
        if spec.target.family == 'aarch64':
            makefile.filter('-m64', '')

        makefile.filter('/usr/local', prefix)
@@ -9,28 +9,41 @@

class Amber(Package, CudaPackage):
    """Amber is a suite of biomolecular simulation programs.
    """Amber is a suite of biomolecular simulation programs together
    with Amber tools.

    Note: A manual download is required for Amber.
    Spack will search your current directory for the download file.
    Alternatively, add this file to a mirror so that Spack can find it.
    Note: The version number is composed of the Amber version (major)
    and the tools version (minor). A manual download is required for
    both Amber and Amber tools.
    Spack will search your current directory for the download files.
    Alternatively, add the files to a mirror so that Spack can find them.
    For instructions on how to set up a mirror, see
    http://spack.readthedocs.io/en/latest/mirrors.html"""

    homepage = "http://ambermd.org/"
    url = "file://{0}/Amber18.tar.bz2".format(os.getcwd())
    maintainers = ['hseara']

    version('18', sha256='2060897c0b11576082d523fb63a51ba701bc7519ff7be3d299d5ec56e8e6e277')
    version('16', sha256='3b7ef281fd3c46282a51b6a6deed9ed174a1f6d468002649d84bfc8a2577ae5d')
    def url_for_version(self, version):
        url = "file://{0}/Amber{1}.tar.bz2".format(
            os.getcwd(), version.up_to(1))
        return url

    version(
        '18.20', sha256='2060897c0b11576082d523fb63a51ba701bc7519ff7be3d299d5ec56e8e6e277')
    version(
        '18.19', sha256='2060897c0b11576082d523fb63a51ba701bc7519ff7be3d299d5ec56e8e6e277')
    version(
        '16.16', sha256='3b7ef281fd3c46282a51b6a6deed9ed174a1f6d468002649d84bfc8a2577ae5d')

    resources = [
        # [version amber, version ambertools, sha256sum]
        ('18', '20', 'b1e1f8f277c54e88abc9f590e788bbb2f7a49bcff5e8d8a6eacfaf332a4890f9'),
        ('18', '19', '0c86937904854b64e4831e047851f504ec45b42e593db4ded92c1bee5973e699'),
        ('16', '16', '7b876afe566e9dd7eb6a5aa952a955649044360f15c1f5d4d91ba7f41f3105fa'),
    ]
    for ver, ambertools_ver, checksum in resources:
        resource(when='@{0}'.format(ver),
        resource(when='@{0}.{1}'.format(ver, ambertools_ver),
                 name='AmberTools',
                 url='file://{0}/AmberTools{1}.tar.bz2'.format(os.getcwd(),
                                                               ambertools_ver),
@@ -100,10 +113,14 @@ class Amber(Package, CudaPackage):
    depends_on('cuda@7.5.18', when='@:16+cuda')

    # conflicts
    conflicts('+x11', when='platform=cray', msg='x11 amber applications not available for cray')
    conflicts('+openmp', when='%clang', msg='OpenMP optimizations not available for the clang compiler')
    conflicts('+openmp', when='%apple-clang', msg='OpenMP optimizations not available for the Apple clang compiler')
    conflicts('+openmp', when='%pgi', msg='OpenMP optimizations not available for the pgi compiler')
    conflicts('+x11', when='platform=cray',
              msg='x11 amber applications not available for cray')
    conflicts('+openmp', when='%clang',
              msg='OpenMP not available for the clang compiler')
    conflicts('+openmp', when='%apple-clang',
              msg='OpenMP not available for the Apple clang compiler')
    conflicts('+openmp', when='%pgi',
              msg='OpenMP not available for the pgi compiler')

    def setup_build_environment(self, env):
        amber_src = self.stage.source_path
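The two-part version scheme drives both download URLs: the major part selects the Amber tarball and the minor part the AmberTools tarball. A plain-Python sketch of the mapping (a hypothetical helper for illustration; the package itself uses url_for_version() plus one resource() per AmberTools release):

def amber_urls(version, cwd='/home/user/downloads'):
    """Split an '18.20'-style version into the two manual-download URLs."""
    amber, ambertools = version.split('.')
    return ('file://{0}/Amber{1}.tar.bz2'.format(cwd, amber),
            'file://{0}/AmberTools{1}.tar.bz2'.format(cwd, ambertools))

print(amber_urls('18.20'))
# ('file:///home/user/downloads/Amber18.tar.bz2',
#  'file:///home/user/downloads/AmberTools20.tar.bz2')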
@@ -20,4 +20,5 @@ class Amdblis(BlisBase):
    url = "https://github.com/amd/blis/archive/2.1.tar.gz"
    git = "https://github.com/amd/blis.git"

    version('2.2', sha256='e1feb60ac919cf6d233c43c424f6a8a11eab2c62c2c6e3f2652c15ee9063c0c9')
    version('2.1', sha256='3b1d611d46f0f13b3c0917e27012e0f789b23dbefdddcf877b20327552d72fb3')
@@ -18,6 +18,7 @@ class Amrex(CMakePackage):
    maintainers = ['mic84', 'asalmgren']

    version('develop', branch='development')
    version('20.07', sha256='c386f566f4c57ee56b5630f79ce2c6117d5a612a4aab69b7b26e48d577251165')
    version('20.06', sha256='be2f2a5107111fcb8b3928b76024b370c7cb01a9e5dd79484cf7fcf59d0b4858')
    version('20.05', sha256='97d753bb75e845a0a959ec1a044a48e6adb86dd008b5e29ce7a01d49ed276338')
    version('20.04', sha256='a7ece54d5d89cc00fd555551902a0d4d0fb50db15d2600f441353eed0dddd83b')
@@ -57,6 +58,12 @@ class Amrex(CMakePackage):
            values=('Debug', 'Release'))
    variant('sundials', default=False,
            description='Build AMReX with SUNDIALS support')
    variant('hdf5', default=False,
            description='Enable HDF5-based I/O')
    variant('hypre', default=False,
            description='Enable Hypre interfaces')
    variant('petsc', default=False,
            description='Enable PETSc interfaces')

    # Build dependencies
    depends_on('mpi', when='+mpi')
@@ -68,6 +75,24 @@ class Amrex(CMakePackage):
    conflicts('%apple-clang')
    conflicts('%clang')

    # Check options compatibility
    conflicts('+sundials', when='~fortran',
              msg='AMReX SUNDIALS support needs AMReX Fortran API (+fortran)')
    conflicts('+hdf5', when='@:20.06',
              msg='AMReX HDF5 support needs AMReX newer than version 20.06')
    conflicts('+hypre', when='@:20.06',
              msg='AMReX Hypre support needs AMReX newer than version 20.06')
    conflicts('+hypre', when='~fortran',
              msg='AMReX Hypre support needs AMReX Fortran API (+fortran)')
    conflicts('+hypre', when='~linear_solvers',
              msg='AMReX Hypre support needs variant +linear_solvers')
    conflicts('+petsc', when='@:20.06',
              msg='AMReX PETSc support needs AMReX newer than version 20.06')
    conflicts('+petsc', when='~fortran',
              msg='AMReX PETSc support needs AMReX Fortran API (+fortran)')
    conflicts('+petsc', when='~linear_solvers',
              msg='AMReX PETSc support needs variant +linear_solvers')

    def url_for_version(self, version):
        if version >= Version('20.05'):
            url = "https://github.com/AMReX-Codes/amrex/releases/download/{0}/amrex-{0}.tar.gz"
@@ -89,11 +114,16 @@ def cmake_args(self):
            self.spec.variants['precision'].value.upper(),
            '-DENABLE_EB:BOOL=%s' % self.cmake_is_on('+eb'),
            '-DXSDK_ENABLE_Fortran:BOOL=%s' % self.cmake_is_on('+fortran'),
            '-DENABLE_FORTRAN_INTERFACES:BOOL=%s'
            % self.cmake_is_on('+fortran'),
            '-DENABLE_LINEAR_SOLVERS:BOOL=%s' %
            self.cmake_is_on('+linear_solvers'),
            '-DENABLE_AMRDATA:BOOL=%s' % self.cmake_is_on('+amrdata'),
            '-DENABLE_PARTICLES:BOOL=%s' % self.cmake_is_on('+particles'),
            '-DENABLE_SUNDIALS:BOOL=%s' % self.cmake_is_on('+sundials')
            '-DENABLE_SUNDIALS:BOOL=%s' % self.cmake_is_on('+sundials'),
            '-DENABLE_HDF5:BOOL=%s' % self.cmake_is_on('+hdf5'),
            '-DENABLE_HYPRE:BOOL=%s' % self.cmake_is_on('+hypre'),
            '-DENABLE_PETSC:BOOL=%s' % self.cmake_is_on('+petsc'),
        ]
        if self.spec.satisfies('%fj'):
            args.append('-DCMAKE_Fortran_MODDIR_FLAG=-M')
var/spack/repos/builtin/packages/apcomp/package.py (new file, 182 lines)
@@ -0,0 +1,182 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


from spack import *
import os
import socket
import llnl.util.tty as tty


def cmake_cache_entry(name, value, vtype=None):
    """
    Helper that creates CMake cache entry strings used in
    'host-config' files.
    """
    if vtype is None:
        if value == "ON" or value == "OFF":
            vtype = "BOOL"
        else:
            vtype = "PATH"
    return 'set({0} "{1}" CACHE {2} "")\n\n'.format(name, value, vtype)


class Apcomp(Package):
    """A multi use-case image compositor"""

    homepage = 'https://github.com/Alpine-DAV/ap_compositor'
    git = 'https://github.com/Alpine-DAV/ap_compositor.git'
    url = "https://github.com/Alpine-DAV/ap_compositor/releases/download/v0.0.1/apcomp-v0.0.1.tar.gz"

    maintainers = ['mclarsen', 'cyrush']

    version('master', branch='master', submodules='True')
    version('0.0.1', sha256="cbf85fe58d5d5bc2f468d081386cc8b79861046b3bb7e966edfa3f8e95b998b2")

    variant('openmp', default=True, description='Build with openmp support')
    variant('mpi', default=True, description='Build with MPI support')
    variant('shared', default=True, description='Build Shared Library')

    depends_on('cmake@3.9:', type='build')
    depends_on("mpi", when="+mpi")

    root_cmakelists_dir = 'src'

    def install(self, spec, prefix):
        """
        Build and install APComp
        """
        with working_dir('spack-build', create=True):
            host_cfg_fname = self.create_host_config(spec,
                                                     prefix)
            cmake_args = []
            # if we have a static build, we need to avoid any of
            # spack's default cmake settings related to rpaths
            # (see: https://github.com/LLNL/spack/issues/2658)
            if "+shared" in spec:
                cmake_args.extend(std_cmake_args)
            else:
                for arg in std_cmake_args:
                    if arg.count("RPATH") == 0:
                        cmake_args.append(arg)
            cmake_args.extend(["-C", host_cfg_fname, "../src"])
            print("Configuring APComp...")
            cmake(*cmake_args)
            print("Building APComp...")
            make()
            print("Installing APComp...")
            make("install")
            # install copy of host config for provenance
            install(host_cfg_fname, prefix)

    def create_host_config(self, spec, prefix):
        """
        This method creates a 'host-config' file that specifies
        all of the options used to configure and build apcomp.
        """

        #######################
        # Compiler Info
        #######################
        c_compiler = env["SPACK_CC"]
        cpp_compiler = env["SPACK_CXX"]

        #######################################################################
        # We directly fetch the names of the actual compilers to create a
        # 'host config' file that works outside of the spack install env.
        #######################################################################

        sys_type = spec.architecture
        # if on llnl systems, we can use the SYS_TYPE
        if "SYS_TYPE" in env:
            sys_type = env["SYS_TYPE"]

        ##############################################
        # Find and record what CMake is used
        ##############################################

        if "+cmake" in spec:
            cmake_exe = spec['cmake'].command.path
        else:
            cmake_exe = which("cmake")
            if cmake_exe is None:
                msg = 'failed to find CMake (and cmake variant is off)'
                raise RuntimeError(msg)
            cmake_exe = cmake_exe.path

        host_cfg_fname = "%s-%s-%s-apcomp.cmake" % (socket.gethostname(),
                                                    sys_type,
                                                    spec.compiler)

        cfg = open(host_cfg_fname, "w")
        cfg.write("##################################\n")
        cfg.write("# spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler))
        cfg.write("##################################\n\n")

        # Include path to cmake for reference
        cfg.write("# cmake from spack \n")
        cfg.write("# cmake executable path: %s\n\n" % cmake_exe)

        #######################
        # Compiler Settings
        #######################
        cfg.write("#######\n")
        cfg.write("# using %s compiler spec\n" % spec.compiler)
        cfg.write("#######\n\n")
        cfg.write("# c compiler used by spack\n")
        cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", c_compiler))
        cfg.write("# cpp compiler used by spack\n")
        cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", cpp_compiler))

        # shared vs static libs
        if "+shared" in spec:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "ON"))
        else:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "OFF"))

        if "+openmp" in spec:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "OFF"))

        if "+mpi" in spec:
            mpicc_path = spec['mpi'].mpicc
            mpicxx_path = spec['mpi'].mpicxx
            mpifc_path = spec['mpi'].mpifc
            # if we are using compiler wrappers on cray systems
            # use those for mpi wrappers, b/c spec['mpi'].mpicxx
            # etc may return the spack compiler wrappers
            # which can trip up mpi detection in CMake 3.14
            if cpp_compiler == "CC":
                mpicc_path = "cc"
                mpicxx_path = "CC"
                mpifc_path = "ftn"
            cfg.write(cmake_cache_entry("ENABLE_MPI", "ON"))
            cfg.write(cmake_cache_entry("MPI_C_COMPILER", mpicc_path))
            cfg.write(cmake_cache_entry("MPI_CXX_COMPILER", mpicxx_path))
            cfg.write(cmake_cache_entry("MPI_Fortran_COMPILER", mpifc_path))
            mpiexe_bin = join_path(spec['mpi'].prefix.bin, 'mpiexec')
            if os.path.isfile(mpiexe_bin):
                # starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
                # vs the older versions which expect MPIEXEC
                if self.spec["cmake"].satisfies('@3.10:'):
                    cfg.write(cmake_cache_entry("MPIEXEC_EXECUTABLE",
                                                mpiexe_bin))
                else:
                    cfg.write(cmake_cache_entry("MPIEXEC",
                                                mpiexe_bin))
        else:
            cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))

        cfg.write("##################################\n")
        cfg.write("# end spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.close()

        host_cfg_fname = os.path.abspath(host_cfg_fname)
        tty.info("spack generated apcomp host-config file: " + host_cfg_fname)
        return host_cfg_fname
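Since cmake_cache_entry is defined in full above, its output can be shown directly; each call contributes one cache line to the generated host-config:

# Condensed copy of cmake_cache_entry from the package above, so the
# example runs standalone.
def cmake_cache_entry(name, value, vtype=None):
    if vtype is None:
        vtype = "BOOL" if value in ("ON", "OFF") else "PATH"
    return 'set({0} "{1}" CACHE {2} "")\n\n'.format(name, value, vtype)

print(cmake_cache_entry("ENABLE_MPI", "ON"), end='')
# set(ENABLE_MPI "ON" CACHE BOOL "")
print(cmake_cache_entry("CMAKE_C_COMPILER", "/usr/bin/gcc"), end='')
# set(CMAKE_C_COMPILER "/usr/bin/gcc" CACHE PATH "")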
@@ -35,22 +35,21 @@ class Ascent(Package, CudaPackage):

    homepage = "https://github.com/Alpine-DAV/ascent"
    git = "https://github.com/Alpine-DAV/ascent.git"
    url = "https://github.com/Alpine-DAV/ascent/releases/download/v0.5.0/ascent-v0.5.0-src-with-blt.tar.gz"
    url = "https://github.com/Alpine-DAV/ascent/releases/download/v0.5.1/ascent-v0.5.1-src-with-blt.tar.gz"

    maintainers = ['cyrush']

    version('develop',
            branch='develop',
            submodules=True)

    version('0.5.0', sha256='2837b7371db3ac1bcc31a479d7cf0eb62a503cacadfa4187061502b3c4a89fa0')
            submodules=True,
            preferred=True)

    ###########################################################################
    # package variants
    ###########################################################################

    variant("shared", default=True, description="Build Ascent as shared libs")
    variant("test", default=True, description='Enable Ascent unit tests')
    variant('test', default=True, description='Enable Ascent unit tests')

    variant("mpi", default=True, description="Build Ascent MPI Support")
    variant("serial", default=True, description="build serial (non-mpi) libraries")

@@ -68,11 +67,15 @@ class Ascent(Package, CudaPackage):
    variant("cuda", default=False, description="Build cuda support")
    variant("mfem", default=False, description="Build MFEM filter support")
    variant("adios", default=False, description="Build Adios filter support")
    variant("dray", default=False, description="Build with Devil Ray support")

    # variants for dev-tools (docs, etc)
    variant("doc", default=False, description="Build Conduit's documentation")
    variant("doc", default=False, description="Build Ascent's documentation")

    ###########################################################################
    # variant for BabelFlow runtime
    variant("babelflow", default=False, description="Build with BabelFlow")

    ###########################################################################
    # package dependencies
    ###########################################################################

@@ -102,29 +105,54 @@ class Ascent(Package, CudaPackage):
    depends_on("mpi", when="+mpi")
    depends_on("py-mpi4py", when="+mpi+python+shared")

    #######################
    # BabelFlow
    #######################
    depends_on('babelflow@develop', when='+babelflow+mpi')
    depends_on('parallelmergetree@develop', when='+babelflow+mpi')

    #############################
    # TPLs for Runtime Features
    #############################

    depends_on("vtk-h@0.5.0", when="+vtkh")
    depends_on("vtk-h@0.5.0~openmp", when="+vtkh~openmp")
    depends_on("vtk-h@0.5.0+cuda+openmp", when="+vtkh+cuda+openmp")
    depends_on("vtk-h@0.5.0+cuda~openmp", when="+vtkh+cuda~openmp")
    depends_on("vtk-h", when="+vtkh")
    depends_on("vtk-h~openmp", when="+vtkh~openmp")
    depends_on("vtk-h+cuda+openmp", when="+vtkh+cuda+openmp")
    depends_on("vtk-h+cuda~openmp", when="+vtkh+cuda~openmp")

    depends_on("vtk-h@0.5.0~shared", when="~shared+vtkh")
    depends_on("vtk-h@0.5.0~shared~openmp", when="~shared+vtkh~openmp")
    depends_on("vtk-h@0.5.0~shared+cuda", when="~shared+vtkh+cuda")
    depends_on("vtk-h@0.5.0~shared+cuda~openmp", when="~shared+vtkh+cuda~openmp")
    depends_on("vtk-h~shared", when="~shared+vtkh")
    depends_on("vtk-h~shared~openmp", when="~shared+vtkh~openmp")
    depends_on("vtk-h~shared+cuda", when="~shared+vtkh+cuda")
    depends_on("vtk-h~shared+cuda~openmp", when="~shared+vtkh+cuda~openmp")

    # mfem
    depends_on("mfem+threadsafe+shared+mpi+conduit", when="+shared+mfem+mpi")
    depends_on("mfem+threadsafe~shared+mpi+conduit", when="~shared+mfem+mpi")
    depends_on("mfem~threadsafe~openmp+shared+mpi+conduit", when="+shared+mfem+mpi")
    depends_on("mfem~threadsafe~openmp~shared+mpi+conduit", when="~shared+mfem+mpi")

    depends_on("mfem+threadsafe+shared~mpi+conduit", when="+shared+mfem~mpi")
    depends_on("mfem+threadsafe~shared~mpi+conduit", when="~shared+mfem~mpi")
    depends_on("mfem~threadsafe~openmp+shared~mpi+conduit", when="+shared+mfem~mpi")
    depends_on("mfem~threadsafe~openmp~shared~mpi+conduit", when="~shared+mfem~mpi")

    depends_on("adios", when="+adios")

    # devil ray variants with mpi
    # we have to specify both because mfem makes us
    depends_on("dray@develop+mpi~test~utils+shared+cuda", when="+dray+mpi+cuda+shared")
    depends_on("dray@develop+mpi~test~utils+shared+openmp", when="+dray+mpi+openmp+shared")
    depends_on("dray@develop+mpi~test~utils+shared~openmp~cuda", when="+dray+mpi~openmp~cuda+shared")

    depends_on("dray@develop+mpi~test~utils~shared+cuda", when="+dray+mpi+cuda~shared")
    depends_on("dray@develop+mpi~test~utils~shared+openmp", when="+dray+mpi+openmp~shared")
    depends_on("dray@develop+mpi~test~utils~shared~openmp~cuda", when="+dray+mpi~openmp~cuda~shared")

    # devil ray variants without mpi
    depends_on("dray@develop~mpi~test~utils+shared+cuda", when="+dray~mpi+cuda+shared")
    depends_on("dray@develop~mpi~test~utils+shared+openmp", when="+dray~mpi+openmp+shared")
    depends_on("dray@develop~mpi~test~utils+shared~openmp~cuda", when="+dray~mpi~openmp~cuda+shared")

    depends_on("dray@develop~mpi~test~utils~shared+cuda", when="+dray~mpi+cuda~shared")
    depends_on("dray@develop~mpi~test~utils~shared+openmp", when="+dray~mpi+openmp~shared")
    depends_on("dray@develop~mpi~test~utils~shared~openmp~cuda", when="+dray~mpi~openmp~cuda~shared")

    #######################
    # Documentation related
    #######################
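The vtk-h and dray blocks above enumerate every variant combination by hand so that each of Ascent's own variants is forwarded to the dependency. A hypothetical condensation using the class-scope loop idiom that the cp2k diff below also uses (not an exact equivalent of the hand-written lines, and only valid inside a Spack package class body):

    import itertools

    # forward shared/cuda/openmp onto vtk-h for all eight combinations
    for shared, cuda, openmp in itertools.product('+~', repeat=3):
        depends_on('vtk-h{0}shared{1}cuda{2}openmp'.format(shared, cuda, openmp),
                   when='{0}shared+vtkh{1}cuda{2}openmp'.format(shared, cuda, openmp))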
@@ -398,6 +426,16 @@ def create_host_config(self, spec, prefix, py_site_pkgs_dir=None):
        else:
            cfg.write(cmake_cache_entry("MPIEXEC",
                                        mpiexe_bin))

        ###################################
        # BABELFLOW (also depends on mpi)
        ###################################
        if "+babelflow" in spec:
            cfg.write(cmake_cache_entry("ENABLE_BABELFLOW", "ON"))
            cfg.write(cmake_cache_entry("BabelFlow_DIR",
                                        spec['babelflow'].prefix))
            cfg.write(cmake_cache_entry("PMT_DIR",
                                        spec['parallelmergetree'].prefix))
    else:
        cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))

@@ -449,6 +487,15 @@ def create_host_config(self, spec, prefix, py_site_pkgs_dir=None):
    else:
        cfg.write("# mfem not built by spack \n")

    #######################
    # Devil Ray
    #######################
    if "+dray" in spec:
        cfg.write("# devil ray from spack \n")
        cfg.write(cmake_cache_entry("DRAY_DIR", spec['dray'].prefix))
    else:
        cfg.write("# devil ray not built by spack \n")

    #######################
    # Adios
    #######################
@@ -7,7 +7,7 @@


class Aspect(CMakePackage):
    """Parallel, extendible finite element code to simulate convection in the
    """Parallel and extensible Finite Element code to simulate convection in the
    Earth's mantle and elsewhere."""

    homepage = "https://aspect.geodynamics.org"

@@ -17,6 +17,7 @@ class Aspect(CMakePackage):
    maintainers = ['tjhei']

    version('develop', branch='master')
    version('2.2.0', sha256='6dc31c4b991c8a96495ba0e9a3c92e57f9305ba94b8dbed3c8c5cfbab91ec5c1')
    version('2.1.0', sha256='bd574d60ed9df1f4b98e68cd526a074d0527c0792763187c9851912327d861a3')
    version('2.0.1', sha256='0bf5600c42afce9d39c1d285b0654ecfdeb0f30e9f3421651c95f54ca01ac165')
    version('2.0.0', sha256='d485c07f54248e824bdfa35f3eec8971b65e8b7114552ffa2c771bc0dede8cc0')

@@ -26,9 +27,11 @@ class Aspect(CMakePackage):
           values=('Debug', 'Release'))
    variant('gui', default=False, description='Enable the deal.II parameter GUI')
    variant('fpe', default=False, description='Enable floating point exception checks')
    variant('opendap', default=False, description='Enable OPeNDAP support for remote file access')

    depends_on('dealii+p4est+trilinos+mpi')
    depends_on('dealii-parameter-gui', when='+gui')
    depends_on('libdap4', when='+opendap')

    def cmake_args(self):
        return [
var/spack/repos/builtin/packages/atf/package.py (new file, 23 lines)
@@ -0,0 +1,23 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Atf(AutotoolsPackage):
    """ATF, or Automated Testing Framework, is a collection of libraries
    to write test programs in C, C++ and POSIX shell."""

    homepage = "https://github.com/jmmv/atf"
    url = "https://github.com/jmmv/atf/archive/atf-0.21.tar.gz"

    version('0.21', sha256='da6b02d6e7242f768a7aaa7b7e52378680456e4bd9a913b6636187079c98f3cd')
    version('0.20', sha256='3677cf957d7f574835b8bdd385984ba928d5695b3ff28f958e4227f810483ab7')
    version('0.19', sha256='f9b1d76dad7c34ae61a75638edc517fc05b10fa4c8f97b1d13d739bffee79b16')

    depends_on('m4', type='build')
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
var/spack/repos/builtin/packages/babelflow/package.py (new file, 31 lines)
@@ -0,0 +1,31 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Babelflow(CMakePackage):
    """BabelFlow is an Embedded Domain Specific Language to describe
    algorithms using a task graph abstraction which allows them to be
    executed on top of one of several available runtime systems."""

    homepage = "https://github.com/sci-visus/BabelFlow"
    git = 'https://github.com/sci-visus/BabelFlow.git'

    maintainers = ['spetruzza']

    version('develop',
            branch='ascent',
            submodules=True)

    depends_on('mpi')

    variant("shared", default=True, description="Build Babelflow as shared libs")

    def cmake_args(self):
        # map the +shared variant onto the CMake option
        args = [
            '-DBUILD_SHARED_LIBS:BOOL={0}'.format(
                'ON' if '+shared' in self.spec else 'OFF')]
        return args
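For reference, later Spack releases provide a CMakePackage helper that renders the same flag directly from the variant; it is not available in the tree being diffed, so treat this as a hedged sketch rather than a drop-in change:

    def cmake_args(self):
        # equivalent to the hand-formatted -DBUILD_SHARED_LIBS flag above
        return [self.define_from_variant('BUILD_SHARED_LIBS', 'shared')]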
@@ -9,13 +9,14 @@
class Bbcp(Package):
    """Securely and quickly copy data from source to target"""

    homepage = "http://www.slac.stanford.edu/~abh/bbcp/"
    git = "http://www.slac.stanford.edu/~abh/bbcp/bbcp.git"
    homepage = "https://www.slac.stanford.edu/~abh/bbcp/"
    git = "https://www.slac.stanford.edu/~abh/bbcp/bbcp.git"

    version('git', branch='master')
    version('master', branch='master')

    depends_on('zlib')
    depends_on('openssl')
    depends_on('libnsl')

    def install(self, spec, prefix):
        cd("src")
@@ -14,8 +14,17 @@ class Bison(AutotoolsPackage, GNUMirrorPackage):
    generalized LR (GLR) parser employing LALR(1) parser tables."""

    homepage = "https://www.gnu.org/software/bison/"
    gnu_mirror_path = "bison/bison-3.4.2.tar.gz"
    gnu_mirror_path = "bison/bison-3.6.4.tar.gz"

    version('3.6.4', sha256='8183de64b5383f3634942c7b151bf2577f74273b2731574cdda8a8f3a0ab13e9')
    version('3.6.3', sha256='4b4c4943931e811f1073006ce3d8ee022a02b11b501e9cbf4def3613b24a3e63')
    version('3.6.2', sha256='e28ed3aad934de2d1df68be209ac0b454f7b6d3c3d6d01126e5cd2cbadba089a')
    version('3.6.1', sha256='1120f8bfe2cc13e5e1e3f671dc41b1a535ca5a75a70d5b349c19da9d4389f74d')
    version('3.6', sha256='f630645e330bde5847266cc5c8194f0135ced75cced150358d9abe572b95f81c')
    version('3.5.3', sha256='34e201d963156618a0ea5bc87220f660a1e08403dd3c7c7903d4f38db3f40039')
    version('3.5.2', sha256='b4dbb6dd080f4db7f344f16506502973ca2b15f15c7dbbd1c1c278a456d094fa')
    version('3.5.1', sha256='4cef2322d96751be1c0d04f3e57adbb30e7fea83af9c00f98efa6e7509296f25')
    version('3.5', sha256='0b36200b9868ee289b78cefd1199496b02b76899bbb7e84ff1c0733a991313d1')
    version('3.4.2', sha256='ff3922af377d514eca302a6662d470e857bd1a591e96a2050500df5a9d59facf')
    version('3.4.1', sha256='7007fc89c216fbfaff5525359b02a7e5b612694df5168c74673f67055f015095')
    version('3.3.2', sha256='0fda1d034185397430eb7b0c9e140fb37e02fbfc53b90252fa5575e382b6dbd1')
@@ -21,5 +21,6 @@ def setup_build_environment(self, env):
        env.set('MACHTYPE', 'x86_64')

    def install(self, spec, prefix):
        filter_file('CC=.*', 'CC={0}'.format(spack_cc), 'inc/common.mk')
        mkdirp(prefix.bin)
        make("BINDIR=%s" % prefix.bin)
@@ -24,6 +24,7 @@ class Bliss(Package):
    patch("Makefile.spack.patch")

    def install(self, spec, prefix):
        filter_file('__DATE__', ' __DATE__ ', 'bliss.cc')
        # The Makefile isn't portable; use our own instead
        makeargs = ["-f", "Makefile.spack",
                    "PREFIX=%s" % prefix, "GMP_PREFIX=%s" % spec["gmp"].prefix]
@@ -27,5 +27,12 @@ class Blktrace(MakefilePackage):

    depends_on('libaio')

    def edit(self, spec, prefix):
        makefiles = ['Makefile', 'btreplay/Makefile',
                     'btt/Makefile', 'iowatcher/Makefile']
        for m in makefiles:
            makefile = FileFilter(m)
            makefile.filter('CC.*=.*', 'CC = {0}'.format(spack_cc))

    def install(self, spec, prefix):
        install_tree('.', prefix)
@@ -22,5 +22,6 @@ class Brpc(CMakePackage):
    depends_on('gflags')
    depends_on('protobuf')
    depends_on('leveldb')
    depends_on('openssl')

    patch('narrow.patch', sha256='d7393029443853ddda6c09e3d2185ac2f60920a36a8b685eb83b6b80c1535539', when='@:0.9.7')
var/spack/repos/builtin/packages/bwa/bwa_for_aarch64.patch (new file, 13 lines)
@@ -0,0 +1,13 @@
diff --git a/ksw.c b/ksw.c
index 9793e5e..2eecef4 100644
--- a/ksw.c
+++ b/ksw.c
@@ -26,7 +26,7 @@
 #include <stdlib.h>
 #include <stdint.h>
 #include <assert.h>
-#include <emmintrin.h>
+#include <SSE2NEON.h>
 #include "ksw.h"

 #ifdef USE_MALLOC_WRAPPERS
@@ -4,6 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *
import platform


class Bwa(Package):

@@ -19,10 +20,20 @@ class Bwa(Package):
            url='https://github.com/lh3/bwa/archive/0.7.12.tar.gz')

    depends_on('zlib')
    depends_on('sse2neon', when='target=aarch64:')

    patch('bwa_for_aarch64.patch', sha256='b77213b16cf8760f01e32f9a0b2cd8988cf7bac48a11267100f703cbd55c4bfd', when='target=aarch64:')

    def install(self, spec, prefix):
        filter_file(r'^INCLUDES=',
                    "INCLUDES=-I%s" % spec['zlib'].prefix.include, 'Makefile')
        zlib_inc_path = spec['zlib'].prefix.include
        if platform.machine() == 'aarch64':
            sse2neon_inc_path = spec['sse2neon'].prefix.include
            filter_file(r'^INCLUDES=', "INCLUDES=-I%s -I%s" %
                        (zlib_inc_path, sse2neon_inc_path),
                        'Makefile')
        else:
            filter_file(r'^INCLUDES=', "INCLUDES=-I%s" %
                        zlib_inc_path, 'Makefile')
        filter_file(r'^LIBS=', "LIBS=-L%s " % spec['zlib'].prefix.lib,
                    'Makefile')
        make()
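Spack's filter_file applies the regex line by line and rewrites the file in place. What the aarch64 branch above does to bwa's Makefile, sketched with plain re.sub and hypothetical prefix paths:

    import re

    line = 'INCLUDES='
    zlib_inc = '/spack/opt/zlib/include'          # hypothetical prefixes
    sse2neon_inc = '/spack/opt/sse2neon/include'
    print(re.sub(r'^INCLUDES=',
                 'INCLUDES=-I%s -I%s' % (zlib_inc, sse2neon_inc),
                 line))
    # -> INCLUDES=-I/spack/opt/zlib/include -I/spack/opt/sse2neon/include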
@@ -30,7 +30,7 @@ class Cantera(SConsPackage):
    depends_on('googletest+gmock', when='@2.3.0:')
    depends_on('eigen', when='@2.3.0:')
    depends_on('boost')
    depends_on('sundials@:3.1.2', when='+sundials')  # must be compiled with -fPIC
    depends_on('sundials@:3.1.2+lapack', when='+sundials')  # must be compiled with -fPIC
    depends_on('blas')
    depends_on('lapack')

@@ -0,0 +1,13 @@
diff --git a/configure b/configure
index 04f1a59..602c6cc 100755
--- a/configure
+++ b/configure
@@ -2434,7 +2434,7 @@ if test $($CHARMC -V | awk '{print $3}') -lt $MINIMUM_CHARM_VERSION; then
 fi

 CHARM_PATH=${CHARMC%/bin/charmc}
-CONV_CONFIG=${CHARM_PATH}/tmp/conv-config.sh
+CONV_CONFIG=${CHARM_PATH}/include/conv-config.sh
 CHARMINC=${CHARM_PATH}/include

 . ${CONV_CONFIG}
var/spack/repos/builtin/packages/changa/package.py (new file, 46 lines)
@@ -0,0 +1,46 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Changa(AutotoolsPackage):
    """ChaNGa (Charm N-body GrAvity solver) is a code to perform collisionless
    N-body simulations. It can perform cosmological simulations with periodic
    boundary conditions in comoving coordinates or simulations of isolated
    stellar systems. It also can include hydrodynamics using the Smooth
    Particle Hydrodynamics (SPH) technique. It uses a Barnes-Hut tree to
    calculate gravity, with hexadecapole expansion of nodes and
    Ewald summation for periodic forces. Timestepping is done with a leapfrog
    integrator with individual timesteps for each particle."""

    homepage = "http://faculty.washington.edu/trq/hpcc/tools/changa.html"
    url = "https://github.com/N-BodyShop/changa/archive/v3.4.tar.gz"
    git = "https://github.com/N-BodyShop/changa.git"

    version('master', branch='master')
    version('3.4', sha256='c2bceb6ac00025dfd704bb6960bc17c6df7c746872185845d1e75f47e6ce2a94')
    patch("fix_configure_path.patch")

    resource(
        name="utility",
        url="https://github.com/N-BodyShop/utility/archive/v3.4.tar.gz",
        sha256="19f9f09023ce9d642e848a36948788fb29cd7deb8e9346cdaac4c945f1416667",
        placement="utility"
    )

    depends_on("charmpp build-target=ChaNGa")

    def configure_args(self):
        args = []
        args.append("STRUCT_DIR={0}/utility/structures"
                    .format(self.stage.source_path))
        return args

    def install(self, spec, prefix):
        with working_dir(self.build_directory):
            mkdirp(prefix.bin)
            install('ChaNGa', prefix.bin)
            install('charmrun', prefix.bin)
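The resource() directive above stages the utility tarball inside ChaNGa's own source tree (placement="utility"), which is what lets configure_args() point STRUCT_DIR at it. A standalone sketch with a hypothetical stage path:

    import os

    source_path = '/tmp/spack-stage/changa-3.4'   # stands in for self.stage.source_path
    struct_dir = os.path.join(source_path, 'utility', 'structures')
    print('STRUCT_DIR={0}'.format(struct_dir))
    # -> STRUCT_DIR=/tmp/spack-stage/changa-3.4/utility/structures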
@@ -52,7 +52,7 @@ class Charmpp(Package):
        "build-target",
        default="LIBS",
        # AMPI also builds charm++, LIBS also builds AMPI and charm++
        values=("charm++", "AMPI", "LIBS"),
        values=("charm++", "AMPI", "LIBS", "ChaNGa"),
        description="Specify the target to build"
    )

@@ -217,7 +217,7 @@ def install(self, spec, prefix):
            present on the system")

        target = spec.variants["build-target"].value
        builddir = prefix + "/" + str(self.charmarch)
        builddir = prefix

        # We assume that Spack's compiler wrappers make this work. If
        # not, then we need to query the compiler vendor from Spack
@@ -34,6 +34,8 @@ class Cp2k(MakefilePackage, CudaPackage):
    variant('smm', default='libxsmm', values=('libxsmm', 'libsmm', 'blas'),
            description='Library for small matrix multiplications')
    variant('plumed', default=False, description='Enable PLUMED support')
    variant('libint', default=True,
            description='Use libint, required for HFX (and possibly others)')
    variant('libxc', default=True,
            description='Support additional functionals via libxc')
    variant('pexsi', default=False,

@@ -82,14 +84,15 @@ class Cp2k(MakefilePackage, CudaPackage):
    # use pkg-config (support added in libxsmm-1.10) to link to libxsmm
    depends_on('pkgconfig', type='build', when='smm=libxsmm')
    # ... and in CP2K 7.0+ for linking to libint2
    depends_on('pkgconfig', type='build', when='@7.0:')
    depends_on('pkgconfig', type='build', when='+libint@7.0:')
    depends_on('pkgconfig', type='build', when='+libxc@7.0:')

    # libint & libxc are always statically linked
    depends_on('libint@1.1.4:1.2', when='@3.0:6.9', type='build')
    depends_on('libint@1.1.4:1.2', when='+libint@3.0:6.9', type='build')
    for lmax in HFX_LMAX_RANGE:
        # libint2 can be linked dynamically again
        depends_on('libint@2.6.0:+fortran tune=cp2k-lmax-{0}'.format(lmax),
                   when='@7.0: lmax={0}'.format(lmax))
                   when='+libint@7.0: lmax={0}'.format(lmax))

    depends_on('libxc@2.2.2:', when='+libxc@:5.5999', type='build')
    depends_on('libxc@4.0.3:', when='+libxc@6.0:6.9', type='build')

@@ -218,17 +221,10 @@ def edit(self, spec, prefix):

        dflags = ['-DNDEBUG']
        cppflags = [
            '-D__LIBINT',
            '-D__FFTW3',
            '-I{0}'.format(fftw_header_dir),
        ]

        if '@:6.9' in spec:
            cppflags += [
                '-D__LIBINT_MAX_AM=6',
                '-D__LIBDERIV_MAX_AM1=5',
            ]

        if '^mpi@3:' in spec:
            cppflags.append('-D__MPI_VERSION=3')
        elif '^mpi@2:' in spec:

@@ -287,19 +283,6 @@ def edit(self, spec, prefix):
        if 'superlu-dist@4.3' in spec:
            ldflags.insert(0, '-Wl,--allow-multiple-definition')

        if '@:6.9' in spec:
            # libint-1.x.y has to be linked statically to work around
            # inconsistencies in its Fortran interface definition
            # (short-int vs int) which otherwise causes segfaults at runtime
            # due to wrong offsets into the shared library symbols.
            libs.extend([
                os.path.join(spec['libint'].libs.directories[0], 'libderiv.a'),
                os.path.join(spec['libint'].libs.directories[0], 'libint.a'),
            ])
        else:
            fcflags += pkgconf('--cflags', 'libint2', output=str).split()
            libs += pkgconf('--libs', 'libint2', output=str).split()

        if '+plumed' in self.spec:
            dflags.extend(['-D__PLUMED2'])
            cppflags.extend(['-D__PLUMED2'])

@@ -363,6 +346,30 @@ def edit(self, spec, prefix):
            )
            libs.append(wannier)

        if '+libint' in spec:
            cppflags += ['-D__LIBINT']

            if '@:6.9' in spec:
                cppflags += [
                    '-D__LIBINT_MAX_AM=6',
                    '-D__LIBDERIV_MAX_AM1=5',
                ]

                # libint-1.x.y has to be linked statically to work around
                # inconsistencies in its Fortran interface definition
                # (short-int vs int) which otherwise causes segfaults at
                # runtime due to wrong offsets into the shared library
                # symbols.
                libs.extend([
                    os.path.join(
                        spec['libint'].libs.directories[0], 'libderiv.a'),
                    os.path.join(
                        spec['libint'].libs.directories[0], 'libint.a'),
                ])
            else:
                fcflags += pkgconf('--cflags', 'libint2', output=str).split()
                libs += pkgconf('--libs', 'libint2', output=str).split()

        if '+libxc' in spec:
            cppflags += ['-D__LIBXC']

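The pkgconf calls above use Spack's Executable wrapper, which returns the command's stdout when invoked with output=str. A minimal sketch of the idiom (assumes pkg-config is on PATH and a libint2.pc file is discoverable):

    from spack.util.executable import which

    pkgconf = which('pkg-config')
    fcflags = pkgconf('--cflags', 'libint2', output=str).split()
    libs = pkgconf('--libs', 'libint2', output=str).split()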
@@ -11,6 +11,7 @@ class Cppcheck(MakefilePackage):
    homepage = "http://cppcheck.sourceforge.net/"
    url = "https://downloads.sourceforge.net/project/cppcheck/cppcheck/1.78/cppcheck-1.78.tar.bz2"

    version('2.1', sha256='ab26eeef039e5b58aac01efb8cb664f2cc16bf9879c61bc93cd00c95be89a5f7')
    version('1.87', sha256='e3b0a46747822471df275417d4b74b56ecac88367433e7428f39288a32c581ca')
    version('1.81', sha256='bb694f37ae0b5fed48c6cdc2fb5e528daf32cefc64e16b1a520c5411323cf27e')
    version('1.78', sha256='e42696f7d6321b98cb479ad9728d051effe543b26aca8102428f60b9850786b1')
@@ -15,13 +15,22 @@ class Cpprestsdk(CMakePackage):
    homepage = "https://github.com/Microsoft/cpprestsdk"
    url = "https://github.com/Microsoft/cpprestsdk/archive/v2.9.1.tar.gz"

    version('2.10.16', git='https://github.com/Microsoft/cpprestsdk', branch='v2.10.16', submodules=True)
    version('2.9.1', sha256='537358760acd782f4d2ed3a85d92247b4fc423aff9c85347dc31dbb0ab9bab16')

    depends_on('boost@:1.69.0')
    depends_on('openssl')

    # Ref: https://github.com/microsoft/cpprestsdk/commit/f9f518e4ad84577eb684ad8235181e4495299af4
    # Ref: https://github.com/Microsoft/cpprestsdk/commit/6b2e0480018530b616f61d5cdc786c92ba148bb7
    # Ref: https://github.com/microsoft/cpprestsdk/commit/70c1b14f39f5d47984fdd8a31fc357ebb5a37851
    patch('Release.patch')
    patch('Release.patch', when='@2.9.1')

    root_cmakelists_dir = 'Release'

    def cmake_args(self):
        args = [
            '-DWERROR:BOOL=Off'
        ]

        return args
@@ -22,6 +22,9 @@
# format returned by platform.system() and 'arch' by platform.machine()

_versions = {
    '11.0.2': {
        'Linux-x86_64': ('48247ada0e3f106051029ae8f70fbd0c238040f58b0880e55026374a959a69c1', 'http://developer.download.nvidia.com/compute/cuda/11.0.2/local_installers/cuda_11.0.2_450.51.05_linux.run'),
        'Linux-ppc64le': ('db06d0f3fbf6f7aa1f106fc921ad1c86162210a26e8cb65b171c5240a3bf75da', 'http://developer.download.nvidia.com/compute/cuda/11.0.2/local_installers/cuda_11.0.2_450.51.05_linux_ppc64le.run')},
    '10.2.89': {
        'Linux-x86_64': ('560d07fdcf4a46717f2242948cd4f92c5f9b6fc7eae10dd996614da913d5ca11', 'http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run'),
        'Linux-ppc64le': ('5227774fcb8b10bd2d8714f0a716a75d7a2df240a9f2a49beb76710b1c0fc619', 'http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux_ppc64le.run')},
@@ -15,6 +15,7 @@ class Curl(AutotoolsPackage):
    # URL must remain http:// so Spack can bootstrap curl
    url = "http://curl.haxx.se/download/curl-7.60.0.tar.bz2"

    version('7.71.0', sha256='600f00ac2481a89548a4141ddf983fd9386165e1960bac91d0a1c81dca5dd341')
    version('7.68.0', sha256='207f54917dd6a2dc733065ccf18d61bb5bebeaceb5df49cd9445483e8623eeb9')
    version('7.63.0', sha256='9bab7ed4ecff77020a312d84cc5fb7eb02d58419d218f267477a724a17fd8dd8')
    version('7.60.0', sha256='897dfb2204bd99be328279f88f55b7c61592216b0542fcbe995c60aa92871e9b')

@@ -51,6 +52,7 @@ class Curl(AutotoolsPackage):
    conflicts('platform=linux', when='+darwinssl')

    depends_on('openssl', when='~darwinssl')
    depends_on('libidn2')
    depends_on('zlib')
    depends_on('nghttp2', when='+nghttp2')
    depends_on('libssh2', when='+libssh2')

@@ -61,6 +63,8 @@ def configure_args(self):
        spec = self.spec

        args = ['--with-zlib={0}'.format(spec['zlib'].prefix)]
        args.append('--with-libidn2={0}'.format(spec['libidn2'].prefix))

        if spec.satisfies('+darwinssl'):
            args.append('--with-darwinssl')
        else:
@@ -22,6 +22,7 @@ class Dd4hep(CMakePackage):
    maintainers = ['vvolkl', 'drbenmorgan']

    version('master', branch='master')
    version('1.13.0', sha256='0b1f9d902ebe21a9178c1e41204c066b29f68c8836fd1d03a9ce979811ddb295')
    version('1.12.1', sha256='85e8c775ec03c499ce10911e228342e757c81ce9ef2a9195cb253b85175a2e93')
    version('1.12.0', sha256='133a1fb8ce0466d2482f3ebb03e60b3bebb9b2d3e33d14ba15c8fbb91706b398')
    version('1.11.2', sha256='96a53dd26cb8df11c6dae54669fbc9cc3c90dd47c67e07b24be9a1341c95abc4')
var/spack/repos/builtin/packages/dftbplus/package.py (new file, 147 lines)
@@ -0,0 +1,147 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *
import os


class Dftbplus(MakefilePackage):
    """DFTB+ is an implementation of the
    Density Functional based Tight Binding (DFTB) method,
    containing many extensions to the original method."""

    homepage = "https://www.dftbplus.org"
    url = "https://github.com/dftbplus/dftbplus/archive/19.1.tar.gz"

    version('19.1', sha256='4d07f5c6102f06999d8cfdb1d17f5b59f9f2b804697f14b3bc562e3ea094b8a8')

    resource(name='slakos',
             url='https://github.com/dftbplus/testparams/archive/dftbplus-18.2.tar.gz',
             sha256='bd191b3d240c1a81a8754a365e53a78b581fc92eb074dd5beb8b56a669a8d3d1',
             destination='external/slakos',
             when='@18.2:')

    variant('mpi', default=True,
            description="Build an MPI-parallelised version of the code.")

    variant('gpu', default=False,
            description="Use the MAGMA library "
            "for GPU accelerated computation")

    variant('elsi', default=False,
            description="Use the ELSI library for large scale systems. "
            "Only has any effect if you build with '+mpi'")

    variant('sockets', default=False,
            description="Whether the socket library "
            "(external control) should be linked")

    variant('arpack', default=False,
            description="Use ARPACK for excited state DFTB functionality")

    variant('transport', default=False,
            description="Whether transport via libNEGF should be included. "
            "Only affects parallel build. "
            "(serial version is built without libNEGF/transport)")

    variant('dftd3', default=False,
            description="Use DftD3 dispersion library "
            "(if you need this dispersion model)")

    depends_on('lapack')
    depends_on('blas')
    depends_on('scalapack', when="+mpi")
    depends_on('mpi', when="+mpi")
    depends_on('elsi', when="+elsi")
    depends_on('magma', when="+gpu")
    depends_on('arpack-ng', when="+arpack")
    depends_on('dftd3-lib@0.9.2', when="+dftd3")

    def edit(self, spec, prefix):
        """
        First, change the ROOT variable, because, for some reason,
        the Makefile and the spack install script run in different
        directories.

        Then, if using GCC, rename the file 'sys/make.x86_64-linux-gnu'
        to make.arch.

        After that, edit make.arch to point to the dependencies,
        and finally set the installdir.
        """
        dircwd = os.getcwd()
        makefile = FileFilter("makefile")
        makefile.filter("ROOT := .*", "ROOT := {0}".format(dircwd))

        archmake = join_path(".", "sys", "make.x86_64-linux-gnu")
        copy(archmake, join_path(dircwd, "make.arch"))

        march = FileFilter(join_path(dircwd, 'make.arch'))

        mconfig = FileFilter(join_path(dircwd, 'make.config'))

        mconfig.filter('INSTALLDIR := .*', 'INSTALLDIR := {0}'.format(prefix))

        if '+gpu' in self.spec:
            march.filter('MAGMADIR = .*',
                         'MAGMADIR = {0}'.format(spec['magma'].prefix))

            mconfig.filter('WITH_GPU := .*', 'WITH_GPU := 1')

        if '+mpi' in self.spec:
            march.filter('SCALAPACKDIR = .*',
                         'SCALAPACKDIR = {0}'.format(spec['scalapack'].prefix))

            march.filter('LIB_LAPACK = -l.*',
                         'LIB_LAPACK = {0}'.format(spec['blas'].libs.ld_flags))

            march.filter('mpifort', '{0}'.format(spec['mpi'].mpifc))

            mconfig.filter('WITH_MPI := .*', 'WITH_MPI := 1')

            if '+elsi' in self.spec:
                mconfig.filter('WITH_ELSI := .*', 'WITH_ELSI := 1')

                has_pexsi = '+enable_pexsi' in spec['elsi']

                mconfig.filter('WITH_PEXSI := .*', 'WITH_PEXSI := {0}'.format(
                    '1' if has_pexsi is True else '0'
                ))

                march.filter("ELSIINCDIR .*", "ELSIINCDIR = {0}".format(
                    spec['elsi'].prefix.include
                ))

                march.filter("ELSIDIR .*",
                             "ELSIDIR = {0}".format(spec['elsi'].prefix))

        else:
            march.filter('LIB_LAPACK += -l.*', 'LIB_LAPACK += {0}'.format(
                spec['blas'].libs.ld_flags))

        if '+sockets' in self.spec:
            mconfig.filter('WITH_SOCKETS := .*', 'WITH_SOCKETS := 1')

        if '+transport' in self.spec:
            mconfig.filter('WITH_TRANSPORT := .*', 'WITH_TRANSPORT := 1')

        if '+arpack' in self.spec:
            march.filter('ARPACK_LIBS = .*', 'ARPACK_LIBS = {0}'.format(
                spec['arpack-ng'].libs.ld_flags
            ))

            mconfig.filter('WITH_ARPACK := .*', 'WITH_ARPACK := 1')

        if '+dftd3' in self.spec:
            march.filter('COMPILE_DFTD3 = .*', 'COMPILE_DFTD3 = 0')
            march.filter('DFTD3_INCS = .*', 'DFTD3_INCS = -I{0}'.format(
                spec['dftd3-lib'].prefix.include
            ))

            march.filter('DFTD3_LIBS = .*',
                         'DFTD3_LIBS = -L{0} -ldftd3'.format(
                             spec['dftd3-lib'].prefix))

            mconfig.filter('WITH_DFTD3 := .*', 'WITH_DFTD3 := 1')
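Nearly all of edit() above is FileFilter work: each .filter(regex, replacement) call rewrites matching lines of the named file in place. A minimal sketch of the pattern (assumes a make.config file exists in the current directory):

    from llnl.util.filesystem import FileFilter

    mconfig = FileFilter('make.config')
    mconfig.filter('WITH_MPI := .*', 'WITH_MPI := 1')   # flip one makefile switch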
var/spack/repos/builtin/packages/dftd3-lib/package.py (new file, 38 lines)
@@ -0,0 +1,38 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Dftd3Lib(MakefilePackage):
    """A dispersion correction for density functionals,
    Hartree-Fock and semi-empirical quantum chemical methods"""

    homepage = "https://www.chemie.uni-bonn.de/pctc/mulliken-center/software/dft-d3/dft-d3"
    url = "https://github.com/dftbplus/dftd3-lib/archive/0.9.2.tar.gz"

    version('0.9.2', sha256='4178f3cf2f3e7e982a7084ec66bac92b4fdf164537d9fc0ada840a11b784f0e0')

    # This fixes a concurrency bug, where make would try to start compiling
    # the dftd3 target before the lib target had finished.
    # Since the library is small, disabling parallel builds does little harm.
    parallel = False

    def edit(self, spec, prefix):
        makefile = FileFilter('make.arch')
        makefile.filter("FC = gfortran", "")
        makefile.filter("LN = gfortran", "LN = $(FC)")

    def install(self, spec, prefix):
        mkdir(prefix.lib)
        mkdir(prefix.bin)
        mkdir(prefix.include)
        install("lib/libdftd3.a", prefix.lib)
        install("prg/dftd3", prefix.bin)
        install("lib/dftd3_api.mod", prefix.include)
        install("lib/dftd3_common.mod", prefix.include)
        install("lib/dftd3_core.mod", prefix.include)
        install("lib/dftd3_pars.mod", prefix.include)
        install("lib/dftd3_sizes.mod", prefix.include)
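Setting parallel = False serializes every make invocation in the package. A hedged alternative sketch that would serialize only the racy step, using the per-call override Spack's make wrapper accepts (the target names here are taken from the comment above and are otherwise an assumption):

    def build(self, spec, prefix):
        make('lib')                    # build the library first
        make('dftd3', parallel=False)  # then the program, without -j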
var/spack/repos/builtin/packages/dray/package.py (new file, 318 lines)
@@ -0,0 +1,318 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *

import os
import socket

import llnl.util.tty as tty


def cmake_cache_entry(name, value, vtype=None):
    """
    Helper that creates CMake cache entry strings used in
    'host-config' files.
    """
    if vtype is None:
        if value == "ON" or value == "OFF":
            vtype = "BOOL"
        else:
            vtype = "PATH"
    return 'set({0} "{1}" CACHE {2} "")\n\n'.format(name, value, vtype)


class Dray(Package, CudaPackage):
    """High-Order Mesh Ray Tracer."""

    homepage = "https://github.com/LLNL/devil_ray"
    git = "https://github.com/LLNL/devil_ray.git"
    url = "https://github.com/LLNL/devil_ray/releases/download/v0.1.2/dray-v0.1.2.tar.gz"

    maintainers = ['mclarsen', 'cyrush']

    version('develop', branch='develop', submodules='True')
    version('0.1.2', sha256='46937f20124b28dc78a634e8e063a3e7a3bbfd9f424ce2680b08417010c376da')
    version('0.1.1', sha256='e5daa49ee3367c087f5028dc5a08655298beb318014c6f3f65ef4a08fcbe346c')
    version('0.1.0', sha256='8b341138e1069361351e0a94478608c5af479cca76e2f97d556229aed45c0169')

    variant('cuda', default=False, description='Build with CUDA backend')
    variant('openmp', default=True, description='Build OpenMP backend')
    variant("shared", default=True, description="Build as shared libs")
    variant("test", default=True, description='Build unit tests')
    variant("utils", default=True, description='Build utilities')
    variant("logging", default=False, description='Enable logging')
    variant("stats", default=False, description='Enable stats')
    variant("mpi", default=True, description='Enable MPI compiler')

    depends_on('cuda', when='+cuda')
    depends_on('mpi', when='+mpi')

    depends_on('cmake@3.9:', type='build')
    depends_on('cmake@3.14:', when='+cuda', type='build')

    depends_on("conduit~shared", when="~shared")
    depends_on("conduit+shared", when="+shared")

    depends_on("apcomp~shared+openmp+mpi", when="~shared+openmp+mpi")
    depends_on("apcomp+shared+openmp+mpi", when="+shared+openmp+mpi")
    depends_on("apcomp~shared~openmp+mpi", when="~shared~openmp+mpi")
    depends_on("apcomp+shared~openmp+mpi", when="+shared~openmp+mpi")
    depends_on("apcomp~shared+openmp~mpi", when="~shared+openmp~mpi")
    depends_on("apcomp+shared+openmp~mpi", when="+shared+openmp~mpi")
    depends_on("apcomp~shared~openmp~mpi", when="~shared~openmp~mpi")
    depends_on("apcomp+shared~openmp~mpi", when="+shared~openmp~mpi")

    depends_on("raja@0.9.0+cuda~openmp+shared", when="+cuda~openmp+shared")
    depends_on("raja@0.9.0+cuda+openmp+shared", when="+cuda+openmp+shared")
    depends_on("raja@0.9.0+cuda~openmp~shared", when="+cuda~openmp~shared")
    depends_on("raja@0.9.0+cuda+openmp~shared", when="+cuda+openmp~shared")

    depends_on("raja@0.9.0~cuda~openmp+shared", when="~cuda~openmp+shared")
    depends_on("raja@0.9.0~cuda+openmp+shared", when="~cuda+openmp+shared")
    depends_on("raja@0.9.0~cuda~openmp~shared", when="~cuda~openmp~shared")
    depends_on("raja@0.9.0~cuda+openmp~shared", when="~cuda+openmp~shared")

    depends_on("umpire@1.0.0+cuda+shared", when="+cuda+shared")
    depends_on("umpire@1.0.0+cuda~shared", when="+cuda~shared")
    depends_on("umpire@1.0.0~cuda+shared", when="~cuda+shared")
    depends_on("umpire@1.0.0~cuda~shared", when="~cuda~shared")

    depends_on("mfem+shared+conduit~threadsafe", when="+shared")
    depends_on("mfem~shared+conduit~threadsafe", when="~shared")

    def setup_build_environment(self, env):
        env.set('CTEST_OUTPUT_ON_FAILURE', '1')

    def install(self, spec, prefix):
        """
        Build and install Devil Ray.
        """
        with working_dir('spack-build', create=True):
            host_cfg_fname = self.create_host_config(spec,
                                                     prefix)
            cmake_args = []
            # if we have a static build, we need to avoid any of
            # spack's default cmake settings related to rpaths
            # (see: https://github.com/LLNL/spack/issues/2658)
            if "+shared" in spec:
                cmake_args.extend(std_cmake_args)
            else:
                for arg in std_cmake_args:
                    if arg.count("RPATH") == 0:
                        cmake_args.append(arg)
            cmake_args.extend(["-C", host_cfg_fname, "../src"])
            print("Configuring Devil Ray...")
            cmake(*cmake_args)
            print("Building Devil Ray...")
            make()
            # run unit tests if requested
            if "+test" in spec and self.run_tests:
                print("Running Devil Ray Unit Tests...")
                make("test")
            print("Installing Devil Ray...")
            make("install")
            # install copy of host config for provenance
            install(host_cfg_fname, prefix)

    def create_host_config(self, spec, prefix):
        """
        This method creates a 'host-config' file that specifies
        all of the options used to configure and build dray.

        For more details about 'host-config' files see:
        http://ascent.readthedocs.io/en/latest/BuildingAscent.html
        """

        #######################
        # Compiler Info
        #######################
        c_compiler = env["SPACK_CC"]
        cpp_compiler = env["SPACK_CXX"]

        #######################################################################
        # By directly fetching the names of the actual compilers we appear
        # to be doing something evil here, but this is necessary to create a
        # 'host config' file that works outside of the spack install env.
        #######################################################################

        sys_type = spec.architecture
        # if on llnl systems, we can use the SYS_TYPE
        if "SYS_TYPE" in env:
            sys_type = env["SYS_TYPE"]

        ##############################################
        # Find and record what CMake is used
        ##############################################

        if "+cmake" in spec:
            cmake_exe = spec['cmake'].command.path
        else:
            cmake_exe = which("cmake")
            if cmake_exe is None:
                msg = 'failed to find CMake (and cmake variant is off)'
                raise RuntimeError(msg)
            cmake_exe = cmake_exe.path

        host_cfg_fname = "%s-%s-%s-devil_ray.cmake" % (socket.gethostname(),
                                                       sys_type,
                                                       spec.compiler)

        cfg = open(host_cfg_fname, "w")
        cfg.write("##################################\n")
        cfg.write("# spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler))
        cfg.write("##################################\n\n")

        # Include path to cmake for reference
        cfg.write("# cmake from spack \n")
        cfg.write("# cmake executable path: %s\n\n" % cmake_exe)

        #######################
        # Compiler Settings
        #######################
        cfg.write("#######\n")
        cfg.write("# using %s compiler spec\n" % spec.compiler)
        cfg.write("#######\n\n")

        if "+mpi" in spec:
            cfg.write(cmake_cache_entry("ENABLE_MPI", "ON"))
            mpicc_path = spec['mpi'].mpicc
            mpicxx_path = spec['mpi'].mpicxx
            # if we are using compiler wrappers on cray systems,
            # use those for mpi wrappers, b/c spec['mpi'].mpicxx
            # etc. may return the spack compiler wrappers,
            # which can trip up mpi detection in CMake 3.14
            if cpp_compiler == "CC":
                mpicc_path = "cc"
                mpicxx_path = "CC"

            cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", mpicc_path))
            cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", mpicxx_path))
        else:
            cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))
            cfg.write("# c compiler used by spack\n")
            cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", c_compiler))
            cfg.write("# cpp compiler used by spack\n")
            cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", cpp_compiler))

        #######################
        # Backends
        #######################

        cfg.write("# CUDA Support\n")

        if "+cuda" in spec:
            cfg.write(cmake_cache_entry("ENABLE_CUDA", "ON"))
            if 'cuda_arch' in spec.variants:
                cuda_value = spec.variants['cuda_arch'].value
                cuda_arch = cuda_value[0]
                cfg.write(cmake_cache_entry('CUDA_ARCH',
                                            'sm_{0}'.format(cuda_arch)))
        else:
            cfg.write(cmake_cache_entry("ENABLE_CUDA", "OFF"))

        if "+openmp" in spec:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "OFF"))

        # shared vs static libs
        if "+shared" in spec:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "ON"))
        else:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "OFF"))

        #######################
        # Unit Tests
        #######################
        if "+test" in spec:
            cfg.write(cmake_cache_entry("DRAY_ENABLE_TESTS", "ON"))
        else:
            cfg.write(cmake_cache_entry("DRAY_ENABLE_TESTS", "OFF"))

        #######################
        # Utilities
        #######################
        if "+utils" in spec:
            cfg.write(cmake_cache_entry("DRAY_ENABLE_UTILS", "ON"))
        else:
            cfg.write(cmake_cache_entry("DRAY_ENABLE_UTILS", "OFF"))

        #######################
        # Logging
        #######################
        if "+logging" in spec:
            cfg.write(cmake_cache_entry("ENABLE_LOGGING", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_LOGGING", "OFF"))

        #######################
        # Stats
        #######################
        if "+stats" in spec:
            cfg.write(cmake_cache_entry("ENABLE_STATS", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_STATS", "OFF"))

        #######################################################################
        # Core Dependencies
        #######################################################################

        cfg.write("# conduit from spack \n")
        cfg.write(cmake_cache_entry("CONDUIT_DIR", spec['conduit'].prefix))

        cfg.write("# mfem from spack \n")
        cfg.write(cmake_cache_entry("MFEM_DIR", spec['mfem'].prefix))

        cfg.write("# raja from spack \n")
        cfg.write(cmake_cache_entry("RAJA_DIR", spec['raja'].prefix))

        cfg.write("# umpire from spack \n")
        cfg.write(cmake_cache_entry("UMPIRE_DIR", spec['umpire'].prefix))

        cfg.write("# apcompositor from spack \n")
        cfg.write(cmake_cache_entry("APCOMP_DIR", spec['apcomp'].prefix))

        cfg.write("##################################\n")
        cfg.write("# end spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.close()

        host_cfg_fname = os.path.abspath(host_cfg_fname)
        tty.info("spack generated dray host-config file: " + host_cfg_fname)
        return host_cfg_fname

    def cmake_args(self):
        spec = self.spec

        options = []

        if '+openmp' in spec:
            options.extend([
                '-DENABLE_OPENMP=On'])

        if '+cuda' in spec:
            options.extend([
                '-DENABLE_CUDA=On',
                '-DCUDA_TOOLKIT_ROOT_DIR=%s' % (spec['cuda'].prefix)])
            if 'cuda_arch' in spec.variants:
                cuda_value = spec.variants['cuda_arch'].value
                cuda_arch = cuda_value[0]
                options.append('-DCUDA_ARCH=sm_{0}'.format(cuda_arch))
        else:
            options.extend(['-DENABLE_CUDA=OFF'])

        options.extend(['-DRAJA_DIR=%s' % (spec['raja'].prefix)])
        options.extend(['-DMFEM_DIR=%s' % (spec['mfem'].prefix)])
        options.extend(['-DUMPIRE_DIR=%s' % (spec['umpire'].prefix)])
        options.extend(['-DCONDUIT_DIR=%s' % (spec['conduit'].prefix)])
        options.extend(['-DDRAY_ENABLE_TESTS=OFF'])
        options.extend(['-DENABLE_LOGGING=OFF'])
        options.extend(['-DENABLE_STATS=OFF'])
        options.extend(['../src'])

        return options
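The cuda_arch handling in both create_host_config() and cmake_args() above maps Spack's multi-valued cuda_arch variant onto the single sm_XX string the build expects. A standalone sketch with a hypothetical value:

    cuda_value = ('70',)          # what spec.variants['cuda_arch'].value returns
    cuda_arch = cuda_value[0]     # only the first requested arch is used
    print('-DCUDA_ARCH=sm_{0}'.format(cuda_arch))   # -> -DCUDA_ARCH=sm_70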
@@ -14,9 +14,10 @@ class Elpa(AutotoolsPackage):
    homepage = 'http://elpa.mpcdf.mpg.de/'
    url = 'http://elpa.mpcdf.mpg.de/elpa-2015.11.001.tar.gz'

    version('2020.05.001', sha256='66ff1cf332ce1c82075dc7b5587ae72511d2bcb3a45322c94af6b01996439ce5')
    version('2019.11.001', sha256='10374a8f042e23c7e1094230f7e2993b6f3580908a213dbdf089792d05aff357')
    version('2019.05.002', sha256='d2eab5e5d74f53601220b00d18185670da8c00c13e1c1559ecfb0cd7cb2c4e8d')
    version('2018.11.001',
            sha256='cc27fe8ba46ce6e6faa8aea02c8c9983052f8e73a00cfea38abf7613cb1e1b16')
    version('2018.11.001', sha256='cc27fe8ba46ce6e6faa8aea02c8c9983052f8e73a00cfea38abf7613cb1e1b16')
    version('2018.05.001.rc1', sha256='598c01da20600a4514ea4d503b93e977ac0367e797cab7a7c1b0e0e3e86490db')
    version('2017.11.001', sha256='59f99c3abe2190fac0db8a301d0b9581ee134f438669dbc92551a54f6f861820')
    version('2017.05.003', sha256='bccd49ce35a323bd734b17642aed8f2588fea4cc78ee8133d88554753bc3bf1b')
@@ -17,6 +17,8 @@ class Energyplus(Package):
    homepage = "https://energyplus.net"

    # versions require explicit URLs as they contain hashes
    version('9.3.0', sha256='c939dc4f867224e110485a8e0712ce4cfb1e06f8462bc630b54f83a18c93876c',
            url="https://github.com/NREL/EnergyPlus/releases/download/v9.3.0/EnergyPlus-9.3.0-baff08990c-Linux-x86_64.tar.gz")
    version('8.9.0', sha256='13a5192b25815eb37b3ffd019ce3b99fd9f854935f8cc4362814f41c56e9ca98',
            url="https://github.com/NREL/EnergyPlus/releases/download/v8.9.0-WithIDDFixes/EnergyPlus-8.9.0-eba93e8e1b-Linux-x86_64.tar.gz")

@@ -25,13 +27,14 @@ def install(self, spec, prefix):
        # and then symlink the appropriate targets

        # there is only one folder with a semi-predictable name so we glob it
        install_tree(glob.glob('EnergyPlus*')[0],
                     join_path(prefix.lib, 'energyplus'))
        source_dir = '.'

        if spec.satisfies('@:8.9.9'):
            source_dir = glob.glob('EnergyPlus*')[0]

        install_tree(source_dir, prefix.lib.energyplus)

        mkdirp(prefix.bin)
        os.symlink(join_path(prefix.lib, 'energyplus/energyplus'),
                   join_path(prefix.bin, 'energyplus'))
        os.symlink(join_path(prefix.lib, 'energyplus/EPMacro'),
                   join_path(prefix.bin, 'EPMacro'))
        os.symlink(join_path(prefix.lib, 'energyplus/ExpandObjects'),
                   join_path(prefix.bin, 'ExpandObjects'))
        for b in ['energyplus', 'EPMacro', 'ExpandObjects']:
            os.symlink(join_path(prefix.lib.energyplus, b),
                       join_path(prefix.bin, b))
@@ -169,7 +169,7 @@ def edit(self, spec, prefix):
            os.environ['ESMF_CXXLINKLIBS'] = '-lmpifort'
        elif '^openmpi' in spec:
            os.environ['ESMF_COMM'] = 'openmpi'
        elif '^intel-parallel-studio+mpi' in spec:
        elif '^intel-parallel-studio+mpi' in spec or '^intel-mpi' in spec:
            os.environ['ESMF_COMM'] = 'intelmpi'
        else:
            # Force use of the single-processor MPI-bypass library.
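The ESMF_COMM ladder above dispatches on whichever MPI provider appears in the spec. A hypothetical condensation as a table lookup; the probe list and the 'mpiuni' bypass value are assumptions here, not shown in this hunk:

    import os

    for probe, comm in [('^mpich', 'mpich'), ('^openmpi', 'openmpi'),
                        ('^intel-parallel-studio+mpi', 'intelmpi'),
                        ('^intel-mpi', 'intelmpi')]:
        if probe in spec:            # assumes `spec` as in edit() above
            os.environ['ESMF_COMM'] = comm
            break
    else:
        os.environ['ESMF_COMM'] = 'mpiuni'   # assumed MPI-bypass setting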
Some files were not shown because too many files have changed in this diff.