Compare commits
242 Commits
v0.15.0-sh ... v0.15.3-sh
SHA1 (242 commits):

008cf2ee15  0f25462ea6  ae4bbbd241  24bd9e3039  0efb8ef412  69775fcc07
ce772420dd  9cc01dc574  8d8cf6201b  d6d839cd3e  3534717151  e289d481ea
ed8250e055  40cd845479  3b45241566  d5b0f85ea3  c6241e72a6  f528022a7d
665a47607e  12958497dc  3a8bc7ffc6  9cbe358f84  27af499b52  24dff9cf20
e4265d3135  5e5cc99147  474a077143  d386c59de9  dedadcd2ea  983aeea850
0c44a9a504  b81339cf80  6c69b8a4d4  ec1237479e  40e2a41477  f168d63586
78a84efb4b  c6891376f4  83b281f36b  86ec698a33  897e80e596  ab32799b52
bd236918dd  3949a85f9a  bab1852340  ef814b7a32  96fa6f0c1b  e4ba1c1daf
bbbf0466dc  dc18b3e3d4  b5f82696e2  a5aa150a98  ae03782032  c729c6b93c
324c383d8e  35b7a69456  d69c32d7ef  27aaff3dc2  fc8847cf4e  1fcc00df96
697c2183d3  b320be70cb  3f24188d19  3449087284  8e9f4d0078  d7794540b2
ae44b1d7b9  df12b2bd15  148a6a8860  efba3731e5  6eb332a984  d0a83f318b
0f67b97065  d2c2e000a7  08926b5b12  aaee0bcb7e  f2a35a767a  2b05d2bf5c
cc00619929  188a371595  dce7be9932  4ac1a532f3  f42dc4fa4d  d25c7ddd6f
48a9ad3652  d55541919d  0d4740d1b1  d56711f799  99a47e407e  7efb0e541e
7340be98f6  c281eaf69f  2110b98829  88537d02e4  a2729fcd7f  bcd41cec71
6f6e896795  28a25080ca  14f3f230c1  d32bbae431  710ff8d7ce  683881f912
11d8aed6cd  ab68410c4c  fa614404e6  a6abd530bd  2b809a5374  3e13137f6e
6aa6e19d34  c2d8d8acbd  299dcdd3eb  e0f13b298d  d2ac26f844  fae57d1422
c84a05b809  05e8918076  929cb9e62e  7d1f2abd56  ab5f28aceb  4450377794
45eaa442c3  4fa519134f  815f62ce0c  b3b5ea4064  573489db71  9c42f246ed
dbdd2cb92f  406596af70  73f02b10de  9629f571bc  5e50dc5acb  59bfc22d40
1a8a147fe5  0612a9e8e9  1741279f16  c2393fe566  afbb4a5cba  e2bec75057
054e0d1d11  c8a83661c2  4e4de51f0d  28549f300d  7717f00dac  44681dbca5
f2889e698a  ea546425e8  7269a5bf51  00d7e817c6  ed7d485b58  38d387c9a5
02dd90ebf9  e72e2568dd  d9923a05e0  8c6fa66b2a  84eae97f91  12099ed55e
d0f5b69a19  ce9d30f80f  e02d955aed  b3fff20d1f  8c41173678  0bed621d0c
1d2754c3f6  ae2a867a7f  207e496162  f0391db096  084994db9c  f85da868ac
f1f31e3dfe  7f8e827db8  a63761f875  3ce16c89b7  f4ac3770b4  b0506a722e
650ab563f4  90285c7d61  6c300ab717  05d8ba170b  51f65152a5  1113357e35
d65a076c0d  845139740f  1f87b07689  cbaa1bca1c  5fb6a06c37  6e38fc56f6
c00a05bfba  9ec9327f5a  11088df402  4ea76dc95c  f0275d7e1b  516c3e659f
e62ddcb582  b3bc538df6  29fc94e29e  466f7fd996  58cfe4e078  00f7577273
4e6d189a94  9abadd4985  66d4bc3f3c  52cafe6c96  8d5aa46765  e5ec89ad5b
7bba9cd2a5  cce629e791  bb15addad5  e9e3e88f63  c797a0611c  04f3000646
f3eba3c482  02fa7b680f  8fcd917e51  9c85d87b90  b45fc97564  986f68f7ed
2cd9e1eb62  61804f201a  cf104b0f10  17106a131d  cc0dda95c4  7679e20e83
4349c091e7  ff60f51a7a  06da1f195c  3d98ad3f4c  f1bb8999ab  1e75dde7b2
f780839b87  204f15b4c1  a4fff39d7e  10016a34e0  e133b44da6  5732d8de50
509b3c3016  8a9fa9bd18  a5eabfad91  6a77f1ff45  60283775b3  4433e4de2d
aaf6f80d4c  59fb789290
.github/workflows/install_spack.sh (vendored, 23 changes)

@@ -1,5 +1,20 @@
#!/usr/bin/env sh
git clone https://github.com/spack/spack.git
echo -e "config:\n  build_jobs: 2" > spack/etc/spack/config.yaml
. spack/share/spack/setup-env.sh
spack compilers
. share/spack/setup-env.sh
echo -e "config:\n  build_jobs: 2" > etc/spack/config.yaml
spack config add "packages:all:target:[x86_64]"
# TODO: remove this explicit setting once apple-clang detection is fixed
cat <<EOF > etc/spack/compilers.yaml
compilers:
- compiler:
    spec: apple-clang@11.0.3
    paths:
      cc: /usr/bin/clang
      cxx: /usr/bin/clang++
      f77: /usr/local/bin/gfortran-9
      fc: /usr/local/bin/gfortran-9
    modules: []
    operating_system: catalina
    target: x86_64
EOF
spack compiler info apple-clang
spack debug report
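Since the ``compilers.yaml`` written by the heredoc above is hand-built, it is easy to break. A minimal way to sanity-check it is to parse the file and confirm the required fields are present. This is a sketch only, assuming PyYAML is installed; it is not part of the change above:

    import yaml  # PyYAML, assumed to be available

    with open("etc/spack/compilers.yaml") as f:
        config = yaml.safe_load(f)

    for entry in config["compilers"]:
        compiler = entry["compiler"]
        # Each entry needs a spec plus the four toolchain paths used above.
        missing = [key for key in ("cc", "cxx", "f77", "fc")
                   if not compiler["paths"].get(key)]
        print(compiler["spec"], "missing paths:", missing or "none")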
.github/workflows/linux_build_tests.yaml (vendored, 3 changes)

@@ -3,13 +3,12 @@ name: linux builds
on:
  push:
    branches:
      - master
      - develop
      - releases/**
  pull_request:
    branches:
      - master
      - develop
      - releases/**
    paths-ignore:
      # Don't run if we only modified packages in the built-in repository
      - 'var/spack/repos/builtin/**'
.github/workflows/linux_unit_tests.yaml (vendored, 80 changes)

@@ -60,3 +60,83 @@ jobs:
      uses: codecov/codecov-action@v1
      with:
        flags: unittests,linux
  flake8:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - uses: actions/setup-python@v2
      with:
        python-version: 3.8
    - name: Install Python packages
      run: |
        pip install --upgrade pip six setuptools flake8
    - name: Setup git configuration
      run: |
        # Need this for the git tests to succeed.
        git --version
        git config --global user.email "spack@example.com"
        git config --global user.name "Test User"
        git fetch -u origin develop:develop
    - name: Run flake8 tests
      run: |
        share/spack/qa/run-flake8-tests
  shell:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - uses: actions/setup-python@v2
      with:
        python-version: 3.8
    - name: Install System packages
      run: |
        sudo apt-get -y update
        sudo apt-get install -y coreutils gfortran gnupg2 mercurial ninja-build patchelf zsh fish
        # Needed for kcov
        sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev zlib1g-dev libdw-dev libiberty-dev
    - name: Install Python packages
      run: |
        pip install --upgrade pip six setuptools codecov coverage
    - name: Setup git configuration
      run: |
        # Need this for the git tests to succeed.
        git --version
        git config --global user.email "spack@example.com"
        git config --global user.name "Test User"
        git fetch -u origin develop:develop
    - name: Install kcov for bash script coverage
      env:
        KCOV_VERSION: 38
      run: |
        KCOV_ROOT=$(mktemp -d)
        wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
        tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
        mkdir -p ${KCOV_ROOT}/build
        cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
        make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
    - name: Run shell tests
      env:
        COVERAGE: true
      run: |
        share/spack/qa/run-shell-tests
    - name: Upload to codecov.io
      uses: codecov/codecov-action@v1
      with:
        flags: shelltests,linux
  documentation:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - uses: actions/setup-python@v2
      with:
        python-version: 3.8
    - name: Install System packages
      run: |
        sudo apt-get -y update
        sudo apt-get install -y coreutils ninja-build graphviz
    - name: Install Python packages
      run: |
        pip install --upgrade pip six setuptools
        pip install --upgrade -r lib/spack/docs/requirements.txt
    - name: Build documentation
      run: |
        share/spack/qa/run-doc-tests
.github/workflows/macos_python.yml (vendored, 23 changes)

@@ -8,6 +8,13 @@ on:
  schedule:
    # nightly at 1 AM
    - cron: '0 1 * * *'
  pull_request:
    branches:
      - develop
    paths:
      # Run if we modify this yaml file
      - '.github/workflows/macos_python.yml'
      # TODO: run if we touch any of the recipes involved in this

# GitHub Action Limits
# https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions

@@ -21,7 +28,8 @@ jobs:
    - name: spack install
      run: |
        . .github/workflows/install_spack.sh
        spack install -v gcc
        # 9.2.0 is the latest version on which we apply homebrew patch
        spack install -v --fail-fast gcc@9.2.0 %apple-clang

  install_jupyter_clang:
    name: jupyter
@@ -32,7 +40,8 @@ jobs:
    - name: spack install
      run: |
        . .github/workflows/install_spack.sh
        spack install -v py-jupyter %clang
        spack config add packages:opengl:paths:opengl@4.1:/usr/X11R6
        spack install -v --fail-fast py-jupyter %apple-clang

  install_scipy_clang:
    name: scipy, mpl, pd
@@ -42,9 +51,9 @@ jobs:
    - name: spack install
      run: |
        . .github/workflows/install_spack.sh
        spack install -v py-scipy %clang
        spack install -v py-matplotlib %clang
        spack install -v py-pandas %clang
        spack install -v --fail-fast py-scipy %apple-clang
        spack install -v --fail-fast py-matplotlib %apple-clang
        spack install -v --fail-fast py-pandas %apple-clang

  install_mpi4py_clang:
    name: mpi4py, petsc4py
@@ -54,5 +63,5 @@ jobs:
    - name: spack install
      run: |
        . .github/workflows/install_spack.sh
        spack install -v py-mpi4py %clang
        spack install -v py-petsc4py %clang
        spack install -v --fail-fast py-mpi4py %apple-clang
        spack install -v --fail-fast py-petsc4py %apple-clang
.github/workflows/macos_unit_tests.yaml (vendored, 3 changes)

@@ -3,13 +3,12 @@ name: macos tests
on:
  push:
    branches:
      - master
      - develop
      - releases/**
  pull_request:
    branches:
      - master
      - develop
      - releases/**
jobs:
  build:

@@ -3,13 +3,12 @@ name: python version check
on:
  push:
    branches:
      - master
      - develop
      - releases/**
  pull_request:
    branches:
      - master
      - develop
      - releases/**
jobs:
  validate:
.travis.yml (124 changes)

@@ -1,101 +1,32 @@
#=============================================================================
# Project settings
#=============================================================================
# Only build master and develop on push; do not build every branch.
# Only build releases and develop on push; do not build every branch.
branches:
  only:
    - master
    - develop
    - /^releases\/.*$/

#=============================================================================
# Build matrix
#=============================================================================

dist: bionic

jobs:
  fast_finish: true
  include:
    - stage: 'style checks'
      python: '3.8'
      os: linux
      language: python
      env: TEST_SUITE=flake8
    - stage: 'unit tests + documentation'
      python: '2.6'
      dist: trusty
      os: linux
      language: python
      addons:
        apt:
          # Everything but patchelf, that is not available for trusty
          packages:
            - ccache
            - gfortran
            - graphviz
            - gnupg2
            - kcov
            - mercurial
            - ninja-build
            - realpath
            - zsh
            - fish
      env: [ TEST_SUITE=unit, COVERAGE=true ]
    - python: '3.8'
      os: linux
      language: python
      env: [ TEST_SUITE=shell, COVERAGE=true, KCOV_VERSION=38 ]
    - python: '3.8'
      os: linux
      language: python
      env: TEST_SUITE=doc

stages:
  - 'style checks'
  - 'unit tests + documentation'


#=============================================================================
# Environment
#=============================================================================

# Docs need graphviz to build
language: python
python: '2.6'
dist: trusty
os: linux
addons:
  # for Linux builds, we use APT
  apt:
    packages:
      - ccache
      - coreutils
      - gfortran
      - graphviz
      - gnupg2
      - kcov
      - mercurial
      - ninja-build
      - patchelf
      - realpath
      - zsh
      - fish
    update: true

# ~/.ccache needs to be cached directly as Travis is not taking care of it
# (possibly because we use 'language: python' and not 'language: c')
cache:
  pip: true
  ccache: true
  directories:
    - ~/.ccache

before_install:
  - ccache -M 2G && ccache -z
  # Install kcov manually, since it's not packaged for bionic beaver
  - if [[ "$KCOV_VERSION" ]]; then
      sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev zlib1g-dev libdw-dev libiberty-dev;
      KCOV_ROOT=$(mktemp -d);
      wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz;
      tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz;
      mkdir -p ${KCOV_ROOT}/build;
      cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd - ;
      make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install;
  - if [[ "$TRAVIS_DIST" == "trusty" ]]; then
      share/spack/qa/install_patchelf.sh;
    else
      sudo apt-get update;
      sudo apt-get -y install patchelf;
    fi

# Install various dependencies
@@ -103,12 +34,8 @@ install:
  - pip install --upgrade pip
  - pip install --upgrade six
  - pip install --upgrade setuptools
  - pip install --upgrade codecov coverage==4.5.4
  - pip install --upgrade flake8
  - pip install --upgrade pep8-naming
  - if [[ "$TEST_SUITE" == "doc" ]]; then
      pip install --upgrade -r lib/spack/docs/requirements.txt;
    fi

before_script:
  # Need this for the git tests to succeed.
@@ -118,31 +45,12 @@ before_script:
  # Need this to be able to compute the list of changed files
  - git fetch origin ${TRAVIS_BRANCH}:${TRAVIS_BRANCH}

#=============================================================================
# Building
#=============================================================================
script:
  - share/spack/qa/run-$TEST_SUITE-tests
  - python bin/spack -h
  - python bin/spack help -a
  - python bin/spack -p --lines 20 spec mpileaks%gcc ^elfutils@0.170
  - python bin/spack test -x --verbose

after_success:
  - ccache -s
  - case "$TEST_SUITE" in
      unit)
        if [[ "$COVERAGE" == "true" ]]; then
          codecov --env PYTHON_VERSION
            --required
            --flags "${TEST_SUITE}${TRAVIS_OS_NAME}";
        fi
        ;;
      shell)
        codecov --env PYTHON_VERSION
          --required
          --flags "${TEST_SUITE}${TRAVIS_OS_NAME}";
    esac

#=============================================================================
# Notifications
#=============================================================================
notifications:
  email:
    recipients:

CHANGELOG.md (47 changes)

@@ -1,3 +1,50 @@
# v0.15.3 (2020-07-28)

This release contains the following bugfixes:

* Fix handling of relative view paths (#17721)
* Fixes for binary relocation (#17418, #17455)
* Fix redundant printing of error messages in build environment (#17709)

It also adds a support script for Spack tutorials:

* Add a tutorial setup script to share/spack (#17705, #17722)

# v0.15.2 (2020-07-23)

This minor release includes two new features:

* Spack install verbosity is decreased, and more debug levels are added (#17546)
* The $spack/share/spack/keys directory contains public keys that may be optionally trusted for public binary mirrors (#17684)

This release also includes several important fixes:

* MPICC and related variables are now cleaned in the build environment (#17450)
* LLVM flang only builds CUDA offload components when +cuda (#17466)
* CI pipelines no longer upload user environments that can contain secrets to the internet (#17545)
* CI pipelines add bootstrapped compilers to the compiler config (#17536)
* `spack buildcache list` does not exit on first failure and lists later mirrors (#17565)
* Apple's "gcc" executable that is an apple-clang compiler does not generate a gcc compiler config (#17589)
* Mixed compiler toolchains are merged more naturally across different compiler suffixes (#17590)
* Cray Shasta platforms detect the OS properly (#17467)
* Additional minor fixes.

# v0.15.1 (2020-07-10)

This minor release includes several important fixes:

* Fix shell support on Cray (#17386)
* Fix use of externals installed with other Spack instances (#16954)
* Fix gcc+binutils build (#9024)
* Fixes for usage of intel-mpi (#17378 and #17382)
* Fixes to Autotools config.guess detection (#17333 and #17356)
* Update `spack install` message to prompt user when an environment is not
  explicitly activated (#17454)

This release also adds a mirror for all sources that are
fetched in Spack (#17077). It is expected to be useful when the
official website for a Spack package is unavailable.

# v0.15.0 (2020-06-28)

`v0.15.0` is a major feature release.
README.md (28 changes)

@@ -78,11 +78,29 @@ these guidelines with [Travis CI](https://travis-ci.org/spack/spack). To
run these tests locally, and for helpful tips on git, see our
[Contribution Guide](http://spack.readthedocs.io/en/latest/contribution_guide.html).

Spack uses a rough approximation of the
[Git Flow](http://nvie.com/posts/a-successful-git-branching-model/)
branching model. The ``develop`` branch contains the latest
contributions, and ``master`` is always tagged and points to the latest
stable release.
Spack's `develop` branch has the latest contributions. Pull requests
should target `develop`, and users who want the latest package versions,
features, etc. can use `develop`.

Releases
--------

For multi-user site deployments or other use cases that need very stable
software installations, we recommend using Spack's
[stable releases](https://github.com/spack/spack/releases).

Each Spack release series also has a corresponding branch, e.g.
`releases/v0.14` has `0.14.x` versions of Spack, and `releases/v0.13` has
`0.13.x` versions. We backport important bug fixes to these branches but
we do not advance the package versions or make other changes that would
change the way Spack concretizes dependencies within a release branch.
So, you can base your Spack deployment on a release branch and `git pull`
to get fixes, without the package churn that comes with `develop`.

The latest release is always available with the `releases/latest` tag.

See the [docs on releases](https://spack.readthedocs.io/en/latest/developer_guide.html#releases)
for more details.

Code of Conduct
------------------------
etc/spack/defaults/mirrors.yaml (new file, 2 lines)

@@ -0,0 +1,2 @@
mirrors:
  spack-public: https://spack-llnl-mirror.s3-us-west-2.amazonaws.com/
@@ -23,8 +23,12 @@ packages:
    daal: [intel-daal]
    elf: [elfutils]
    fftw-api: [fftw]
    gl: [mesa+opengl, opengl]
    glx: [mesa+glx, opengl]
    gl: [libglvnd-fe, mesa+opengl~glvnd, opengl~glvnd]
    glx: [libglvnd-fe+glx, mesa+glx~glvnd, opengl+glx~glvnd]
    egl: [libglvnd-fe+egl, opengl+egl~glvnd]
    libglvnd-be-gl: [mesa+glvnd, opengl+glvnd]
    libglvnd-be-glx: [mesa+glx+glvnd, opengl+glx+glvnd]
    libglvnd-be-egl: [opengl+egl+glvnd]
    glu: [mesa-glu, openglu]
    golang: [gcc]
    iconv: [libiconv]
@@ -45,7 +45,7 @@ Environments:
      &&  echo "    view: /opt/view") > /opt/spack-environment/spack.yaml

   # Install the software, remove unnecessary deps
   RUN cd /opt/spack-environment && spack install && spack gc -y
   RUN cd /opt/spack-environment && spack env activate . && spack install && spack gc -y

   # Strip all the binaries
   RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \

@@ -267,7 +267,7 @@ following ``Dockerfile``:
      &&  echo "    view: /opt/view") > /opt/spack-environment/spack.yaml

   # Install the software, remove unnecessary deps
   RUN cd /opt/spack-environment && spack install && spack gc -y
   RUN cd /opt/spack-environment && spack env activate . && spack install && spack gc -y

   # Strip all the binaries
   RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
@@ -27,11 +27,22 @@ correspond to one feature/bugfix/extension/etc. One can create PRs with
changes relevant to different ideas, however reviewing such PRs becomes tedious
and error prone. If possible, try to follow the **one-PR-one-package/feature** rule.

Spack uses a rough approximation of the `Git Flow <http://nvie.com/posts/a-successful-git-branching-model/>`_
branching model. The develop branch contains the latest contributions, and
master is always tagged and points to the latest stable release. Therefore, when
you send your request, make ``develop`` the destination branch on the
`Spack repository <https://github.com/spack/spack>`_.
--------
Branches
--------

Spack's ``develop`` branch has the latest contributions. Nearly all pull
requests should start from ``develop`` and target ``develop``.

There is a branch for each major release series. Release branches
originate from ``develop`` and have tags for each point release in the
series. For example, ``releases/v0.14`` has tags for ``0.14.0``,
``0.14.1``, ``0.14.2``, etc. versions of Spack. We backport important bug
fixes to these branches, but we do not advance the package versions or
make other changes that would change the way Spack concretizes
dependencies. Currently, the maintainers manage these branches by
cherry-picking from ``develop``. See :ref:`releases` for more
information.

----------------------
Continuous Integration
@@ -495,3 +495,393 @@ The bottom of the output shows the top most time consuming functions,
slowest on top. The profiling support is from Python's built-in tool,
`cProfile
<https://docs.python.org/2/library/profile.html#module-cProfile>`_.

.. _releases:

--------
Releases
--------

This section documents Spack's release process. It is intended for
project maintainers, as the tasks described here require maintainer
privileges on the Spack repository. For others, we hope this section at
least provides some insight into how the Spack project works.

.. _release-branches:

^^^^^^^^^^^^^^^^
Release branches
^^^^^^^^^^^^^^^^

There are currently two types of Spack releases: :ref:`major releases
<major-releases>` (``0.13.0``, ``0.14.0``, etc.) and :ref:`point releases
<point-releases>` (``0.13.1``, ``0.13.2``, ``0.13.3``, etc.). Here is a
diagram of how Spack release branches work::

    o    branch: develop  (latest version)
    |
    o    merge v0.14.1 into develop
    |\
    | o  branch: releases/v0.14, tag: v0.14.1
    o |  merge v0.14.0 into develop
    |\|
    | o  tag: v0.14.0
    |/
    o    merge v0.13.2 into develop
    |\
    | o  branch: releases/v0.13, tag: v0.13.2
    o |  merge v0.13.1 into develop
    |\|
    | o  tag: v0.13.1
    o |  merge v0.13.0 into develop
    |\|
    | o  tag: v0.13.0
    o |
    | o
    |/
    o

The ``develop`` branch has the latest contributions, and nearly all pull
requests target ``develop``.

Each Spack release series also has a corresponding branch, e.g.
``releases/v0.14`` has ``0.14.x`` versions of Spack, and
``releases/v0.13`` has ``0.13.x`` versions. A major release is the first
tagged version on a release branch. Minor releases are back-ported from
develop onto release branches. This is typically done by cherry-picking
bugfix commits off of ``develop``.

To avoid version churn for users of a release series, minor releases
should **not** make changes that would change the concretization of
packages. They should generally only contain fixes to the Spack core.

Both major and minor releases are tagged. After each release, we merge
the release branch back into ``develop`` so that the version bump and any
other release-specific changes are visible in the mainline. As a
convenience, we also tag the latest release as ``releases/latest``,
so that users can easily check it out to get the latest
stable version. See :ref:`merging-releases` for more details.


^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Scheduling work for releases
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

We schedule work for releases by creating `GitHub projects
<https://github.com/spack/spack/projects>`_. At any time, there may be
several open release projects. For example, here are two releases (from
some past version of the page linked above):

.. image:: images/projects.png

Here, there's one release in progress for ``0.15.1`` and another for
``0.16.0``. Each of these releases has a project board containing issues
and pull requests. GitHub shows a status bar with completed work in
green, work in progress in purple, and work not started yet in gray, so
it's fairly easy to see progress.

Spack's project boards are not firm commitments, and we move work between
releases frequently. If we need to make a release and some tasks are not
yet done, we will simply move them to the next minor or major release, rather
than delaying the release to complete them.

For more on using GitHub project boards, see `GitHub's documentation
<https://docs.github.com/en/github/managing-your-work-on-github/about-project-boards>`_.
.. _major-releases:

^^^^^^^^^^^^^^^^^^^^^
Making Major Releases
^^^^^^^^^^^^^^^^^^^^^

Assuming you've already created a project board and completed the work
for a major release, the steps to make the release are as follows:

#. Create two new project boards:

   * One for the next major release
   * One for the next point release

#. Move any tasks that aren't done yet to one of the new project boards.
   Small bugfixes should go to the next point release. Major features,
   refactors, and changes that could affect concretization should go in
   the next major release.

#. Create a branch for the release, based on ``develop``:

   .. code-block:: console

      $ git checkout -b releases/v0.15 develop

   For a version ``vX.Y.Z``, the branch's name should be
   ``releases/vX.Y``. That is, you should create a ``releases/vX.Y``
   branch if you are preparing the ``X.Y.0`` release. A sketch of this
   naming rule is shown after this list.

#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.13.0
   <https://github.com/spack/spack/commit/8eeb64096c98b8a43d1c587f13ece743c864fba9>`_.

#. Update the release version lists in these files to include the new version:

   * ``lib/spack/spack/schema/container.py``
   * ``lib/spack/spack/container/images.json``

   **TODO**: We should get rid of this step in some future release.

#. Update ``CHANGELOG.md`` with major highlights in bullet form. Use
   proper markdown formatting, like `this example from 0.15.0
   <https://github.com/spack/spack/commit/d4bf70d9882fcfe88507e9cb444331d7dd7ba71c>`_.

#. Push the release branch to GitHub.

#. Make sure CI passes on the release branch, including:

   * Regular unit tests
   * Build tests
   * The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_

   If CI is not passing, submit pull requests to ``develop`` as normal
   and keep rebasing the release branch on ``develop`` until CI passes.

#. Follow the steps in :ref:`publishing-releases`.

#. Follow the steps in :ref:`merging-releases`.

#. Follow the steps in :ref:`announcing-releases`.
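The vX.Y.Z-to-releases/vX.Y mapping referenced in the list above is easy to get wrong by hand. A minimal sketch of the naming rule, for illustration only (this helper does not exist in Spack):

    import re

    def release_branch_for(tag):
        """Map a release tag like 'v0.15.1' to its series branch 'releases/v0.15'."""
        match = re.fullmatch(r"v(\d+)\.(\d+)\.\d+", tag)
        if not match:
            raise ValueError("expected a tag of the form vX.Y.Z, got %r" % tag)
        return "releases/v{0}.{1}".format(*match.groups())

    assert release_branch_for("v0.15.0") == "releases/v0.15"
    assert release_branch_for("v0.14.2") == "releases/v0.14"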
.. _point-releases:

^^^^^^^^^^^^^^^^^^^^^
Making Point Releases
^^^^^^^^^^^^^^^^^^^^^

This assumes you've already created a project board for a point release
and completed the work to be done for the release. To make a point
release:

#. Create one new project board for the next point release.

#. Move any cards that aren't done yet to the next project board.

#. Check out the release branch (it should already exist). For the
   ``X.Y.Z`` release, the release branch is called ``releases/vX.Y``. For
   ``v0.15.1``, you would check out ``releases/v0.15``:

   .. code-block:: console

      $ git checkout releases/v0.15

#. Cherry-pick each pull request in the ``Done`` column of the release
   project onto the release branch.

   This is **usually** fairly simple since we squash the commits from the
   vast majority of pull requests, which means there is only one commit
   per pull request to cherry-pick. For example, `this pull request
   <https://github.com/spack/spack/pull/15777>`_ has three commits, but
   they were squashed into a single commit on merge. You can see the
   commit that was created here:

   .. image:: images/pr-commit.png

   You can easily cherry-pick it like this (assuming you already have the
   release branch checked out):

   .. code-block:: console

      $ git cherry-pick 7e46da7

   For pull requests that were rebased, you'll need to cherry-pick each
   rebased commit individually. There have not been any rebased PRs like
   this in recent point releases.

   .. warning::

      It is important to cherry-pick commits in the order they happened,
      otherwise you can get conflicts while cherry-picking. When
      cherry-picking onto a point release, look at the merge date,
      **not** the number of the pull request or the date it was opened.
      (A sketch of scripting this ordering appears after this list.)

      Sometimes you may **still** get merge conflicts even if you have
      cherry-picked all the commits in order. This generally means there
      is some other intervening pull request that the one you're trying
      to pick depends on. In these cases, you'll need to make a judgment
      call:

      1. If the dependency is small, you might just cherry-pick it, too.
         If you do this, add it to the release board.

      2. If it is large, then you may decide that this fix is not worth
         including in a point release, in which case you should remove it
         from the release project.

      3. You can always decide to manually back-port the fix to the release
         branch if neither of the above options makes sense, but this can
         require a lot of work. It's seldom the right choice.

#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.14.1
   <https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.

#. Update the release version lists in these files to include the new version:

   * ``lib/spack/spack/schema/container.py``
   * ``lib/spack/spack/container/images.json``

   **TODO**: We should get rid of this step in some future release.

#. Update ``CHANGELOG.md`` with a list of bugfixes. This is typically just a
   summary of the commits you cherry-picked onto the release branch. See
   `the changelog from 0.14.1
   <https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.

#. Push the release branch to GitHub.

#. Make sure CI passes on the release branch, including:

   * Regular unit tests
   * Build tests
   * The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_

   If CI does not pass, you'll need to figure out why, and make changes
   to the release branch until it does. You can make more commits, modify
   or remove cherry-picked commits, or cherry-pick **more** from
   ``develop`` to make this happen.

#. Follow the steps in :ref:`publishing-releases`.

#. Follow the steps in :ref:`merging-releases`.

#. Follow the steps in :ref:`announcing-releases`.
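The merge-date ordering from the warning above can be scripted. A minimal sketch, assuming the squash-merge commits have already been collected from the project board (the SHA list below is hypothetical, not taken from a real release):

    import subprocess

    def committer_timestamp(sha):
        # %ct is the committer timestamp; for squash-merged PRs this is
        # the moment the commit landed on develop.
        out = subprocess.check_output(
            ["git", "show", "-s", "--format=%ct", sha], text=True)
        return int(out.strip())

    candidates = ["7e46da7", "deadbee"]  # hypothetical SHAs from the board
    for sha in sorted(candidates, key=committer_timestamp):
        subprocess.check_call(["git", "cherry-pick", sha])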
.. _publishing-releases:

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Publishing a release on GitHub
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

#. Go to `github.com/spack/spack/releases
   <https://github.com/spack/spack/releases>`_ and click ``Draft a new
   release``. Set the following:

   * ``Tag version`` should start with ``v`` and contain *all three*
     parts of the version, e.g. ``v0.15.1``. This is the name of the tag
     that will be created.

   * ``Target`` should be the ``releases/vX.Y`` branch (e.g., ``releases/v0.15``).

   * ``Release title`` should be ``vX.Y.Z`` (to match the tag, e.g., ``v0.15.1``).

   * For the text, paste the latest release markdown from your ``CHANGELOG.md``.

   You can save the draft and keep coming back to this as you prepare the release.

#. When you are done, click ``Publish release``.

#. Immediately after publishing, go back to
   `github.com/spack/spack/releases
   <https://github.com/spack/spack/releases>`_ and download the
   auto-generated ``.tar.gz`` file for the release. It's the ``Source
   code (tar.gz)`` link.

#. Click ``Edit`` on the release you just did and attach the downloaded
   release tarball as a binary. This does two things:

   #. Makes sure that the hash of our releases doesn't change over time.
      GitHub sometimes annoyingly changes the way they generate
      tarballs, and then hashes can change if you rely on the
      auto-generated tarball links.

   #. Gets us download counts on releases visible through the GitHub
      API. GitHub tracks downloads of artifacts, but *not* the source
      links. See the `releases
      page <https://api.github.com/repos/spack/spack/releases>`_ and search
      for ``download_count`` to see this.
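The ``download_count`` check described above is easy to automate. A minimal sketch against the public GitHub API (the endpoint is the one linked in the text; no authentication is assumed):

    import json
    import urllib.request

    URL = "https://api.github.com/repos/spack/spack/releases"

    with urllib.request.urlopen(URL) as response:
        releases = json.load(response)

    # Only attached assets (the manually uploaded tarballs) carry counts;
    # the auto-generated "Source code" links do not.
    for release in releases:
        for asset in release.get("assets", []):
            print(release["tag_name"], asset["name"], asset["download_count"])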
.. _merging-releases:

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Updating `releases/latest` and `develop`
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If the new release is the **highest** Spack release yet, you should
also tag it as ``releases/latest``. For example, suppose the highest
release is currently ``0.15.3``:

* If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
  it with ``releases/latest``, as these are higher than ``0.15.3``.

* If you are making a new release of an **older** major version of
  Spack, e.g. ``0.14.4``, then you should not tag it as
  ``releases/latest`` (as there are newer major versions).

To tag ``releases/latest``, do this:

.. code-block:: console

   $ git checkout releases/vX.Y    # vX.Y is the new release's branch
   $ git tag --force releases/latest
   $ git push --tags

The ``--force`` argument makes ``git`` overwrite the existing
``releases/latest`` tag with the new one.

We also merge each release that we tag as ``releases/latest`` into ``develop``.
Make sure to do this with a merge commit:

.. code-block:: console

   $ git checkout develop
   $ git merge --no-ff vX.Y.Z    # vX.Y.Z is the new release's tag
   $ git push

We merge back to ``develop`` because it:

* updates the version and ``CHANGELOG.md`` on ``develop``.
* ensures that your release tag is reachable from the head of
  ``develop``.

We *must* use a real merge commit (via the ``--no-ff`` option) because it
ensures that the release tag is reachable from the tip of ``develop``.
This is necessary for ``spack -V`` to work properly -- it uses ``git
describe --tags`` to find the last reachable tag in the repository and
reports how far we are from it. For example:

.. code-block:: console

   $ spack -V
   0.14.2-1486-b80d5e74e5

This says that we are at commit ``b80d5e74e5``, which is 1,486 commits
ahead of the ``0.14.2`` release.

We put this step last in the process because it's best to do it only once
the release is complete and tagged. If you do it before you've tagged the
release and later decide you want to tag some later commit, you'll need
to merge again.
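A rough illustration (not Spack's actual code) of how the ``spack -V`` string above can be derived from ``git describe``:

    import subprocess

    def describe_version():
        # `git describe --tags --long` prints e.g. "v0.14.2-1486-gb80d5e74e5":
        # the last reachable tag, the number of commits since that tag, and
        # "g" followed by the abbreviated current commit SHA.
        out = subprocess.check_output(
            ["git", "describe", "--tags", "--long"], text=True).strip()
        tag, ahead, gsha = out.rsplit("-", 2)
        return "{0}-{1}-{2}".format(tag.lstrip("v"), ahead, gsha[1:])

    print(describe_version())  # -> something like "0.14.2-1486-b80d5e74e5"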
.. _announcing-releases:

^^^^^^^^^^^^^^^^^^^^
Announcing a release
^^^^^^^^^^^^^^^^^^^^

We announce releases in all of the major Spack communication channels.
Publishing the release takes care of GitHub. The remaining channels are
Twitter, Slack, and the mailing list. Here are the steps:

#. Make a tweet to announce the release. It should link to the release's
   page on GitHub. You can base it on `this example tweet
   <https://twitter.com/spackpm/status/1231761858182307840>`_.

#. Ping ``@channel`` in ``#general`` on Slack (`spackpm.slack.com
   <https://spackpm.slack.com>`_) with a link to the tweet. The tweet
   will be shown inline so that you do not have to retype your release
   announcement.

#. Email the Spack mailing list to let them know about the release. As
   with the tweet, you likely want to link to the release's page on
   GitHub. It's also helpful to include some information directly in the
   email. You can base yours on this `example email
   <https://groups.google.com/forum/#!topic/spack/WT4CT9i_X4s>`_.

Once you've announced the release, congratulations, you're done! You've
finished making the release!
@@ -811,6 +811,100 @@ to add the following to ``packages.yaml``:
   present in PATH, however it will have lower precedence compared to paths
   from other dependencies. This ensures that binaries in Spack dependencies
   are preferred over system binaries.

^^^^^^
OpenGL
^^^^^^

To use hardware-accelerated rendering from a system-supplied OpenGL driver,
add something like the following to your ``packages`` configuration:

.. code-block:: yaml

   packages:
     opengl:
       paths:
         opengl+glx@4.5: /usr
       buildable: False
     all:
       providers:
         gl: [opengl]
         glx: [opengl]

For `EGL <https://www.khronos.org/egl>` support, or for certain modern drivers,
OpenGL calls are dispatched dynamically at run time to the hardware graphics
implementation. This dynamic dispatch is performed using `libglvnd
<https://github.com/NVIDIA/libglvnd>`. In this mode, the graphics library
(e.g.: opengl) must be built to work with libglvnd. Applications then link
against libglvnd instead of the underlying implementation. Environment
variables set at run time govern the process by which libglvnd loads the
underlying implementation and dispatches calls to it. See `this
<https://github.com/NVIDIA/libglvnd/issues/177#issuecomment-496562769>` comment
for details on loading a specific GLX implementation and `this
<https://github.com/NVIDIA/libglvnd/blob/master/src/EGL/icd_enumeration.md>`
page for information about EGL ICD enumeration.

This codependency between libglvnd and the underlying implementation is modeled
in Spack with two packages for libglvnd: libglvnd, which provides libglvnd
proper; and libglvnd-fe, a bundle package that depends on libglvnd and an
implementation. Implementations that work through libglvnd are no longer
providers for graphics virtual dependencies, like "gl" or "glx", but instead
provide libglvnd versions of these dependencies ("libglvnd-be-gl",
"libglvnd-be-glx", etc.). The libglvnd-fe package depends on these
"libglvnd-be-..." virtual packages, which provide the actual implementation.
It also depends on libglvnd, itself, and exposes its libraries to downstream
applications. For correct operation, the Spack package for the underlying
implementation has to set the runtime environment to ensure that it is loaded
when an application linked against libglvnd runs. This last detail is
important for users who want to set up an external OpenGL implementation that
requires libglvnd to work. This setup requires modifying the ``modules``
configuration so that modules generated for the external OpenGL implementation
set the necessary environment variables.

.. code-block:: yaml

   packages:
     opengl:
       paths:
         opengl@4.5+glx+egl+glvnd: /does/not/exist
       buildable: False
       variants: +glx+egl+glvnd
     libglvnd-fe:
       variants: +gl+glx+egl
     all:
       providers:
         glvnd-be-gl: [opengl]
         glvnd-be-glx: [opengl]
         glvnd-be-egl: [opengl]
         gl: [libglvnd-fe]
         glx: [libglvnd-fe]
         egl: [libglvnd-fe]

.. code-block:: yaml

   modules:
     tcl:
       opengl@4.5+glx+glvnd:
         environment:
           set:
             __GLX_VENDOR_LIBRARY_NAME: nvidia
       opengl@4.5+egl+glvnd:
         environment:
           set:
             __EGL_VENDOR_LIBRARY_FILENAMES: /usr/share/glvnd/egl_vendor.d/10_nvidia.json

One final detail about the above example is that it avoids setting the true
root of the external OpenGL implementation, instead opting to set it to a path
that is not expected to exist on the system. This is done for two reasons.
First, Spack would add directories under this root to environment variables
that would affect the process of building and installing other packages, such
as ``PATH`` and ``PKG_CONFIG_PATH``. These additions may potentially prevent
those packages from installing successfully, and this risk is especially great
for paths that house many libraries and applications, like ``/usr``. Second,
providing the true root of the external implementation in the ``packages``
configuration is not necessary because libglvnd needs only the environment
variables set above in the ``modules`` configuration to determine what OpenGL
implementation to dispatch calls to at run time.

^^^
Git
^^^
@@ -818,7 +912,7 @@ Git

Some Spack packages use ``git`` to download, which might not work on
some computers. For example, the following error was
encountered on a Macintosh during ``spack install julia-master``:
encountered on a Macintosh during ``spack install julia@master``:

.. code-block:: console
BIN  lib/spack/docs/images/pr-commit.png (new binary file, 44 KiB, not shown)
BIN  lib/spack/docs/images/projects.png (new binary file, 68 KiB, not shown)
@@ -82,9 +82,9 @@ or Amazon Elastic Kubernetes Service (`EKS <https://aws.amazon.com/eks>`_), thou
topics are outside the scope of this document.

Spack's pipelines are now making use of the
`trigger <https://docs.gitlab.com/12.9/ee/ci/yaml/README.html#trigger>` syntax to run
`trigger <https://docs.gitlab.com/12.9/ee/ci/yaml/README.html#trigger>`_ syntax to run
dynamically generated
`child pipelines <https://docs.gitlab.com/12.9/ee/ci/parent_child_pipelines.html>`.
`child pipelines <https://docs.gitlab.com/12.9/ee/ci/parent_child_pipelines.html>`_.
Note that the use of dynamic child pipelines requires running Gitlab version
``>= 12.9``.

@@ -1405,11 +1405,12 @@ The main points that are implemented below:
   - export CXXFLAGS="-std=c++11"

   install:
     - if ! which spack >/dev/null; then
     - |
       if ! which spack >/dev/null; then
         mkdir -p $SPACK_ROOT &&
         git clone --depth 50 https://github.com/spack/spack.git $SPACK_ROOT &&
         echo -e "config:""\n  build_jobs:"" 2" > $SPACK_ROOT/etc/spack/config.yaml &&
         echo -e "packages:""\n  all:""\n    target:"" ['x86_64']"
         printf "config:\n  build_jobs: 2\n" > $SPACK_ROOT/etc/spack/config.yaml &&
         printf "packages:\n  all:\n    target: ['x86_64']\n" \
           > $SPACK_ROOT/etc/spack/packages.yaml;
       fi
     - travis_wait spack install cmake@3.7.2~openssl~ncurses
@@ -174,8 +174,9 @@ def _lock(self, op, timeout=None):
            # If the file were writable, we'd have opened it 'r+'
            raise LockROFileError(self.path)

        tty.debug("{0} locking [{1}:{2}]: timeout {3} sec"
                  .format(lock_type[op], self._start, self._length, timeout))
        self._log_debug("{0} locking [{1}:{2}]: timeout {3} sec"
                        .format(lock_type[op], self._start, self._length,
                                timeout))

        poll_intervals = iter(Lock._poll_interval_generator())
        start_time = time.time()
@@ -211,14 +212,14 @@ def _poll_lock(self, op):
            # help for debugging distributed locking
            if self.debug:
                # All locks read the owner PID and host
                self._read_debug_data()
                tty.debug('{0} locked {1} [{2}:{3}] (owner={4})'
                          .format(lock_type[op], self.path,
                                  self._start, self._length, self.pid))
                self._read_log_debug_data()
                self._log_debug('{0} locked {1} [{2}:{3}] (owner={4})'
                                .format(lock_type[op], self.path,
                                        self._start, self._length, self.pid))

                # Exclusive locks write their PID/host
                if op == fcntl.LOCK_EX:
                    self._write_debug_data()
                    self._write_log_debug_data()

            return True

@@ -245,7 +246,7 @@ def _ensure_parent_directory(self):
                raise
        return parent

    def _read_debug_data(self):
    def _read_log_debug_data(self):
        """Read PID and host data out of the file if it is there."""
        self.old_pid = self.pid
        self.old_host = self.host
@@ -257,7 +258,7 @@ def _read_debug_data(self):
        _, _, self.host = host.rpartition('=')
        self.pid = int(self.pid)

    def _write_debug_data(self):
    def _write_log_debug_data(self):
        """Write PID and host data to the file, recording old values."""
        self.old_pid = self.pid
        self.old_host = self.host
@@ -473,9 +474,6 @@ def release_write(self, release_fn=None):
        else:
            return False

    def _debug(self, *args):
        tty.debug(*args)

    def _get_counts_desc(self):
        return '(reads {0}, writes {1})'.format(self._reads, self._writes) \
            if tty.is_verbose() else ''
@@ -484,58 +482,50 @@ def _log_acquired(self, locktype, wait_time, nattempts):
        attempts_part = _attempts_str(wait_time, nattempts)
        now = datetime.now()
        desc = 'Acquired at %s' % now.strftime("%H:%M:%S.%f")
        self._debug(self._status_msg(locktype, '{0}{1}'.
                    format(desc, attempts_part)))
        self._log_debug(self._status_msg(locktype, '{0}{1}'
                        .format(desc, attempts_part)))

    def _log_acquiring(self, locktype):
        self._debug2(self._status_msg(locktype, 'Acquiring'))
        self._log_debug(self._status_msg(locktype, 'Acquiring'), level=3)

    def _log_debug(self, *args, **kwargs):
        """Output lock debug messages."""
        kwargs['level'] = kwargs.get('level', 2)
        tty.debug(*args, **kwargs)

    def _log_downgraded(self, wait_time, nattempts):
        attempts_part = _attempts_str(wait_time, nattempts)
        now = datetime.now()
        desc = 'Downgraded at %s' % now.strftime("%H:%M:%S.%f")
        self._debug(self._status_msg('READ LOCK', '{0}{1}'
                    .format(desc, attempts_part)))
        self._log_debug(self._status_msg('READ LOCK', '{0}{1}'
                        .format(desc, attempts_part)))

    def _log_downgrading(self):
        self._debug2(self._status_msg('WRITE LOCK', 'Downgrading'))
        self._log_debug(self._status_msg('WRITE LOCK', 'Downgrading'), level=3)

    def _log_released(self, locktype):
        now = datetime.now()
        desc = 'Released at %s' % now.strftime("%H:%M:%S.%f")
        self._debug(self._status_msg(locktype, desc))
        self._log_debug(self._status_msg(locktype, desc))

    def _log_releasing(self, locktype):
        self._debug2(self._status_msg(locktype, 'Releasing'))
        self._log_debug(self._status_msg(locktype, 'Releasing'), level=3)

    def _log_upgraded(self, wait_time, nattempts):
        attempts_part = _attempts_str(wait_time, nattempts)
        now = datetime.now()
        desc = 'Upgraded at %s' % now.strftime("%H:%M:%S.%f")
        self._debug(self._status_msg('WRITE LOCK', '{0}{1}'.
                    format(desc, attempts_part)))
        self._log_debug(self._status_msg('WRITE LOCK', '{0}{1}'.
                        format(desc, attempts_part)))

    def _log_upgrading(self):
        self._debug2(self._status_msg('READ LOCK', 'Upgrading'))
        self._log_debug(self._status_msg('READ LOCK', 'Upgrading'), level=3)

    def _status_msg(self, locktype, status):
        status_desc = '[{0}] {1}'.format(status, self._get_counts_desc())
        return '{0}{1.desc}: {1.path}[{1._start}:{1._length}] {2}'.format(
            locktype, self, status_desc)

    def _debug2(self, *args):
        # TODO: Easy place to make a single, temporary change to the
        # TODO: debug level associated with the more detailed messages.
        # TODO:
        # TODO: Someday it would be great if we could switch this to
        # TODO: another level, perhaps _between_ debug and verbose, or
        # TODO: some other form of filtering so the first level of
        # TODO: debugging doesn't have to generate these messages. Using
        # TODO: verbose here did not work as expected because tests like
        # TODO: test_spec_json will write the verbose messages to the
        # TODO: output that is used to check test correctness.
        tty.debug(*args)


class LockTransaction(object):
    """Simple nested transaction context manager that uses a file lock.
@@ -19,7 +19,8 @@

 from llnl.util.tty.color import cprint, cwrite, cescape, clen

-_debug = False
+# Globals
+_debug = 0
 _verbose = False
 _stacktrace = False
 _timestamp = False

@@ -29,21 +30,26 @@
 indent = "  "


+def debug_level():
+    return _debug


 def is_verbose():
     return _verbose


-def is_debug():
-    return _debug
+def is_debug(level=1):
+    return _debug >= level


 def is_stacktrace():
     return _stacktrace


-def set_debug(flag):
+def set_debug(level=0):
     global _debug
-    _debug = flag
+    assert level >= 0, 'Debug level must be a positive value'
+    _debug = level


 def set_verbose(flag):

@@ -132,12 +138,17 @@ def process_stacktrace(countback):
     return st_text


+def show_pid():
+    return is_debug(2)


 def get_timestamp(force=False):
     """Get a string timestamp"""
     if _debug or _timestamp or force:
-        # Note inclusion of the PID is useful for parallel builds.
-        return '[{0}, {1}] '.format(
-            datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), os.getpid())
+        pid = ', {0}'.format(os.getpid()) if show_pid() else ''
+        return '[{0}{1}] '.format(
+            datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), pid)
     else:
         return ''

@@ -197,7 +208,8 @@ def verbose(message, *args, **kwargs):

 def debug(message, *args, **kwargs):
-    if _debug:
+    level = kwargs.get('level', 1)
+    if is_debug(level):
         kwargs.setdefault('format', 'g')
         kwargs.setdefault('stream', sys.stderr)
         info(message, *args, **kwargs)
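Taken together, the hunks above turn `_debug` from a boolean into an integer verbosity level, and `tty.debug()` gains a `level` keyword. A minimal sketch of how the new API composes (the message strings are illustrative, not from the diff):

import llnl.util.tty as tty

tty.set_debug(2)               # e.g. what a repeated -d flag would request
assert tty.is_debug()          # defaults to level=1, so True here
assert tty.is_debug(2)         # level-2 messages are emitted too
assert not tty.is_debug(3)     # level-3 messages stay hidden

tty.debug('coarse message')                 # implicit level=1
tty.debug('fine-grained message', level=2)  # only shown at -dd and above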
@@ -323,14 +323,14 @@ class log_output(object):
     work within test frameworks like nose and pytest.
     """

-    def __init__(self, file_like=None, echo=False, debug=False, buffer=False):
+    def __init__(self, file_like=None, echo=False, debug=0, buffer=False):
         """Create a new output log context manager.

         Args:
             file_like (str or stream): open file object or name of file where
                 output should be logged
             echo (bool): whether to echo output in addition to logging it
-            debug (bool): whether to enable tty debug mode during logging
+            debug (int): positive to enable tty debug mode during logging
             buffer (bool): pass buffer=True to skip unbuffering output; note
                 this doesn't set up any *new* buffering

@@ -355,7 +355,7 @@ def __init__(self, file_like=None, echo=False, debug=False, buffer=False):
         self._active = False  # used to prevent re-entry

     def __call__(self, file_like=None, echo=None, debug=None, buffer=None):
-        """Thie behaves the same as init. It allows a logger to be reused.
+        """This behaves the same as init. It allows a logger to be reused.

         Arguments are the same as for ``__init__()``. Args here take
         precedence over those passed to ``__init__()``.
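With `debug` now an integer, callers that capture build output can forward a debug level instead of a flag. A hedged usage sketch (the file name and message are made up):

from llnl.util.tty.log import log_output

# Capture output to a file while enabling level-2 tty debugging inside
# the logging context; 'build.log' is an illustrative name.
with log_output('build.log', echo=False, debug=2):
    print('this line is captured in build.log')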
@@ -31,17 +31,17 @@
 class ProcessController(object):
     """Wrapper around some fundamental process control operations.

-    This allows one process to drive another similar to the way a shell
-    would, by sending signals and I/O.
+    This allows one process (the controller) to drive another (the
+    minion) similar to the way a shell would, by sending signals and I/O.

     """
-    def __init__(self, pid, master_fd,
+    def __init__(self, pid, controller_fd,
                  timeout=1, sleep_time=1e-1, debug=False):
         """Create a controller to manipulate the process with id ``pid``

         Args:
             pid (int): id of process to control
-            master_fd (int): master file descriptor attached to pid's stdin
+            controller_fd (int): controller fd attached to pid's stdin
             timeout (int): time in seconds for wait operations to time out
                 (default 1 second)
             sleep_time (int): time to sleep after signals, to control the

@@ -58,7 +58,7 @@ def __init__(self, pid, master_fd,
         """
         self.pid = pid
         self.pgid = os.getpgid(pid)
-        self.master_fd = master_fd
+        self.controller_fd = controller_fd
         self.timeout = timeout
         self.sleep_time = sleep_time
         self.debug = debug

@@ -67,8 +67,8 @@ def __init__(self, pid, master_fd,
         self.ps = which("ps", required=True)

     def get_canon_echo_attrs(self):
-        """Get echo and canon attributes of the terminal of master_fd."""
-        cfg = termios.tcgetattr(self.master_fd)
+        """Get echo and canon attributes of the terminal of controller_fd."""
+        cfg = termios.tcgetattr(self.controller_fd)
         return (
             bool(cfg[3] & termios.ICANON),
             bool(cfg[3] & termios.ECHO),

@@ -82,7 +82,7 @@ def horizontal_line(self, name):
         )

     def status(self):
-        """Print debug message with status info for the child."""
+        """Print debug message with status info for the minion."""
         if self.debug:
             canon, echo = self.get_canon_echo_attrs()
             sys.stderr.write("canon: %s, echo: %s\n" % (

@@ -94,12 +94,12 @@ def status(self):
             sys.stderr.write("\n")

     def input_on(self):
-        """True if keyboard input is enabled on the master_fd pty."""
+        """True if keyboard input is enabled on the controller_fd pty."""
         return self.get_canon_echo_attrs() == (False, False)

     def background(self):
-        """True if pgid is in a background pgroup of master_fd's terminal."""
-        return self.pgid != os.tcgetpgrp(self.master_fd)
+        """True if pgid is in a background pgroup of controller_fd's tty."""
+        return self.pgid != os.tcgetpgrp(self.controller_fd)

     def tstp(self):
         """Send SIGTSTP to the controlled process."""

@@ -115,18 +115,18 @@ def cont(self):
     def fg(self):
         self.horizontal_line("fg")
         with log.ignore_signal(signal.SIGTTOU):
-            os.tcsetpgrp(self.master_fd, os.getpgid(self.pid))
+            os.tcsetpgrp(self.controller_fd, os.getpgid(self.pid))
         time.sleep(self.sleep_time)

     def bg(self):
         self.horizontal_line("bg")
         with log.ignore_signal(signal.SIGTTOU):
-            os.tcsetpgrp(self.master_fd, os.getpgrp())
+            os.tcsetpgrp(self.controller_fd, os.getpgrp())
         time.sleep(self.sleep_time)

     def write(self, byte_string):
         self.horizontal_line("write '%s'" % byte_string.decode("utf-8"))
-        os.write(self.master_fd, byte_string)
+        os.write(self.controller_fd, byte_string)

     def wait(self, condition):
         start = time.time()
@@ -156,50 +156,51 @@ def wait_running(self):


 class PseudoShell(object):
-    """Sets up master and child processes with a PTY.
+    """Sets up controller and minion processes with a PTY.

     You can create a ``PseudoShell`` if you want to test how some
     function responds to terminal input. This is a pseudo-shell from a
-    job control perspective; ``master_function`` and ``child_function``
-    are set up with a pseudoterminal (pty) so that the master can drive
-    the child through process control signals and I/O.
+    job control perspective; ``controller_function`` and ``minion_function``
+    are set up with a pseudoterminal (pty) so that the controller can drive
+    the minion through process control signals and I/O.

     The two functions should have signatures like this::

-        def master_function(proc, ctl, **kwargs)
-        def child_function(**kwargs)
+        def controller_function(proc, ctl, **kwargs)
+        def minion_function(**kwargs)

-    ``master_function`` is spawned in its own process and passed three
+    ``controller_function`` is spawned in its own process and passed three
     arguments:

     proc
-        the ``multiprocessing.Process`` object representing the child
+        the ``multiprocessing.Process`` object representing the minion
     ctl
-        a ``ProcessController`` object tied to the child
+        a ``ProcessController`` object tied to the minion
     kwargs
         keyword arguments passed from ``PseudoShell.start()``.

-    ``child_function`` is only passed ``kwargs`` delegated from
+    ``minion_function`` is only passed ``kwargs`` delegated from
     ``PseudoShell.start()``.

-    The ``ctl.master_fd`` will have its ``master_fd`` connected to
-    ``sys.stdin`` in the child process. Both processes will share the
+    The ``ctl.controller_fd`` will have its ``controller_fd`` connected to
+    ``sys.stdin`` in the minion process. Both processes will share the
     same ``sys.stdout`` and ``sys.stderr`` as the process instantiating
     ``PseudoShell``.

     Here are the relationships between processes created::

         ._________________________________________________________.
-        | Child Process                                           | pid   2
-        | - runs child_function                                   | pgroup 2
+        | Minion Process                                          | pid   2
+        | - runs minion_function                                  | pgroup 2
         |_________________________________________________________| session 1
             ^
-            | create process with master_fd connected to stdin
+            | create process with controller_fd connected to stdin
             | stdout, stderr are the same as caller
         ._________________________________________________________.
-        | Master Process                                          | pid   1
-        | - runs master_function                                  | pgroup 1
-        | - uses ProcessController and master_fd to control child | session 1
+        | Controller Process                                      | pid   1
+        | - runs controller_function                              | pgroup 1
+        | - uses ProcessController and controller_fd to           | session 1
+        |   control minion                                        |
         |_________________________________________________________|
             ^
             | create process
@@ -207,51 +208,51 @@ def child_function(**kwargs)
         ._________________________________________________________.
         | Caller                                                  | pid   0
         | - Constructs, starts, joins PseudoShell                 | pgroup 0
-        | - provides master_function, child_function              | session 0
+        | - provides controller_function, minion_function         | session 0
         |_________________________________________________________|

     """
-    def __init__(self, master_function, child_function):
+    def __init__(self, controller_function, minion_function):
         self.proc = None
-        self.master_function = master_function
-        self.child_function = child_function
+        self.controller_function = controller_function
+        self.minion_function = minion_function

         # these can be optionally set to change defaults
         self.controller_timeout = 1
         self.sleep_time = 0

     def start(self, **kwargs):
-        """Start the master and child processes.
+        """Start the controller and minion processes.

         Arguments:
             kwargs (dict): arbitrary keyword arguments that will be
-                passed to master and child functions
+                passed to controller and minion functions

-        The master process will create the child, then call
-        ``master_function``. The child process will call
-        ``child_function``.
+        The controller process will create the minion, then call
+        ``controller_function``. The minion process will call
+        ``minion_function``.

         """
         self.proc = multiprocessing.Process(
-            target=PseudoShell._set_up_and_run_master_function,
-            args=(self.master_function, self.child_function,
+            target=PseudoShell._set_up_and_run_controller_function,
+            args=(self.controller_function, self.minion_function,
                   self.controller_timeout, self.sleep_time),
             kwargs=kwargs,
         )
         self.proc.start()

     def join(self):
-        """Wait for the child process to finish, and return its exit code."""
+        """Wait for the minion process to finish, and return its exit code."""
         self.proc.join()
         return self.proc.exitcode

     @staticmethod
-    def _set_up_and_run_child_function(
-            tty_name, stdout_fd, stderr_fd, ready, child_function, **kwargs):
-        """Child process wrapper for PseudoShell.
+    def _set_up_and_run_minion_function(
+            tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs):
+        """Minion process wrapper for PseudoShell.

         Handles the mechanics of setting up a PTY, then calls
-        ``child_function``.
+        ``minion_function``.

         """
         # new process group, like a command or pipeline launched by a shell
@@ -266,45 +267,45 @@ def _set_up_and_run_child_function(

         if kwargs.get("debug"):
             sys.stderr.write(
-                "child: stdin.isatty(): %s\n" % sys.stdin.isatty())
+                "minion: stdin.isatty(): %s\n" % sys.stdin.isatty())

         # tell the parent that we're really running
         if kwargs.get("debug"):
-            sys.stderr.write("child: ready!\n")
+            sys.stderr.write("minion: ready!\n")
         ready.value = True

         try:
-            child_function(**kwargs)
+            minion_function(**kwargs)
         except BaseException:
             traceback.print_exc()

     @staticmethod
-    def _set_up_and_run_master_function(
-            master_function, child_function, controller_timeout, sleep_time,
-            **kwargs):
-        """Set up a pty, spawn a child process, and execute master_function.
+    def _set_up_and_run_controller_function(
+            controller_function, minion_function, controller_timeout,
+            sleep_time, **kwargs):
+        """Set up a pty, spawn a minion process, execute controller_function.

         Handles the mechanics of setting up a PTY, then calls
-        ``master_function``.
+        ``controller_function``.

         """
         os.setsid()  # new session; this process is the controller

-        master_fd, child_fd = os.openpty()
-        pty_name = os.ttyname(child_fd)
+        controller_fd, minion_fd = os.openpty()
+        pty_name = os.ttyname(minion_fd)

         # take controlling terminal
         pty_fd = os.open(pty_name, os.O_RDWR)
         os.close(pty_fd)

         ready = multiprocessing.Value('i', False)
-        child_process = multiprocessing.Process(
-            target=PseudoShell._set_up_and_run_child_function,
+        minion_process = multiprocessing.Process(
+            target=PseudoShell._set_up_and_run_minion_function,
             args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(),
-                  ready, child_function),
+                  ready, minion_function),
             kwargs=kwargs,
         )
-        child_process.start()
+        minion_process.start()

         # wait for subprocess to be running and connected.
         while not ready.value:

@@ -315,30 +316,31 @@ def _set_up_and_run_master_function(
             sys.stderr.write("pid:  %d\n" % os.getpid())
             sys.stderr.write("pgid: %d\n" % os.getpgrp())
             sys.stderr.write("sid:  %d\n" % os.getsid(0))
-            sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(master_fd))
+            sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(controller_fd))
             sys.stderr.write("\n")

-            child_pgid = os.getpgid(child_process.pid)
-            sys.stderr.write("child pid: %d\n" % child_process.pid)
-            sys.stderr.write("child pgid: %d\n" % child_pgid)
-            sys.stderr.write("child sid: %d\n" % os.getsid(child_process.pid))
+            minion_pgid = os.getpgid(minion_process.pid)
+            sys.stderr.write("minion pid: %d\n" % minion_process.pid)
+            sys.stderr.write("minion pgid: %d\n" % minion_pgid)
+            sys.stderr.write(
+                "minion sid: %d\n" % os.getsid(minion_process.pid))
             sys.stderr.write("\n")
             sys.stderr.flush()

-        # set up master to ignore SIGTSTP, like a shell
+        # set up controller to ignore SIGTSTP, like a shell
         signal.signal(signal.SIGTSTP, signal.SIG_IGN)

-        # call the master function once the child is ready
+        # call the controller function once the minion is ready
         try:
             controller = ProcessController(
-                child_process.pid, master_fd, debug=kwargs.get("debug"))
+                minion_process.pid, controller_fd, debug=kwargs.get("debug"))
             controller.timeout = controller_timeout
             controller.sleep_time = sleep_time
-            error = master_function(child_process, controller, **kwargs)
+            error = controller_function(minion_process, controller, **kwargs)
         except BaseException:
             error = 1
             traceback.print_exc()

-        child_process.join()
+        minion_process.join()

-        # return whether either the parent or child failed
-        return error or child_process.exitcode
+        # return whether either the parent or minion failed
+        return error or minion_process.exitcode
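For orientation, here is a minimal controller/minion pair wired through the renamed API documented above. The module path (`llnl.util.tty.pty`) and the function bodies are assumptions for illustration, not code from this diff:

from llnl.util.tty.pty import PseudoShell

def controller_function(proc, ctl, **kwargs):
    # `proc` is the minion's multiprocessing.Process; `ctl` is the
    # ProcessController bound to controller_fd.
    ctl.write(b'hello\n')        # type into the minion's stdin
    return 0                     # folded into PseudoShell.join()'s result

def minion_function(**kwargs):
    print('minion got:', input())  # reads what the controller wrote

shell = PseudoShell(controller_function, minion_function)
shell.start()
exit_code = shell.join()  # controller error, or the minion's exit code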
@@ -5,7 +5,7 @@


 #: major, minor, patch version for Spack, in a tuple
-spack_version_info = (0, 15, 0)
+spack_version_info = (0, 15, 3)

 #: String containing Spack version joined with .'s
 spack_version = '.'.join(str(v) for v in spack_version_info)
@@ -466,8 +466,8 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
     web_util.push_to_url(
         specfile_path, remote_specfile_path, keep_original=False)

-    tty.msg('Buildache for "%s" written to \n   %s' %
-            (spec, remote_spackfile_path))
+    tty.debug('Buildcache for "{0}" written to \n   {1}'
+              .format(spec, remote_spackfile_path))

     try:
         # create an index.html for the build_cache directory so specs can be

@@ -498,6 +498,7 @@ def download_tarball(spec):

     # stage the tarball into standard place
     stage = Stage(url, name="build_cache", keep=True)
+    stage.create()
     try:
         stage.fetch()
         return stage.save_filename

@@ -602,15 +603,11 @@ def is_backup_file(file):
         if not is_backup_file(text_name):
             text_names.append(text_name)

-    # If we are installing back to the same location don't replace anything
+    # If we are not installing back to the same install tree do the relocation
     if old_layout_root != new_layout_root:
-        paths_to_relocate = [old_spack_prefix, old_layout_root]
-        paths_to_relocate.extend(prefix_to_hash.keys())
-        files_to_relocate = list(filter(
-            lambda pathname: not relocate.file_is_relocatable(
-                pathname, paths_to_relocate=paths_to_relocate),
-            map(lambda filename: os.path.join(workdir, filename),
-                buildinfo['relocate_binaries'])))
+        files_to_relocate = [os.path.join(workdir, filename)
+                             for filename in buildinfo.get('relocate_binaries')
+                             ]
         # If the buildcache was not created with relativized rpaths
         # do the relocation of path in binaries
         if (spec.architecture.platform == 'darwin' or

@@ -646,6 +643,13 @@ def is_backup_file(file):
                                    new_spack_prefix,
                                    prefix_to_prefix)

+        paths_to_relocate = [old_prefix, old_layout_root]
+        paths_to_relocate.extend(prefix_to_hash.keys())
+        files_to_relocate = list(filter(
+            lambda pathname: not relocate.file_is_relocatable(
+                pathname, paths_to_relocate=paths_to_relocate),
+            map(lambda filename: os.path.join(workdir, filename),
+                buildinfo['relocate_binaries'])))
         # relocate the install prefixes in binary files including dependencies
         relocate.relocate_text_bin(files_to_relocate,
                                    old_prefix, new_prefix,

@@ -653,6 +657,17 @@ def is_backup_file(file):
                                    new_spack_prefix,
                                    prefix_to_prefix)

+    # If we are installing back to the same location
+    # relocate the sbang location if the spack directory changed
+    else:
+        if old_spack_prefix != new_spack_prefix:
+            relocate.relocate_text(text_names,
+                                   old_layout_root, new_layout_root,
+                                   old_prefix, new_prefix,
+                                   old_spack_prefix,
+                                   new_spack_prefix,
+                                   prefix_to_prefix)


 def extract_tarball(spec, filename, allow_root=False, unsigned=False,
                     force=False):
@@ -828,13 +843,13 @@ def get_spec(spec=None, force=False):

         mirror_dir = url_util.local_file_path(fetch_url_build_cache)
         if mirror_dir:
-            tty.msg("Finding buildcaches in %s" % mirror_dir)
+            tty.debug('Finding buildcaches in {0}'.format(mirror_dir))
             link = url_util.join(fetch_url_build_cache, specfile_name)
             urls.add(link)

         else:
-            tty.msg("Finding buildcaches at %s" %
-                    url_util.format(fetch_url_build_cache))
+            tty.debug('Finding buildcaches at {0}'
+                      .format(url_util.format(fetch_url_build_cache)))
             link = url_util.join(fetch_url_build_cache, specfile_name)
             urls.add(link)

@@ -857,8 +872,8 @@ def get_specs(allarch=False):
         fetch_url_build_cache = url_util.join(
             mirror.fetch_url, _build_cache_relative_path)

-        tty.msg("Finding buildcaches at %s" %
-                url_util.format(fetch_url_build_cache))
+        tty.debug('Finding buildcaches at {0}'
+                  .format(url_util.format(fetch_url_build_cache)))

         index_url = url_util.join(fetch_url_build_cache, 'index.json')

@@ -869,8 +884,8 @@ def get_specs(allarch=False):
         except (URLError, web_util.SpackWebError) as url_err:
             tty.error('Failed to read index {0}'.format(index_url))
             tty.debug(url_err)
-            # Just return whatever specs we may already have cached
-            return _cached_specs
+            # Continue on to the next mirror
+            continue

         tmpdir = tempfile.mkdtemp()
         index_file_path = os.path.join(tmpdir, 'index.json')

@@ -909,15 +924,15 @@ def get_keys(install=False, trust=False, force=False):

         mirror_dir = url_util.local_file_path(fetch_url_build_cache)
         if mirror_dir:
-            tty.msg("Finding public keys in %s" % mirror_dir)
+            tty.debug('Finding public keys in {0}'.format(mirror_dir))
             files = os.listdir(str(mirror_dir))
             for file in files:
                 if re.search(r'\.key', file) or re.search(r'\.pub', file):
                     link = url_util.join(fetch_url_build_cache, file)
                     keys.add(link)
         else:
-            tty.msg("Finding public keys at %s" %
-                    url_util.format(fetch_url_build_cache))
+            tty.debug('Finding public keys at {0}'
+                      .format(url_util.format(fetch_url_build_cache)))
             # For s3 mirror need to request index.html directly
             p, links = web_util.spider(
                 url_util.join(fetch_url_build_cache, 'index.html'))

@@ -935,14 +950,14 @@ def get_keys(install=False, trust=False, force=False):
                 stage.fetch()
             except fs.FetchError:
                 continue
-            tty.msg('Found key %s' % link)
+            tty.debug('Found key {0}'.format(link))
             if install:
                 if trust:
                     Gpg.trust(stage.save_filename)
-                    tty.msg('Added this key to trusted keys.')
+                    tty.debug('Added this key to trusted keys.')
                 else:
-                    tty.msg('Will not add this key to trusted keys.'
-                            'Use -t to install all downloaded keys')
+                    tty.debug('Will not add this key to trusted keys.'
+                              'Use -t to install all downloaded keys')


 def needs_rebuild(spec, mirror_url, rebuild_on_errors=False):

@@ -1029,7 +1044,7 @@ def check_specs_against_mirrors(mirrors, specs, output_file=None,
     """
     rebuilds = {}
     for mirror in spack.mirror.MirrorCollection(mirrors).values():
-        tty.msg('Checking for built specs at %s' % mirror.fetch_url)
+        tty.debug('Checking for built specs at {0}'.format(mirror.fetch_url))

         rebuild_list = []
@@ -174,6 +174,14 @@ def clean_environment():
     for v in build_system_vars:
         env.unset(v)

+    # Unset mpi environment vars. These flags should only be set by
+    # mpi providers for packages with mpi dependencies
+    mpi_vars = [
+        'MPICC', 'MPICXX', 'MPIFC', 'MPIF77', 'MPIF90'
+    ]
+    for v in mpi_vars:
+        env.unset(v)

     build_lang = spack.config.get('config:build_language')
     if build_lang:
         # Override language-related variables. This can be used to force
@@ -118,15 +118,13 @@ def _do_patch_config_files(self):
             config_file = 'config.{0}'.format(config_name)
             if os.path.exists(config_file):
                 # First search the top-level source directory
-                my_config_files[config_name] = os.path.join(
-                    self.configure_directory, config_file)
+                my_config_files[config_name] = os.path.abspath(config_file)
             else:
                 # Then search in all sub directories recursively.
                 # We would like to use AC_CONFIG_AUX_DIR, but not all packages
                 # ship with their configure.in or configure.ac.
-                config_path = next((os.path.join(r, f)
-                                    for r, ds, fs in os.walk(
-                                        self.configure_directory) for f in fs
+                config_path = next((os.path.abspath(os.path.join(r, f))
+                                    for r, ds, fs in os.walk('.') for f in fs
                                     if f == config_file), None)
                 my_config_files[config_name] = config_path
@@ -12,8 +12,9 @@
 class CudaPackage(PackageBase):
     """Auxiliary class which contains CUDA variant, dependencies and conflicts
     and is meant to unify and facilitate its usage.
+
+    Maintainers: ax3l, svenevs
     """
-    maintainers = ['ax3l', 'svenevs']

     # https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list
     # https://developer.nvidia.com/cuda-gpus

@@ -25,6 +26,7 @@ class CudaPackage(PackageBase):
         '50', '52', '53',
         '60', '61', '62',
         '70', '72', '75',
+        '80',
     ]

     # FIXME: keep cuda and cuda_arch separate to make usage easier until

@@ -48,6 +50,7 @@ def cuda_flags(arch_list):

     # CUDA version vs Architecture
     # https://en.wikipedia.org/wiki/CUDA#GPUs_supported
+    # https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#deprecated-features
     depends_on('cuda@:6.0', when='cuda_arch=10')
     depends_on('cuda@:6.5', when='cuda_arch=11')
     depends_on('cuda@2.1:6.5', when='cuda_arch=12')

@@ -58,8 +61,8 @@ def cuda_flags(arch_list):

     depends_on('cuda@5.0:10.2', when='cuda_arch=30')
     depends_on('cuda@5.0:10.2', when='cuda_arch=32')
-    depends_on('cuda@5.0:10.2', when='cuda_arch=35')
-    depends_on('cuda@6.5:10.2', when='cuda_arch=37')
+    depends_on('cuda@5.0:', when='cuda_arch=35')
+    depends_on('cuda@6.5:', when='cuda_arch=37')

     depends_on('cuda@6.0:', when='cuda_arch=50')
     depends_on('cuda@6.5:', when='cuda_arch=52')

@@ -73,6 +76,8 @@ def cuda_flags(arch_list):
     depends_on('cuda@9.0:', when='cuda_arch=72')
     depends_on('cuda@10.0:', when='cuda_arch=75')

+    depends_on('cuda@11.0:', when='cuda_arch=80')
+
     # There are at least three cases to be aware of for compiler conflicts
     # 1. Linux x86_64
     # 2. Linux ppc64le
@@ -88,12 +93,15 @@ def cuda_flags(arch_list):
     conflicts('%gcc@7:', when='+cuda ^cuda@:9.1' + arch_platform)
     conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130' + arch_platform)
     conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89' + arch_platform)
+    conflicts('%gcc@:4,10:', when='+cuda ^cuda@:11.0.2' + arch_platform)
     conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27' + arch_platform)
     conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5' + arch_platform)
     conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8' + arch_platform)
     conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1' + arch_platform)
-    conflicts('%pgi@:16', when='+cuda ^cuda@9.2.88:10' + arch_platform)
-    conflicts('%pgi@:17', when='+cuda ^cuda@10.2.89' + arch_platform)
+    conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10' + arch_platform)
+    conflicts('%pgi@:17,20:',
+              when='+cuda ^cuda@10.1.105:10.2.89' + arch_platform)
+    conflicts('%pgi@:17,20.2:', when='+cuda ^cuda@11.0.2' + arch_platform)
     conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5' + arch_platform)
     conflicts('%clang@:3.7,4:',
               when='+cuda ^cuda@8.0:9.0' + arch_platform)

@@ -104,7 +112,8 @@ def cuda_flags(arch_list):
     conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105' + arch_platform)
     conflicts('%clang@:3.7,8.1:',
               when='+cuda ^cuda@10.1.105:10.1.243' + arch_platform)
-    conflicts('%clang@:3.2,9.0:', when='+cuda ^cuda@10.2.89' + arch_platform)
+    conflicts('%clang@:3.2,9:', when='+cuda ^cuda@10.2.89' + arch_platform)
+    conflicts('%clang@:5,10:', when='+cuda ^cuda@11.0.2' + arch_platform)

     # x86_64 vs. ppc64le differ according to NVidia docs
     # Linux ppc64le compiler conflicts from Table from the docs below:

@@ -119,6 +128,8 @@ def cuda_flags(arch_list):
     conflicts('%gcc@6:', when='+cuda ^cuda@:9' + arch_platform)
     conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130' + arch_platform)
     conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243' + arch_platform)
+    # officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le
+    conflicts('%gcc@:4,10:', when='+cuda ^cuda@:11.0.2' + arch_platform)
     conflicts('%pgi', when='+cuda ^cuda@:8' + arch_platform)
     conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185' + arch_platform)
     conflicts('%pgi@:17', when='+cuda ^cuda@:10' + arch_platform)

@@ -128,6 +139,7 @@ def cuda_flags(arch_list):
     conflicts('%clang@7:', when='+cuda ^cuda@10.0.130' + arch_platform)
     conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105' + arch_platform)
     conflicts('%clang@8.1:', when='+cuda ^cuda@:10.2.89' + arch_platform)
+    conflicts('%clang@:5,10.0:', when='+cuda ^cuda@11.0.2' + arch_platform)

     # Intel is mostly relevant for x86_64 Linux, even though it also
     # exists for Mac OS X. No information prior to CUDA 3.2 or Intel 11.1

@@ -141,11 +153,13 @@ def cuda_flags(arch_list):
     conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60')
     conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9')
     conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0')
+    conflicts('%intel@19.1:', when='+cuda ^cuda@:10.1')
+    conflicts('%intel@19.2:', when='+cuda ^cuda@:11.0.2')

     # XL is mostly relevant for ppc64le Linux
     conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1')
     conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2')
-    conflicts('%xl@17:', when='+cuda ^cuda@:10.2.89')
+    conflicts('%xl@:12,17:', when='+cuda ^cuda@:11.0.2')

     # Mac OS X
     # platform = ' platform=darwin'

@@ -156,7 +170,7 @@ def cuda_flags(arch_list):
     # `clang-apple@x.y.z as a possible fix.
     # Compiler conflicts will be eventual taken from here:
     # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#abstract
-    conflicts('platform=darwin', when='+cuda ^cuda@11.0:')
+    conflicts('platform=darwin', when='+cuda ^cuda@11.0.2:')

     # Make sure cuda_arch can not be used without +cuda
     for value in cuda_arch_values:
@@ -493,7 +493,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
     after_script = None
     if custom_spack_repo:
         if not custom_spack_ref:
-            custom_spack_ref = 'master'
+            custom_spack_ref = 'develop'
         before_script = [
             ('git clone "{0}"'.format(custom_spack_repo)),
             'pushd ./spack && git checkout "{0}" && popd'.format(

@@ -613,7 +613,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
             debug_flag = '-d '

         job_scripts = [
-            'spack env activate .',
+            'spack env activate --without-view .',
             'spack {0}ci rebuild'.format(debug_flag),
         ]

@@ -1043,17 +1043,10 @@ def copy_stage_logs_to_artifacts(job_spec, job_log_dir):
         tty.debug('job package: {0}'.format(job_pkg))
         stage_dir = job_pkg.stage.path
         tty.debug('stage dir: {0}'.format(stage_dir))
-        build_env_src = os.path.join(stage_dir, 'spack-build-env.txt')
         build_out_src = os.path.join(stage_dir, 'spack-build-out.txt')
-        build_env_dst = os.path.join(
-            job_log_dir, 'spack-build-env.txt')
         build_out_dst = os.path.join(
             job_log_dir, 'spack-build-out.txt')
-        tty.debug('Copying logs to artifacts:')
-        tty.debug('  1: {0} -> {1}'.format(
-            build_env_src, build_env_dst))
-        shutil.copyfile(build_env_src, build_env_dst)
-        tty.debug('  2: {0} -> {1}'.format(
+        tty.debug('Copying build log ({0}) to artifacts ({1})'.format(
             build_out_src, build_out_dst))
         shutil.copyfile(build_out_src, build_out_dst)
     except Exception as inst:
@@ -65,7 +65,7 @@ def checksum(parser, args):

     version_lines = spack.stage.get_checksums_for_versions(
         url_dict, pkg.name, keep_stage=args.keep_stage,
-        batch=(args.batch or len(args.versions) > 0),
+        batch=(args.batch or len(args.versions) > 0 or len(url_dict) == 1),
         fetch_options=pkg.fetch_options)

     print()

@@ -445,6 +445,9 @@ def setup_parser(subparser):
     subparser.add_argument(
         '--skip-editor', action='store_true',
         help="skip the edit session for the package (e.g., automation)")
+    subparser.add_argument(
+        '-b', '--batch', action='store_true',
+        help="don't ask which versions to checksum")


 class BuildSystemGuesser:

@@ -511,7 +514,7 @@ def __call__(self, stage, url):
         # Determine the build system based on the files contained
         # in the archive.
         for pattern, bs in clues:
-            if any(re.search(pattern, l) for l in lines):
+            if any(re.search(pattern, line) for line in lines):
                 self.build_system = bs
                 break

@@ -629,7 +632,8 @@ def get_versions(args, name):

         versions = spack.stage.get_checksums_for_versions(
             url_dict, name, first_stage_function=guesser,
-            keep_stage=args.keep_stage, batch=True)
+            keep_stage=args.keep_stage,
+            batch=(args.batch or len(url_dict) == 1))
     else:
         versions = unhashed_versions
|
@@ -351,6 +351,9 @@ def env_status(args):
|
||||
% (ev.manifest_name, env.path))
|
||||
else:
|
||||
tty.msg('In environment %s' % env.name)
|
||||
|
||||
# Check if environment views can be safely activated
|
||||
env.check_views()
|
||||
else:
|
||||
tty.msg('No active environment')
|
||||
|
||||
|
@@ -310,7 +310,7 @@ def install(parser, args, **kwargs):
|
||||
return
|
||||
|
||||
if not args.spec and not args.specfiles:
|
||||
# if there are no args but an active environment or spack.yaml file
|
||||
# if there are no args but an active environment
|
||||
# then install the packages from it.
|
||||
env = ev.get_env(args, 'install')
|
||||
if env:
|
||||
@@ -331,7 +331,18 @@ def install(parser, args, **kwargs):
|
||||
env.regenerate_views()
|
||||
return
|
||||
else:
|
||||
tty.die("install requires a package argument or a spack.yaml file")
|
||||
msg = "install requires a package argument or active environment"
|
||||
if 'spack.yaml' in os.listdir(os.getcwd()):
|
||||
# There's a spack.yaml file in the working dir, the user may
|
||||
# have intended to use that
|
||||
msg += "\n\n"
|
||||
msg += "Did you mean to install using the `spack.yaml`"
|
||||
msg += " in this directory? Try: \n"
|
||||
msg += " spack env activate .\n"
|
||||
msg += " spack install\n"
|
||||
msg += " OR\n"
|
||||
msg += " spack --env . install"
|
||||
tty.die(msg)
|
||||
|
||||
if args.no_checksum:
|
||||
spack.config.set('config:checksum', False, scope='command_line')
|
||||
|
@@ -28,7 +28,7 @@


 @llnl.util.lang.memoized
-def get_compiler_version_output(compiler_path, version_arg, ignore_errors=()):
+def _get_compiler_version_output(compiler_path, version_arg, ignore_errors=()):
     """Invokes the compiler at a given path passing a single
     version argument and returns the output.

@@ -42,6 +42,18 @@ def get_compiler_version_output(compiler_path, version_arg, ignore_errors=()):
     return output


+def get_compiler_version_output(compiler_path, *args, **kwargs):
+    """Wrapper for _get_compiler_version_output()."""
+    # This ensures that we memoize compiler output by *absolute path*,
+    # not just executable name. If we don't do this, and the path changes
+    # (e.g., during testing), we can get incorrect results.
+    if not os.path.isabs(compiler_path):
+        compiler_path = spack.util.executable.which_string(
+            compiler_path, required=True)
+
+    return _get_compiler_version_output(compiler_path, *args, **kwargs)


 def tokenize_flags(flags_str):
     """Given a compiler flag specification as a string, this returns a list
     where the entries are the flags. For compiler options which set values
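The wrapper above exists because memoization caches on the argument tuple: a bare name like `gcc` and a resolved `/usr/bin/gcc` would otherwise occupy separate cache slots, and the bare-name entry can go stale when `PATH` changes (as it does in tests). A toy illustration of the same effect, with `functools.lru_cache` standing in for Spack's `memoized` decorator:

import functools

@functools.lru_cache(maxsize=None)
def version_output(compiler_path):
    print('cache miss for', compiler_path)
    return 'gcc (GCC) 7.2.0'  # stand-in for actually running the compiler

version_output('/usr/bin/gcc')  # miss: runs (here, fakes) the compiler
version_output('/usr/bin/gcc')  # hit: same absolute path, same cache key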
@@ -650,23 +650,18 @@ def make_compiler_list(detected_versions):

     Returns:
         list of Compiler objects
     """
-    # We don't sort on the path of the compiler
-    sort_fn = lambda x: (x.id, x.variation, x.language)
-    compilers_s = sorted(detected_versions, key=sort_fn)
+    group_fn = lambda x: (x.id, x.variation, x.language)
+    sorted_compilers = sorted(detected_versions, key=group_fn)

     # Gather items in a dictionary by the id, name variation and language
     compilers_d = {}
-    for sort_key, group in itertools.groupby(compilers_s, key=sort_fn):
+    for sort_key, group in itertools.groupby(sorted_compilers, key=group_fn):
         compiler_id, name_variation, language = sort_key
         by_compiler_id = compilers_d.setdefault(compiler_id, {})
         by_name_variation = by_compiler_id.setdefault(name_variation, {})
         by_name_variation[language] = next(x.path for x in group)

-    # For each unique compiler id select the name variation with most entries
-    # i.e. the one that supports most languages
-    compilers = []
-
-    def _default(cmp_id, paths):
+    def _default_make_compilers(cmp_id, paths):
         operating_system, compiler_name, version = cmp_id
         compiler_cls = spack.compilers.class_for_compiler_name(compiler_name)
         spec = spack.spec.CompilerSpec(compiler_cls.name, version)

@@ -677,16 +672,39 @@ def _default(cmp_id, paths):
         )
         return [compiler]

-    for compiler_id, by_compiler_id in compilers_d.items():
-        _, selected_name_variation = max(
-            (len(by_compiler_id[variation]), variation)
-            for variation in by_compiler_id
-        )
+    # For compilers with the same compiler id:
+    #
+    # - Prefer with C compiler to without
+    # - Prefer with C++ compiler to without
+    # - Prefer no variations to variations (e.g., clang to clang-gpu)
+    #
+    sort_fn = lambda variation: (
+        'cc' not in by_compiler_id[variation],   # None last
+        'cxx' not in by_compiler_id[variation],  # None last
+        getattr(variation, 'prefix', None),
+        getattr(variation, 'suffix', None),
+    )

+    compilers = []
+    for compiler_id, by_compiler_id in compilers_d.items():
+        ordered = sorted(by_compiler_id, key=sort_fn)
+        selected_variation = ordered[0]
+        selected = by_compiler_id[selected_variation]

+        # fill any missing parts from subsequent entries
+        for lang in ['cxx', 'f77', 'fc']:
+            if lang not in selected:
+                next_lang = next((
+                    by_compiler_id[v][lang] for v in ordered
+                    if lang in by_compiler_id[v]), None)
+                if next_lang:
+                    selected[lang] = next_lang

         # Add it to the list of compilers
-        selected = by_compiler_id[selected_name_variation]
         operating_system, _, _ = compiler_id
-        make_compilers = getattr(operating_system, 'make_compilers', _default)
+        make_compilers = getattr(
+            operating_system, 'make_compilers', _default_make_compilers)

         compilers.extend(make_compilers(compiler_id, selected))

     return compilers
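The preference tuple above relies on `False` sorting before `True`: variations that do provide a `cc`/`cxx` come first, and an empty prefix/suffix sorts ahead of a named one. A toy illustration with made-up data (plain tuples stand in for the variation namedtuples):

by_compiler_id = {
    ('', ''):     {'cc': '/usr/bin/clang', 'cxx': '/usr/bin/clang++'},
    ('', '-gpu'): {'cc': '/usr/bin/clang-gpu'},  # no C++ compiler
}
ordered = sorted(
    by_compiler_id,
    key=lambda v: ('cc' not in by_compiler_id[v],
                   'cxx' not in by_compiler_id[v], v))
print(ordered[0])  # ('', '') -- plain clang, which supports both languages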
@@ -23,7 +23,12 @@ def extract_version_from_output(cls, output):
         ver = 'unknown'
         match = re.search(
             # Apple's LLVM compiler has its own versions, so suffix them.
-            r'^Apple (?:LLVM|clang) version ([^ )]+)', output
+            r'^Apple (?:LLVM|clang) version ([^ )]+)',
+            output,
+            # Multi-line, since 'Apple clang' may not be on the first line
+            # in particular, when run as gcc, it seems to output
+            # "Configured with: --prefix=..." as the first line
+            re.M,
         )
         if match:
             ver = match.group(match.lastindex)
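The `re.M` flag matters here because `^` only anchors at the start of the whole string by default; when Apple's clang runs as `gcc`, a `Configured with: ...` line comes first, so a single-line anchor misses the version banner. A quick illustration (the sample output is made up but representative):

import re

output = ('Configured with: --prefix=/Applications/Xcode.app/...\n'
          'Apple clang version 11.0.0 (clang-1100.0.33.8)\n')

pattern = r'^Apple (?:LLVM|clang) version ([^ )]+)'
print(re.search(pattern, output))                 # None: '^' matched line 1 only
print(re.search(pattern, output, re.M).group(1))  # '11.0.0'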
@@ -5,13 +5,13 @@

 import re

-import spack.compilers.clang
+import spack.compiler
+import spack.compilers.apple_clang as apple_clang

-from spack.compiler import Compiler, UnsupportedCompilerFlag
 from spack.version import ver


-class Gcc(Compiler):
+class Gcc(spack.compiler.Compiler):
     # Subclasses use possible names of C compiler
     cc_names = ['gcc']

@@ -64,10 +64,8 @@ def cxx98_flag(self):
     @property
     def cxx11_flag(self):
         if self.version < ver('4.3'):
-            raise UnsupportedCompilerFlag(self,
-                                          "the C++11 standard",
-                                          "cxx11_flag",
-                                          " < 4.3")
+            raise spack.compiler.UnsupportedCompilerFlag(
+                self, "the C++11 standard", "cxx11_flag", " < 4.3")
         elif self.version < ver('4.7'):
             return "-std=c++0x"
         else:

@@ -76,10 +74,8 @@ def cxx11_flag(self):
     @property
     def cxx14_flag(self):
         if self.version < ver('4.8'):
-            raise UnsupportedCompilerFlag(self,
-                                          "the C++14 standard",
-                                          "cxx14_flag",
-                                          "< 4.8")
+            raise spack.compiler.UnsupportedCompilerFlag(
+                self, "the C++14 standard", "cxx14_flag", "< 4.8")
         elif self.version < ver('4.9'):
             return "-std=c++1y"
         elif self.version < ver('6.0'):

@@ -90,10 +86,8 @@ def cxx14_flag(self):
     @property
     def cxx17_flag(self):
         if self.version < ver('5.0'):
-            raise UnsupportedCompilerFlag(self,
-                                          "the C++17 standard",
-                                          "cxx17_flag",
-                                          "< 5.0")
+            raise spack.compiler.UnsupportedCompilerFlag(
+                self, "the C++17 standard", "cxx17_flag", "< 5.0")
         elif self.version < ver('6.0'):
             return "-std=c++1z"
         else:
@@ -102,19 +96,15 @@ def cxx17_flag(self):
     @property
     def c99_flag(self):
         if self.version < ver('4.5'):
-            raise UnsupportedCompilerFlag(self,
-                                          "the C99 standard",
-                                          "c99_flag",
-                                          "< 4.5")
+            raise spack.compiler.UnsupportedCompilerFlag(
+                self, "the C99 standard", "c99_flag", "< 4.5")
         return "-std=c99"

     @property
     def c11_flag(self):
         if self.version < ver('4.7'):
-            raise UnsupportedCompilerFlag(self,
-                                          "the C11 standard",
-                                          "c11_flag",
-                                          "< 4.7")
+            raise spack.compiler.UnsupportedCompilerFlag(
+                self, "the C11 standard", "c11_flag", "< 4.7")
         return "-std=c11"

     @property

@@ -152,10 +142,10 @@ def default_version(cls, cc):

             7.2.0
         """
-        # Skip any gcc versions that are actually clang, like Apple's gcc.
-        # Returning "unknown" makes them not detected by default.
-        # Users can add these manually to compilers.yaml at their own risk.
-        if spack.compilers.clang.Clang.default_version(cc) != 'unknown':
+        # Apple's gcc is actually apple clang, so skip it. Returning
+        # "unknown" ensures this compiler is not detected by default.
+        # Users can add it manually to compilers.yaml at their own risk.
+        if apple_clang.AppleClang.default_version(cc) != 'unknown':
             return 'unknown'

         version = super(Gcc, cls).default_version(cc)
@@ -12,7 +12,10 @@
             "0.14.1": "0.14.1",
             "0.14.2": "0.14.2",
             "0.15": "0.15",
-            "0.15.0": "0.15.0"
+            "0.15.0": "0.15.0",
+            "0.15.1": "0.15.1",
+            "0.15.2": "0.15.2",
+            "0.15.3": "0.15.3"
         }
     },
     "ubuntu:16.04": {

@@ -28,7 +31,10 @@
             "0.14.1": "0.14.1",
             "0.14.2": "0.14.2",
             "0.15": "0.15",
-            "0.15.0": "0.15.0"
+            "0.15.0": "0.15.0",
+            "0.15.1": "0.15.1",
+            "0.15.2": "0.15.2",
+            "0.15.3": "0.15.3"
         }
     },
     "centos:7": {

@@ -44,7 +50,10 @@
             "0.14.1": "0.14.1",
             "0.14.2": "0.14.2",
             "0.15": "0.15",
-            "0.15.0": "0.15.0"
+            "0.15.0": "0.15.0",
+            "0.15.1": "0.15.1",
+            "0.15.2": "0.15.2",
+            "0.15.3": "0.15.3"
         }
     },
     "centos:6": {

@@ -60,7 +69,10 @@
             "0.14.1": "0.14.1",
             "0.14.2": "0.14.2",
             "0.15": "0.15",
-            "0.15.0": "0.15.0"
+            "0.15.0": "0.15.0",
+            "0.15.1": "0.15.1",
+            "0.15.2": "0.15.2",
+            "0.15.3": "0.15.3"
         }
     }
 }
@@ -175,9 +175,20 @@ def activate(
     # MANPATH, PYTHONPATH, etc. All variables that end in PATH (case-sensitive)
     # become PATH variables.
     #
-    if add_view and default_view_name in env.views:
-        with spack.store.db.read_transaction():
-            cmds += env.add_default_view_to_shell(shell)
+    try:
+        if add_view and default_view_name in env.views:
+            with spack.store.db.read_transaction():
+                cmds += env.add_default_view_to_shell(shell)
+    except (spack.repo.UnknownPackageError,
+            spack.repo.UnknownNamespaceError) as e:
+        tty.error(e)
+        tty.die(
+            'Environment view is broken due to a missing package or repo.\n',
+            '  To activate without views enabled, activate with:\n',
+            '    spack env activate -V {0}\n'.format(env.name),
+            '  To remove it and resolve the issue, '
+            'force concretize with the command:\n',
+            '    spack -e {0} concretize --force'.format(env.name))

     return cmds

@@ -230,9 +241,15 @@ def deactivate(shell='sh'):
         cmds += '  unset SPACK_OLD_PS1; export SPACK_OLD_PS1;\n'
         cmds += 'fi;\n'

-    if default_view_name in _active_environment.views:
-        with spack.store.db.read_transaction():
-            cmds += _active_environment.rm_default_view_from_shell(shell)
+    try:
+        if default_view_name in _active_environment.views:
+            with spack.store.db.read_transaction():
+                cmds += _active_environment.rm_default_view_from_shell(shell)
+    except (spack.repo.UnknownPackageError,
+            spack.repo.UnknownNamespaceError) as e:
+        tty.warn(e)
+        tty.warn('Could not fully deactivate view due to missing package '
+                 'or repo, shell environment may be corrupt.')

     tty.debug("Deactivated environmennt '%s'" % _active_environment.name)
     _active_environment = None
@@ -446,8 +463,9 @@ def _eval_conditional(string):


 class ViewDescriptor(object):
-    def __init__(self, root, projections={}, select=[], exclude=[],
+    def __init__(self, base_path, root, projections={}, select=[], exclude=[],
                  link=default_view_link):
+        self.base = base_path
         self.root = root
         self.projections = projections
         self.select = select

@@ -477,15 +495,19 @@ def to_dict(self):
         return ret

     @staticmethod
-    def from_dict(d):
-        return ViewDescriptor(d['root'],
+    def from_dict(base_path, d):
+        return ViewDescriptor(base_path,
+                              d['root'],
                               d.get('projections', {}),
                               d.get('select', []),
                               d.get('exclude', []),
                               d.get('link', default_view_link))

     def view(self):
-        return YamlFilesystemView(self.root, spack.store.layout,
+        root = self.root
+        if not os.path.isabs(root):
+            root = os.path.normpath(os.path.join(self.base, self.root))
+        return YamlFilesystemView(root, spack.store.layout,
                                   ignore_conflicts=True,
                                   projections=self.projections)
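The `view()` change above anchors a relative view root at the environment's own directory rather than the current working directory. A small sketch of the resolution rule (paths are illustrative):

import os

base = '/home/user/myenv'   # the environment directory (self.base)
root = 'views/default'      # a relative root from spack.yaml

if not os.path.isabs(root):
    root = os.path.normpath(os.path.join(base, root))

print(root)  # /home/user/myenv/views/default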
@@ -527,20 +549,29 @@ def regenerate(self, all_specs, roots):
         installed_specs_for_view = set(
             s for s in specs_for_view if s in self and s.package.installed)

-        view = self.view()
+        # To ensure there are no conflicts with packages being installed
+        # that cannot be resolved or have repos that have been removed
+        # we always regenerate the view from scratch. We must first make
+        # sure the root directory exists for the very first time though.
+        root = self.root
+        if not os.path.isabs(root):
+            root = os.path.normpath(os.path.join(self.base, self.root))
+        fs.mkdirp(root)
+        with fs.replace_directory_transaction(root):
+            view = self.view()

-        view.clean()
-        specs_in_view = set(view.get_all_specs())
-        tty.msg("Updating view at {0}".format(self.root))
+            view.clean()
+            specs_in_view = set(view.get_all_specs())
+            tty.msg("Updating view at {0}".format(self.root))

-        rm_specs = specs_in_view - installed_specs_for_view
-        add_specs = installed_specs_for_view - specs_in_view
+            rm_specs = specs_in_view - installed_specs_for_view
+            add_specs = installed_specs_for_view - specs_in_view

-        # pass all_specs in, as it's expensive to read all the
-        # spec.yaml files twice.
-        view.remove_specs(*rm_specs, with_dependents=False,
-                          all_specs=specs_in_view)
-        view.add_specs(*add_specs, with_dependencies=False)
+            # pass all_specs in, as it's expensive to read all the
+            # spec.yaml files twice.
+            view.remove_specs(*rm_specs, with_dependents=False,
+                              all_specs=specs_in_view)
+            view.add_specs(*add_specs, with_dependencies=False)
 class Environment(object):

@@ -586,9 +617,11 @@ def __init__(self, path, init_file=None, with_view=None):
             self.views = {}
         elif with_view is True:
             self.views = {
-                default_view_name: ViewDescriptor(self.view_path_default)}
+                default_view_name: ViewDescriptor(self.path,
+                                                  self.view_path_default)}
         elif isinstance(with_view, six.string_types):
-            self.views = {default_view_name: ViewDescriptor(with_view)}
+            self.views = {default_view_name: ViewDescriptor(self.path,
+                                                            with_view)}
         # If with_view is None, then defer to the view settings determined by
         # the manifest file

@@ -659,11 +692,14 @@ def _read_manifest(self, f, raw_yaml=None):
         # enable_view can be boolean, string, or None
         if enable_view is True or enable_view is None:
             self.views = {
-                default_view_name: ViewDescriptor(self.view_path_default)}
+                default_view_name: ViewDescriptor(self.path,
+                                                  self.view_path_default)}
         elif isinstance(enable_view, six.string_types):
-            self.views = {default_view_name: ViewDescriptor(enable_view)}
+            self.views = {default_view_name: ViewDescriptor(self.path,
+                                                            enable_view)}
         elif enable_view:
-            self.views = dict((name, ViewDescriptor.from_dict(values))
+            path = self.path
+            self.views = dict((name, ViewDescriptor.from_dict(path, values))
                               for name, values in enable_view.items())
         else:
             self.views = {}

@@ -1097,7 +1133,7 @@ def update_default_view(self, viewpath):
             if name in self.views:
                 self.default_view.root = viewpath
             else:
-                self.views[name] = ViewDescriptor(viewpath)
+                self.views[name] = ViewDescriptor(self.path, viewpath)
         else:
             self.views.pop(name, None)
@@ -1111,6 +1147,24 @@ def regenerate_views(self):
         for view in self.views.values():
             view.regenerate(specs, self.roots())

+    def check_views(self):
+        """Checks if the environments default view can be activated."""
+        try:
+            # This is effectively a no-op, but it touches all packages in the
+            # default view if they are installed.
+            for view_name, view in self.views.items():
+                for _, spec in self.concretized_specs():
+                    if spec in view and spec.package.installed:
+                        tty.debug(
+                            'Spec %s in view %s' % (spec.name, view_name))
+        except (spack.repo.UnknownPackageError,
+                spack.repo.UnknownNamespaceError) as e:
+            tty.warn(e)
+            tty.warn(
+                'Environment %s includes out of date packages or repos. '
+                'Loading the environment view will require reconcretization.'
+                % self.name)

     def _env_modifications_for_default_view(self, reverse=False):
         all_mods = spack.util.environment.EnvironmentModifications()

@@ -1490,9 +1544,10 @@ def write(self, regenerate_views=True):
         default_name = default_view_name
         if self.views and len(self.views) == 1 and default_name in self.views:
             path = self.default_view.root
-            if self.default_view == ViewDescriptor(self.view_path_default):
+            if self.default_view == ViewDescriptor(self.path,
+                                                   self.view_path_default):
                 view = True
-            elif self.default_view == ViewDescriptor(path):
+            elif self.default_view == ViewDescriptor(self.path, path):
                 view = path
             else:
                 view = dict((name, view.to_dict())
@@ -289,10 +289,11 @@ def candidate_urls(self):
     @_needs_stage
     def fetch(self):
         if self.archive_file:
-            tty.msg("Already downloaded %s" % self.archive_file)
+            tty.debug('Already downloaded {0}'.format(self.archive_file))
             return

         url = None
+        errors = []
         for url in self.candidate_urls:
             try:
                 partial_file, save_file = self._fetch_from_url(url)

@@ -300,8 +301,10 @@ def fetch(self):
                     os.rename(partial_file, save_file)
                 break
             except FetchError as e:
-                tty.msg(str(e))
-                pass
+                errors.append(str(e))
+
+        for msg in errors:
+            tty.debug(msg)

         if not self.archive_file:
             raise FailedDownloadError(url)

@@ -312,7 +315,7 @@ def _fetch_from_url(self, url):
         if self.stage.save_filename:
             save_file = self.stage.save_filename
             partial_file = self.stage.save_filename + '.part'
-        tty.msg("Fetching %s" % url)
+        tty.debug('Fetching {0}'.format(url))
         if partial_file:
             save_args = ['-C',
                          '-',  # continue partial downloads

@@ -327,6 +330,8 @@ def _fetch_from_url(self, url):
             '-',  # print out HTML headers
             '-L',  # resolve 3xx redirects
             url,
+            '--stderr',  # redirect stderr output
+            '-',  # redirect to stdout
         ]

         if not spack.config.get('config:verify_ssl'):

@@ -412,8 +417,8 @@ def cachable(self):
     @_needs_stage
     def expand(self):
         if not self.expand_archive:
-            tty.msg("Staging unexpanded archive %s in %s" % (
-                self.archive_file, self.stage.source_path))
+            tty.debug('Staging unexpanded archive {0} in {1}'
+                      .format(self.archive_file, self.stage.source_path))
             if not self.stage.expanded:
                 mkdirp(self.stage.source_path)
             dest = os.path.join(self.stage.source_path,

@@ -421,7 +426,7 @@ def expand(self):
             shutil.move(self.archive_file, dest)
             return

-        tty.msg("Staging archive: %s" % self.archive_file)
+        tty.debug('Staging archive: {0}'.format(self.archive_file))

         if not self.archive_file:
             raise NoArchiveFileError(
@@ -564,7 +569,7 @@ def fetch(self):
             raise

         # Notify the user how we fetched.
-        tty.msg('Using cached archive: %s' % path)
+        tty.debug('Using cached archive: {0}'.format(path))


 class VCSFetchStrategy(FetchStrategy):

@@ -594,7 +599,8 @@ def __init__(self, **kwargs):

     @_needs_stage
     def check(self):
-        tty.msg("No checksum needed when fetching with %s" % self.url_attr)
+        tty.debug('No checksum needed when fetching with {0}'
+                  .format(self.url_attr))

     @_needs_stage
     def expand(self):

@@ -672,7 +678,7 @@ def go(self):

     @_needs_stage
     def fetch(self):
-        tty.msg("Getting go resource:", self.url)
+        tty.debug('Getting go resource: {0}'.format(self.url))

         with working_dir(self.stage.path):
             try:

@@ -788,10 +794,10 @@ def _repo_info(self):
     @_needs_stage
     def fetch(self):
         if self.stage.expanded:
-            tty.msg("Already fetched {0}".format(self.stage.source_path))
+            tty.debug('Already fetched {0}'.format(self.stage.source_path))
             return

-        tty.msg("Cloning git repository: {0}".format(self._repo_info()))
+        tty.debug('Cloning git repository: {0}'.format(self._repo_info()))

         git = self.git
         if self.commit:

@@ -959,10 +965,10 @@ def mirror_id(self):
     @_needs_stage
     def fetch(self):
         if self.stage.expanded:
-            tty.msg("Already fetched %s" % self.stage.source_path)
+            tty.debug('Already fetched {0}'.format(self.stage.source_path))
             return

-        tty.msg("Checking out subversion repository: %s" % self.url)
+        tty.debug('Checking out subversion repository: {0}'.format(self.url))

         args = ['checkout', '--force', '--quiet']
         if self.revision:

@@ -1068,13 +1074,14 @@ def mirror_id(self):
     @_needs_stage
     def fetch(self):
         if self.stage.expanded:
-            tty.msg("Already fetched %s" % self.stage.source_path)
+            tty.debug('Already fetched {0}'.format(self.stage.source_path))
             return

         args = []
         if self.revision:
             args.append('at revision %s' % self.revision)
-        tty.msg("Cloning mercurial repository:", self.url, *args)
+        tty.debug('Cloning mercurial repository: {0} {1}'
+                  .format(self.url, args))

         args = ['clone']

@@ -1130,7 +1137,7 @@ def __init__(self, *args, **kwargs):
     @_needs_stage
     def fetch(self):
         if self.archive_file:
-            tty.msg("Already downloaded %s" % self.archive_file)
+            tty.debug('Already downloaded {0}'.format(self.archive_file))
             return

         parsed_url = url_util.parse(self.url)

@@ -1138,7 +1145,7 @@ def fetch(self):
             raise FetchError(
                 'S3FetchStrategy can only fetch from s3:// urls.')

-        tty.msg("Fetching %s" % self.url)
+        tty.debug('Fetching {0}'.format(self.url))

         basename = os.path.basename(parsed_url.path)
@@ -215,18 +215,18 @@ def _hms(seconds):

def _install_from_cache(pkg, cache_only, explicit, unsigned=False):
    """
-    Install the package from binary cache
+    Extract the package from binary cache

    Args:
        pkg (PackageBase): the package to install from the binary cache
-        cache_only (bool): only install from binary cache
+        cache_only (bool): only extract from binary cache
        explicit (bool): ``True`` if installing the package was explicitly
            requested by the user, otherwise, ``False``
        unsigned (bool): ``True`` if binary package signatures to be checked,
            otherwise, ``False``

    Return:
-        (bool) ``True`` if the package was installed from binary cache,
+        (bool) ``True`` if the package was extracted from binary cache,
        ``False`` otherwise
    """
    installed_from_cache = _try_install_from_binary_cache(pkg, explicit,
@@ -237,10 +237,10 @@ def _install_from_cache(pkg, cache_only, explicit, unsigned=False):
        if cache_only:
            tty.die('{0} when cache-only specified'.format(pre))

-        tty.debug('{0}: installing from source'.format(pre))
+        tty.msg('{0}: installing from source'.format(pre))
        return False

-    tty.debug('Successfully installed {0} from binary cache'.format(pkg_id))
+    tty.debug('Successfully extracted {0} from binary cache'.format(pkg_id))
    _print_installed_pkg(pkg.spec.prefix)
    spack.hooks.post_install(pkg.spec)
    return True
@@ -275,17 +275,17 @@ def _process_external_package(pkg, explicit):
    if spec.external_module:
        tty.msg('{0} has external module in {1}'
                .format(pre, spec.external_module))
-        tty.msg('{0} is actually installed in {1}'
-                .format(pre, spec.external_path))
+        tty.debug('{0} is actually installed in {1}'
+                  .format(pre, spec.external_path))
    else:
-        tty.msg("{0} externally installed in {1}"
+        tty.msg('{0} externally installed in {1}'
                .format(pre, spec.external_path))

    try:
        # Check if the package was already registered in the DB.
        # If this is the case, then just exit.
        rec = spack.store.db.get_record(spec)
-        tty.msg('{0} already registered in DB'.format(pre))
+        tty.debug('{0} already registered in DB'.format(pre))

        # Update the value of rec.explicit if it is necessary
        _update_explicit_entry_in_db(pkg, rec, explicit)
@@ -294,11 +294,11 @@ def _process_external_package(pkg, explicit):
        # If not, register it and generate the module file.
        # For external packages we just need to run
        # post-install hooks to generate module files.
-        tty.msg('{0} generating module file'.format(pre))
+        tty.debug('{0} generating module file'.format(pre))
        spack.hooks.post_install(spec)

        # Add to the DB
-        tty.msg('{0} registering into DB'.format(pre))
+        tty.debug('{0} registering into DB'.format(pre))
        spack.store.db.add(spec, None, explicit=explicit)


@@ -314,7 +314,7 @@ def _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned):
            otherwise, ``False``

    Return:
-        (bool) ``True`` if the package was installed from binary cache,
+        (bool) ``True`` if the package was extracted from binary cache,
        else ``False``
    """
    tarball = binary_distribution.download_tarball(binary_spec)
@@ -325,7 +325,7 @@ def _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned):
        return False

    pkg_id = package_id(pkg)
-    tty.msg('Installing {0} from binary cache'.format(pkg_id))
+    tty.msg('Extracting {0} from binary cache'.format(pkg_id))
    binary_distribution.extract_tarball(binary_spec, tarball, allow_root=False,
                                        unsigned=unsigned, force=False)
    pkg.installed_from_binary_cache = True
@@ -335,10 +335,10 @@ def _process_binary_cache_tarball(pkg, binary_spec, explicit, unsigned):

def _try_install_from_binary_cache(pkg, explicit, unsigned=False):
    """
-    Try to install the package from binary cache.
+    Try to extract the package from binary cache.

    Args:
-        pkg (PackageBase): the package to be installed from binary cache
+        pkg (PackageBase): the package to be extracted from binary cache
        explicit (bool): the package was explicitly requested by the user
        unsigned (bool): ``True`` if binary package signatures to be checked,
            otherwise, ``False``
@@ -369,7 +369,7 @@ def _update_explicit_entry_in_db(pkg, rec, explicit):
    with spack.store.db.write_transaction():
        rec = spack.store.db.get_record(pkg.spec)
        message = '{s.name}@{s.version} : marking the package explicit'
-        tty.msg(message.format(s=pkg.spec))
+        tty.debug(message.format(s=pkg.spec))
        rec.explicit = True


@@ -405,9 +405,14 @@ def dump_packages(spec, path):
        source = spack.store.layout.build_packages_path(node)
        source_repo_root = os.path.join(source, node.namespace)

-        # There's no provenance installed for the source package. Skip it.
-        # User can always get something current from the builtin repo.
-        if not os.path.isdir(source_repo_root):
+        # If there's no provenance installed for the package, skip it.
+        # If it's external, skip it because it either:
+        # 1) wasn't built with Spack, so it has no Spack metadata, or
+        # 2) was built by another Spack instance, and we do not
+        #    (currently) use Spack metadata to associate repos with
+        #    externals built by other Spack instances.
+        # Spack can always get something current from the builtin repo.
+        if node.external or not os.path.isdir(source_repo_root):
            continue

        # Create a source repo and get the pkg directory out of it.
@@ -447,7 +452,8 @@ def install_msg(name, pid):
    Return:
        (str) Colorized installing message
    """
-    return '{0}: '.format(pid) + colorize('@*{Installing} @*g{%s}' % name)
+    pre = '{0}: '.format(pid) if tty.show_pid() else ''
+    return pre + colorize('@*{Installing} @*g{%s}' % name)


def log(pkg):
@@ -1052,11 +1058,15 @@ def _install_task(self, task, **kwargs):
        if use_cache and \
                _install_from_cache(pkg, cache_only, explicit, unsigned):
            self._update_installed(task)
+            if task.compiler:
+                spack.compilers.add_compilers_to_config(
+                    spack.compilers.find_compilers([pkg.spec.prefix]))
            return

        pkg.run_tests = (tests is True or tests and pkg.name in tests)

-        pre = '{0}: {1}:'.format(self.pid, pkg.name)
+        pid = '{0}: '.format(self.pid) if tty.show_pid() else ''
+        pre = '{0}{1}:'.format(pid, pkg.name)

        def build_process():
            """
@@ -1075,8 +1085,8 @@ def build_process():
            pkg.do_stage()

            pkg_id = package_id(pkg)
-            tty.msg('{0} Building {1} [{2}]'
-                    .format(pre, pkg_id, pkg.build_system_class))
+            tty.debug('{0} Building {1} [{2}]'
+                      .format(pre, pkg_id, pkg.build_system_class))

            # get verbosity from do_install() parameter or saved value
            echo = verbose
@@ -1097,8 +1107,8 @@ def build_process():
            if install_source and os.path.isdir(source_path):
                src_target = os.path.join(pkg.spec.prefix, 'share',
                                          pkg.name, 'src')
-                tty.msg('{0} Copying source to {1}'
-                        .format(pre, src_target))
+                tty.debug('{0} Copying source to {1}'
+                          .format(pre, src_target))
                fs.install_tree(pkg.stage.source_path, src_target)

            # Do the real install in the source directory.
@@ -1120,7 +1130,7 @@ def build_process():
                pass

            # cache debug settings
-            debug_enabled = tty.is_debug()
+            debug_level = tty.debug_level()

            # Spawn a daemon that reads from a pipe and redirects
            # everything to log_path
@@ -1129,11 +1139,11 @@ def build_process():
                        pkg.phases, pkg._InstallPhase_phases):

                    with logger.force_echo():
-                        inner_debug = tty.is_debug()
-                        tty.set_debug(debug_enabled)
+                        inner_debug_level = tty.debug_level()
+                        tty.set_debug(debug_level)
                        tty.msg("{0} Executing phase: '{1}'"
                                .format(pre, phase_name))
-                        tty.set_debug(inner_debug)
+                        tty.set_debug(inner_debug_level)

                    # Redirect stdout and stderr to daemon pipe
                    phase = getattr(pkg, phase_attr)
@@ -1149,11 +1159,11 @@ def build_process():
            pkg._total_time = time.time() - start_time
            build_time = pkg._total_time - pkg._fetch_time

-            tty.msg('{0} Successfully installed {1}'
-                    .format(pre, pkg_id),
-                    'Fetch: {0}. Build: {1}. Total: {2}.'
-                    .format(_hms(pkg._fetch_time), _hms(build_time),
-                            _hms(pkg._total_time)))
+            tty.debug('{0} Successfully installed {1}'
+                      .format(pre, pkg_id),
+                      'Fetch: {0}. Build: {1}. Total: {2}.'
+                      .format(_hms(pkg._fetch_time), _hms(build_time),
+                              _hms(pkg._total_time)))
            _print_installed_pkg(pkg.prefix)

            # preserve verbosity across runs
@@ -1184,7 +1194,8 @@ def build_process():
        except spack.build_environment.StopPhase as e:
            # A StopPhase exception means that do_install was asked to
            # stop early from clients, and is not an error at this point
-            tty.debug('{0} {1}'.format(self.pid, str(e)))
+            pid = '{0}'.format(self.pid) if tty.show_pid() else ''
+            tty.debug('{0}{1}'.format(pid, str(e)))
            tty.debug('Package stage directory : {0}'
                      .format(pkg.stage.source_path))

@@ -1557,9 +1568,14 @@ def install(self, **kwargs):
            except (Exception, SystemExit) as exc:
                # Best effort installs suppress the exception and mark the
                # package as a failure UNLESS this is the explicit package.
-                err = 'Failed to install {0} due to {1}: {2}'
-                tty.error(err.format(pkg.name, exc.__class__.__name__,
-                          str(exc)))
+                if (not isinstance(exc, spack.error.SpackError) or
+                        not exc.printed):
+                    # SpackErrors can be printed by the build process or at
+                    # lower levels -- skip printing if already printed.
+                    # TODO: sort out this and SpackError.print_context()
+                    err = 'Failed to install {0} due to {1}: {2}'
+                    tty.error(
+                        err.format(pkg.name, exc.__class__.__name__, str(exc)))

                self._update_failed(task, True, exc)

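A quick illustration of the pid-prefix pattern that the `install_msg` change above introduces: the process id is prepended only when `tty.show_pid()` says so, so parallel installs can be disentangled without cluttering ordinary output. This is a minimal self-contained sketch; `show_pid` and the verbosity threshold here are stand-ins for llnl.util.tty's helpers, not Spack's actual API:

    import os

    def show_pid(verbosity_level=0):
        # Stand-in: only show pids when running with enough debug verbosity.
        return verbosity_level >= 2

    def install_msg(name, pid, verbosity_level=0):
        # Prefix the message with the process id only when pids are shown.
        pre = '{0}: '.format(pid) if show_pid(verbosity_level) else ''
        return pre + 'Installing {0}'.format(name)

    print(install_msg('zlib', os.getpid()))                     # no prefix
    print(install_msg('zlib', os.getpid(), verbosity_level=2))  # '12345: Installing zlib'
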
@@ -362,8 +362,9 @@ def make_argument_parser(**kwargs):
        '-C', '--config-scope', dest='config_scopes', action='append',
        metavar='DIR', help="add a custom configuration scope")
    parser.add_argument(
-        '-d', '--debug', action='store_true',
-        help="write out debug logs during compile")
+        '-d', '--debug', action='count', default=0,
+        help="write out debug messages "
+             "(more d's for more verbosity: -d, -dd, -ddd, etc.)")
    parser.add_argument(
        '--timestamp', action='store_true',
        help="Add a timestamp to tty output")
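The switch from `action='store_true'` to `action='count'` is what makes `-d`, `-dd`, `-ddd` accumulate: argparse counts repeated occurrences of the flag, and the count can then be handed to a debug-level setter. A minimal sketch of the idiom (`set_debug` below is a placeholder for the tty call, not the real signature):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-d', '--debug', action='count', default=0,
        help="write out debug messages "
             "(more d's for more verbosity: -d, -dd, -ddd, etc.)")

    args = parser.parse_args(['-ddd'])
    assert args.debug == 3  # each repeated -d increments the count

    def set_debug(level):
        # placeholder for tty.set_debug(); higher level == chattier output
        print('debug level set to {0}'.format(level))

    set_debug(args.debug)
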
@@ -438,7 +439,7 @@ def setup_main_options(args):
    tty.set_debug(args.debug)
    tty.set_stacktrace(args.stacktrace)

-    # debug must be set first so that it can even affect behvaior of
+    # debug must be set first so that it can even affect behavior of
    # errors raised by spack.config.
    if args.debug:
        spack.error.debug = True
@@ -702,15 +703,15 @@ def main(argv=None):
        if stored_var_name in os.environ:
            os.environ[var] = os.environ[stored_var_name]

+    # make spack.config aware of any command line configuration scopes
+    if args.config_scopes:
+        spack.config.command_line_scopes = args.config_scopes
+
    # activate an environment if one was specified on the command line
    if not args.no_env:
        env = ev.find_environment(args)
        if env:
-            ev.activate(env, args.use_env_repo)
-
-    # make spack.config aware of any command line configuration scopes
-    if args.config_scopes:
-        spack.config.command_line_scopes = args.config_scopes
+            ev.activate(env, args.use_env_repo, add_view=False)

    if args.print_shell_vars:
        print_setup_info(*args.print_shell_vars.split(','))
@@ -217,8 +217,16 @@ def root_path(name):
    """

    # Root folders where the various module files should be written
-    roots = spack.config.get('config:module_roots', {})
-    path = roots.get(name, os.path.join(spack.paths.share_path, name))
+    active_upstream = spack.config.get('config:active_upstream')
+    if active_upstream is not None:
+        # Install module files to the upstream share directory.
+        # Extra logic is needed to determine this location.
+        roots = spack.config.get('upstreams')[active_upstream]['modules']
+        path = roots.get(name, os.path.join(spack.paths.user_share_path, name))
+    else:
+        # If no upstream is active, install the module file to the user
+        # share directory.
+        roots = spack.config.get('config:module_roots', {})
+        path = roots.get(name, os.path.join(spack.paths.user_share_path, name))
    return spack.util.path.canonicalize_path(path)
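The upstream-aware lookup above boils down to choosing one of two dictionaries of per-module-type roots and then falling back to a share-directory default. A standalone sketch under assumed config shapes (the 'upstreams' layout shown is an assumption of this fork's configuration, not stock Spack):

    import os

    # Assumed config shapes, mirroring the logic in root_path() above.
    config = {
        'config:active_upstream': 'site-upstream',
        'config:module_roots': {'tcl': '/opt/spack/share/spack/tcl'},
        'upstreams': {
            'site-upstream': {'modules': {'tcl': '/upstream/share/spack/tcl'}},
        },
    }
    user_share_path = os.path.expanduser('~/.spack/share/spack')

    def root_path(name):
        active = config.get('config:active_upstream')
        if active is not None:
            roots = config['upstreams'][active]['modules']
        else:
            roots = config.get('config:module_roots', {})
        # Fall back to the user share directory when no root is configured.
        return roots.get(name, os.path.join(user_share_path, name))

    print(root_path('tcl'))   # -> /upstream/share/spack/tcl
    print(root_path('lmod'))  # -> ~/.spack/share/spack/lmod (fallback)
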

@@ -97,6 +97,9 @@ def __str__(self):
    def _detect_crayos_version(cls):
        if os.path.isfile(_cle_release_file):
            release_attrs = read_cle_release_file()
+            if 'RELEASE' not in release_attrs:
+                # This Cray system uses a base OS, not CLE/CNL
+                return None
            v = spack.version.Version(release_attrs['RELEASE'])
            return v[0]
        elif os.path.isfile(_clerelease_file):

@@ -22,6 +22,7 @@
import sys
import textwrap
import time
+import traceback
from six import StringIO
from six import string_types
from six import with_metaclass
@@ -1121,9 +1122,8 @@ def do_fetch(self, mirror_only=False):
            raise ValueError("Can only fetch concrete packages.")

        if not self.has_code:
-            tty.msg(
-                "No fetch required for %s: package has no code." % self.name
-            )
+            tty.debug('No fetch required for {0}: package has no code.'
+                      .format(self.name))

        start_time = time.time()
        checksum = spack.config.get('config:checksum')
@@ -1139,7 +1139,8 @@ def do_fetch(self, mirror_only=False):
                ignore_checksum = tty.get_yes_or_no(" Fetch anyway?",
                                                    default=False)
                if ignore_checksum:
-                    tty.msg("Fetching with no checksum.", ck_msg)
+                    tty.debug('Fetching with no checksum. {0}'
+                              .format(ck_msg))

            if not ignore_checksum:
                raise FetchError("Will not fetch %s" %
@@ -1195,7 +1196,7 @@ def do_patch(self):

        # If there are no patches, note it.
        if not patches and not has_patch_fun:
-            tty.msg("No patches needed for %s" % self.name)
+            tty.debug('No patches needed for {0}'.format(self.name))
            return

        # Construct paths to special files in the archive dir used to
@@ -1208,15 +1209,15 @@ def do_patch(self):
        # If we encounter an archive that failed to patch, restage it
        # so that we can apply all the patches again.
        if os.path.isfile(bad_file):
-            tty.msg("Patching failed last time. Restaging.")
+            tty.debug('Patching failed last time. Restaging.')
            self.stage.restage()

        # If this file exists, then we already applied all the patches.
        if os.path.isfile(good_file):
-            tty.msg("Already patched %s" % self.name)
+            tty.debug('Already patched {0}'.format(self.name))
            return
        elif os.path.isfile(no_patches_file):
-            tty.msg("No patches needed for %s" % self.name)
+            tty.debug('No patches needed for {0}'.format(self.name))
            return

        # Apply all the patches for specs that match this one
@@ -1225,7 +1226,7 @@ def do_patch(self):
            try:
                with working_dir(self.stage.source_path):
                    patch.apply(self.stage)
-                tty.msg('Applied patch %s' % patch.path_or_url)
+                tty.debug('Applied patch {0}'.format(patch.path_or_url))
                patched = True
            except spack.error.SpackError as e:
                tty.debug(e)
@@ -1239,7 +1240,7 @@ def do_patch(self):
            try:
                with working_dir(self.stage.source_path):
                    self.patch()
-                tty.msg("Ran patch() for %s" % self.name)
+                tty.debug('Ran patch() for {0}'.format(self.name))
                patched = True
            except spack.multimethod.NoSuchMethodError:
                # We are running a multimethod without a default case.
@@ -1249,12 +1250,12 @@ def do_patch(self):
                    # directive, AND the patch function didn't apply, say
                    # no patches are needed. Otherwise, we already
                    # printed a message for each patch.
-                    tty.msg("No patches needed for %s" % self.name)
+                    tty.debug('No patches needed for {0}'.format(self.name))
            except spack.error.SpackError as e:
                tty.debug(e)

                # Touch bad file if anything goes wrong.
-                tty.msg("patch() function failed for %s" % self.name)
+                tty.msg('patch() function failed for {0}'.format(self.name))
                touch(bad_file)
                raise

@@ -1341,7 +1342,7 @@ def _has_make_target(self, target):
            if os.path.exists(makefile):
                break
        else:
-            tty.msg('No Makefile found in the build directory')
+            tty.debug('No Makefile found in the build directory')
            return False

        # Check if 'target' is a valid target.
@@ -1372,7 +1373,8 @@ def _has_make_target(self, target):

        for missing_target_msg in missing_target_msgs:
            if missing_target_msg.format(target) in stderr:
-                tty.msg("Target '" + target + "' not found in " + makefile)
+                tty.debug("Target '{0}' not found in {1}"
+                          .format(target, makefile))
                return False

        return True
@@ -1400,7 +1402,7 @@ def _has_ninja_target(self, target):

        # Check if we have a Ninja build script
        if not os.path.exists('build.ninja'):
-            tty.msg('No Ninja build script found in the build directory')
+            tty.debug('No Ninja build script found in the build directory')
            return False

        # Get a list of all targets in the Ninja build script
@@ -1412,7 +1414,8 @@ def _has_ninja_target(self, target):
                   if line.startswith(target + ':')]

        if not matches:
-            tty.msg("Target '" + target + "' not found in build.ninja")
+            tty.debug("Target '{0}' not found in build.ninja"
+                      .format(target))
            return False

        return True
@@ -1719,11 +1722,12 @@ def uninstall_by_spec(spec, force=False, deprecator=None):
            if specs:
                if deprecator:
                    spack.store.db.deprecate(specs[0], deprecator)
-                    tty.msg("Deprecating stale DB entry for "
-                            "%s" % spec.short_spec)
+                    tty.debug('Deprecating stale DB entry for {0}'
+                              .format(spec.short_spec))
                else:
                    spack.store.db.remove(specs[0])
-                    tty.msg("Removed stale DB entry for %s" % spec.short_spec)
+                    tty.debug('Removed stale DB entry for {0}'
+                              .format(spec.short_spec))
                return
            else:
                raise InstallError(str(spec) + " is not installed.")
@@ -1744,7 +1748,23 @@ def uninstall_by_spec(spec, force=False, deprecator=None):
        with spack.store.db.prefix_write_lock(spec):

            if pkg is not None:
-                spack.hooks.pre_uninstall(spec)
+                try:
+                    spack.hooks.pre_uninstall(spec)
+                except Exception as error:
+                    if force:
+                        error_msg = (
+                            "One or more pre_uninstall hooks have failed"
+                            " for {0}, but Spack is continuing with the"
+                            " uninstall".format(str(spec)))
+                        if isinstance(error, spack.error.SpackError):
+                            error_msg += (
+                                "\n\nError message: {0}".format(str(error)))
+                        tty.warn(error_msg)
+                        # Note that if the uninstall succeeds then we won't be
+                        # seeing this error again and won't have another chance
+                        # to run the hook.
+                    else:
+                        raise

            # Uninstalling in Spack only requires removing the prefix.
            if not spec.external:
@@ -1765,9 +1785,22 @@ def uninstall_by_spec(spec, force=False, deprecator=None):
            spack.store.db.remove(spec)

        if pkg is not None:
-            spack.hooks.post_uninstall(spec)
+            try:
+                spack.hooks.post_uninstall(spec)
+            except Exception:
+                # If there is a failure here, this is our only chance to do
+                # something about it: at this point the Spec has been removed
+                # from the DB and prefix, so the post-uninstallation hooks
+                # will not have another chance to run.
+                error_msg = (
+                    "One or more post-uninstallation hooks failed for"
+                    " {0}, but the prefix has been removed (if it is not"
+                    " external).".format(str(spec)))
+                tb_msg = traceback.format_exc()
+                error_msg += "\n\nThe error:\n\n{0}".format(tb_msg)
+                tty.warn(error_msg)

-        tty.msg("Successfully uninstalled %s" % spec.short_spec)
+        tty.msg('Successfully uninstalled {0}'.format(spec.short_spec))

    def do_uninstall(self, force=False):
        """Uninstall this package by spec."""

@@ -45,6 +45,7 @@
stage_path = os.path.join(user_var_path, "stage")
repos_path = os.path.join(var_path, "repos")
share_path = os.path.join(prefix, "share", "spack")
+user_share_path = os.path.join(user_config_path, "share", "spack")

# Paths to built-in Spack repositories.
packages_path = os.path.join(repos_path, "builtin")

@@ -20,7 +20,7 @@
_craype_name_to_target_name = {
    'x86-cascadelake': 'cascadelake',
    'x86-naples': 'zen',
-    'x86-rome': 'zen',  # Cheating because we have the wrong modules on rzcrayz
+    'x86-rome': 'zen2',
    'x86-skylake': 'skylake_avx512',
    'mic-knl': 'mic_knl',
    'interlagos': 'bulldozer',

@@ -804,15 +804,17 @@ def relocate_text(
            where they should be relocated
    """
    # TODO: reduce the number of arguments (8 seems too much)
-    sbang_regex = r'#!/bin/bash {0}/bin/sbang'.format(orig_spack)
-    new_sbang = r'#!/bin/bash {0}/bin/sbang'.format(new_spack)
+    orig_sbang = '#!/bin/bash {0}/bin/sbang'.format(orig_spack)
+    new_sbang = '#!/bin/bash {0}/bin/sbang'.format(new_spack)

    for file in files:
        _replace_prefix_text(file, orig_install_prefix, new_install_prefix)
        for orig_dep_prefix, new_dep_prefix in new_prefixes.items():
            _replace_prefix_text(file, orig_dep_prefix, new_dep_prefix)
        _replace_prefix_text(file, orig_layout_root, new_layout_root)
-        _replace_prefix_text(file, sbang_regex, new_sbang)
+        # relocate the sbang location only if the spack directory changed
+        if orig_spack != new_spack:
+            _replace_prefix_text(file, orig_sbang, new_sbang)


def relocate_text_bin(
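The change above swaps a regex-style pattern for a plain string and guards the rewrite behind `orig_spack != new_spack`, so files are left untouched when the Spack prefix has not moved. A minimal sketch of that plain-text replacement; whole-file read/replace/write here is a simplification of what `_replace_prefix_text` does internally:

    def replace_prefix_text(path, old, new):
        # Simplified stand-in for _replace_prefix_text: plain-string
        # replacement (no regex, matching the change above).
        with open(path) as f:
            text = f.read()
        with open(path, 'w') as f:
            f.write(text.replace(old, new))

    def relocate_sbang(files, orig_spack, new_spack):
        orig_sbang = '#!/bin/bash {0}/bin/sbang'.format(orig_spack)
        new_sbang = '#!/bin/bash {0}/bin/sbang'.format(new_spack)
        # Only rewrite when the Spack directory actually changed.
        if orig_spack != new_spack:
            for path in files:
                replace_prefix_text(path, orig_sbang, new_sbang)
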

@@ -32,7 +32,8 @@
            'enum': [
                'develop',
                '0.14', '0.14.0', '0.14.1', '0.14.2',
-                '0.15', '0.15.0',
+                '0.15', '0.15.0', '0.15.1', '0.15.2',
+                '0.15.3',
            ]
        }
    },
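The enum above is a JSON-schema constraint: configuration naming an unknown Spack version is rejected at validation time, so each release must be added to the list. A hedged sketch of how such an enum behaves, using the third-party jsonschema package; the fragment below is illustrative, not the full Spack schema:

    import jsonschema

    schema = {'type': 'string',
              'enum': ['develop', '0.15', '0.15.0', '0.15.1',
                       '0.15.2', '0.15.3']}

    jsonschema.validate('0.15.3', schema)  # passes after this change

    try:
        jsonschema.validate('0.15.4', schema)  # not in the enum
    except jsonschema.ValidationError as e:
        print('rejected:', e.message)
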

@@ -415,10 +415,11 @@ def fetch(self, mirror_only=False):
            # Join URLs of mirror roots with mirror paths. Because
            # urljoin() will strip everything past the final '/' in
            # the root, so we add a '/' if it is not present.
-            urls = []
+            mirror_urls = []
            for mirror in spack.mirror.MirrorCollection().values():
                for rel_path in self.mirror_paths:
-                    urls.append(url_util.join(mirror.fetch_url, rel_path))
+                    mirror_urls.append(
+                        url_util.join(mirror.fetch_url, rel_path))

            # If this archive is normally fetched from a tarball URL,
            # then use the same digest. `spack mirror` ensures that
@@ -436,7 +437,8 @@ def fetch(self, mirror_only=False):
            self.skip_checksum_for_mirror = not bool(digest)

            # Add URL strategies for all the mirrors with the digest
-            for url in urls:
+            # Insert fetchers in the order that the URLs are provided.
+            for url in reversed(mirror_urls):
                fetchers.insert(
                    0, fs.from_url_scheme(
                        url, digest, expand=expand, extension=extension))
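The switch to `reversed(mirror_urls)` matters because `fetchers.insert(0, ...)` on its own reverses whatever order it is fed; iterating the reversed list cancels that out, so the user's configured mirror order survives at the front of the fetcher list. A small pure-Python demonstration of the idea:

    fetchers = ['default']

    mirror_urls = ['m1', 'm2', 'm3']  # order the mirrors were configured in
    for url in reversed(mirror_urls):
        # insert(0, ...) alone would reverse the order; reversed() cancels that.
        fetchers.insert(0, url)

    assert fetchers == ['m1', 'm2', 'm3', 'default']
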
@@ -458,6 +460,11 @@ def generate_fetchers():
            for fetcher in dynamic_fetchers:
                yield fetcher

+        def print_errors(errors):
+            for msg in errors:
+                tty.debug(msg)
+
+        errors = []
        for fetcher in generate_fetchers():
            try:
                fetcher.stage = self
@@ -468,14 +475,18 @@ def generate_fetchers():
                # Don't bother reporting when something is not cached.
                continue
            except spack.error.SpackError as e:
-                tty.msg("Fetching from %s failed." % fetcher)
+                errors.append('Fetching from {0} failed.'.format(fetcher))
                tty.debug(e)
                continue
        else:
-            err_msg = "All fetchers failed for %s" % self.name
+            print_errors(errors)
+
+            err_msg = 'All fetchers failed for {0}'.format(self.name)
            self.fetcher = self.default_fetcher
            raise fs.FetchError(err_msg, None)

+        print_errors(errors)
+
    def check(self):
        """Check the downloaded archive against a checksum digest.
           No-op if this stage checks code out of a repository."""
@@ -536,9 +547,9 @@ def expand_archive(self):
           downloaded."""
        if not self.expanded:
            self.fetcher.expand()
-            tty.msg("Created stage in %s" % self.path)
+            tty.debug('Created stage in {0}'.format(self.path))
        else:
-            tty.msg("Already staged %s in %s" % (self.name, self.path))
+            tty.debug('Already staged {0} in {1}'.format(self.name, self.path))

    def restage(self):
        """Removes the expanded archive path if it exists, then re-expands
@@ -709,13 +720,13 @@ def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def fetch(self, *args, **kwargs):
-        tty.msg("No need to fetch for DIY.")
+        tty.debug('No need to fetch for DIY.')

    def check(self):
-        tty.msg("No checksum needed for DIY.")
+        tty.debug('No checksum needed for DIY.')

    def expand_archive(self):
-        tty.msg("Using source directory: %s" % self.source_path)
+        tty.debug('Using source directory: {0}'.format(self.source_path))

    @property
    def expanded(self):
@@ -733,7 +744,7 @@ def destroy(self):
        pass

    def cache_local(self):
-        tty.msg("Sources for DIY stages are not cached")
+        tty.debug('Sources for DIY stages are not cached')


def ensure_access(file):
@@ -783,12 +794,12 @@ def get_checksums_for_versions(
    max_len = max(len(str(v)) for v in sorted_versions)
    num_ver = len(sorted_versions)

-    tty.msg("Found {0} version{1} of {2}:".format(
-            num_ver, '' if num_ver == 1 else 's', name),
-            "",
-            *spack.cmd.elide_list(
-                ["{0:{1}} {2}".format(str(v), max_len, url_dict[v])
-                 for v in sorted_versions]))
+    tty.debug('Found {0} version{1} of {2}:'.format(
+              num_ver, '' if num_ver == 1 else 's', name),
+              '',
+              *spack.cmd.elide_list(
+                  ['{0:{1}} {2}'.format(str(v), max_len, url_dict[v])
+                   for v in sorted_versions]))
    print()

    if batch:
@@ -803,9 +814,10 @@ def get_checksums_for_versions(
    versions = sorted_versions[:archives_to_fetch]
    urls = [url_dict[v] for v in versions]

-    tty.msg("Downloading...")
+    tty.debug('Downloading...')
    version_hashes = []
    i = 0
+    errors = []
    for url, version in zip(urls, versions):
        try:
            if fetch_options:
@@ -826,10 +838,12 @@ def get_checksums_for_versions(
                        hashlib.sha256, stage.archive_file)))
            i += 1
        except FailedDownloadError:
-            tty.msg("Failed to fetch {0}".format(url))
+            errors.append('Failed to fetch {0}'.format(url))
        except Exception as e:
-            tty.msg("Something failed on {0}, skipping.".format(url),
-                    " ({0})".format(e))
+            tty.msg('Something failed on {0}, skipping. ({1})'.format(url, e))

+    for msg in errors:
+        tty.debug(msg)

    if not version_hashes:
        tty.die("Could not fetch any versions for {0}".format(name))
@@ -844,8 +858,8 @@ def get_checksums_for_versions(
    ])

    num_hash = len(version_hashes)
-    tty.msg("Checksummed {0} version{1} of {2}:".format(
-        num_hash, '' if num_hash == 1 else 's', name))
+    tty.debug('Checksummed {0} version{1} of {2}:'.format(
+        num_hash, '' if num_hash == 1 else 's', name))

    return version_lines

lib/spack/spack/test/bindist.py (new file, 480 lines)
@@ -0,0 +1,480 @@
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

"""
This test checks creating and installing buildcaches
"""
import os
import py
import pytest
import argparse
import platform
import spack.repo
import spack.store
# imports added here for the spack submodules used below
import spack.caches
import spack.compilers
import spack.config
import spack.fetch_strategy
import spack.paths
import spack.stage
import spack.binary_distribution as bindist
import spack.cmd.buildcache as buildcache
import spack.cmd.install as install
import spack.cmd.uninstall as uninstall
import spack.cmd.mirror as mirror
from spack.spec import Spec
from spack.directory_layout import YamlDirectoryLayout


def_install_path_scheme = '${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}'  # noqa: E501
ndef_install_path_scheme = '${PACKAGE}/${VERSION}/${ARCHITECTURE}-${COMPILERNAME}-${COMPILERVER}-${HASH}'  # noqa: E501

mirror_path_def = None
mirror_path_rel = None


@pytest.fixture(scope='function')
def cache_directory(tmpdir):
    old_cache_path = spack.caches.fetch_cache
    tmpdir.ensure('fetch_cache', dir=True)
    fsc = spack.fetch_strategy.FsCache(str(tmpdir.join('fetch_cache')))
    spack.config.caches = fsc
    yield spack.config.caches
    tmpdir.join('fetch_cache').remove()
    spack.config.caches = old_cache_path


@pytest.fixture(scope='session')
def session_mirror_def(tmpdir_factory):
    dir = tmpdir_factory.mktemp('mirror')
    global mirror_path_def
    mirror_path_def = dir
    dir.ensure('build_cache', dir=True)
    yield dir
    dir.join('build_cache').remove()


@pytest.fixture(scope='function')
def mirror_directory_def(session_mirror_def):
    yield str(session_mirror_def)


@pytest.fixture(scope='session')
def session_mirror_rel(tmpdir_factory):
    dir = tmpdir_factory.mktemp('mirror')
    global mirror_path_rel
    mirror_path_rel = dir
    dir.ensure('build_cache', dir=True)
    yield dir
    dir.join('build_cache').remove()


@pytest.fixture(scope='function')
def mirror_directory_rel(session_mirror_rel):
    yield(session_mirror_rel)


@pytest.fixture(scope='session')
def config_directory(tmpdir_factory):
    tmpdir = tmpdir_factory.mktemp('test_configs')
    # restore some sane defaults for packages and config
    config_path = py.path.local(spack.paths.etc_path)
    modules_yaml = config_path.join('spack', 'defaults', 'modules.yaml')
    os_modules_yaml = config_path.join('spack', 'defaults', '%s' %
                                       platform.system().lower(),
                                       'modules.yaml')
    packages_yaml = config_path.join('spack', 'defaults', 'packages.yaml')
    config_yaml = config_path.join('spack', 'defaults', 'config.yaml')
    repos_yaml = config_path.join('spack', 'defaults', 'repos.yaml')
    tmpdir.ensure('site', dir=True)
    tmpdir.ensure('user', dir=True)
    tmpdir.ensure('site/%s' % platform.system().lower(), dir=True)
    modules_yaml.copy(tmpdir.join('site', 'modules.yaml'))
    os_modules_yaml.copy(tmpdir.join('site/%s' % platform.system().lower(),
                                     'modules.yaml'))
    packages_yaml.copy(tmpdir.join('site', 'packages.yaml'))
    config_yaml.copy(tmpdir.join('site', 'config.yaml'))
    repos_yaml.copy(tmpdir.join('site', 'repos.yaml'))
    yield tmpdir
    tmpdir.remove()


@pytest.fixture(scope='function')
def default_config(tmpdir_factory, config_directory, monkeypatch):

    # The global upstream is not registered in these tests.
    global_upstream = spack.config.get('upstreams')

    mutable_dir = tmpdir_factory.mktemp('mutable_config').join('tmp')
    config_directory.copy(mutable_dir)

    cfg = spack.config.Configuration(
        *[spack.config.ConfigScope(name, str(mutable_dir))
          for name in ['site/%s' % platform.system().lower(),
                       'site', 'user']])

    monkeypatch.setattr(spack.config, 'config', cfg)

    # Set the global upstream
    upstreams = spack.config.get('upstreams')
    if not upstreams:
        spack.config.set('upstreams', global_upstream, scope='user')

    # This is essential, otherwise the cache will create weird side effects
    # that will compromise subsequent tests if compilers.yaml is modified
    monkeypatch.setattr(spack.compilers, '_cache_config_file', [])
    njobs = spack.config.get('config:build_jobs')
    if not njobs:
        spack.config.set('config:build_jobs', 4, scope='user')
    extensions = spack.config.get('config:template_dirs')
    if not extensions:
        spack.config.set('config:template_dirs',
                         [os.path.join(spack.paths.share_path, 'templates')],
                         scope='user')

    mutable_dir.ensure('build_stage', dir=True)
    build_stage = spack.config.get('config:build_stage')
    if not build_stage:
        spack.config.set('config:build_stage',
                         [str(mutable_dir.join('build_stage'))], scope='user')
    timeout = spack.config.get('config:connect_timeout')
    if not timeout:
        spack.config.set('config:connect_timeout', 10, scope='user')

    yield spack.config.config
    mutable_dir.remove()


@pytest.fixture(scope='function')
def install_dir_default_layout(tmpdir):
    """Hooks a fake install directory with a default layout"""
    real_store = spack.store.store
    real_layout = spack.store.layout
    spack.store.store = spack.store.Store(str(tmpdir.join('opt')))
    spack.store.layout = YamlDirectoryLayout(str(tmpdir.join('opt')),
                                             path_scheme=def_install_path_scheme)  # noqa: E501
    yield spack.store
    spack.store.store = real_store
    spack.store.layout = real_layout


@pytest.fixture(scope='function')
def install_dir_non_default_layout(tmpdir):
    """Hooks a fake install directory with a non-default layout"""
    real_store = spack.store.store
    real_layout = spack.store.layout
    spack.store.store = spack.store.Store(str(tmpdir.join('opt')))
    spack.store.layout = YamlDirectoryLayout(str(tmpdir.join('opt')),
                                             path_scheme=ndef_install_path_scheme)  # noqa: E501
    yield spack.store
    spack.store.store = real_store
    spack.store.layout = real_layout


@pytest.mark.requires_executables(
    '/usr/bin/gcc', 'patchelf', 'strings', 'file')
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.usefixtures('default_config', 'cache_directory',
                         'install_dir_default_layout')
def test_default_rpaths_create_install_default_layout(tmpdir,
                                                      mirror_directory_def,
                                                      install_mockery):
    """
    Test the creation and installation of buildcaches with default rpaths
    into the default directory layout scheme.
    """

    gspec = Spec('garply')
    gspec.concretize()
    cspec = Spec('corge')
    cspec.concretize()

    # Install patchelf needed for relocate in linux test environment
    iparser = argparse.ArgumentParser()
    install.setup_parser(iparser)
    # Install some packages with dependent packages
    iargs = iparser.parse_args(['--no-cache', cspec.name])
    install.install(iparser, iargs)

    global mirror_path_def
    mirror_path_def = mirror_directory_def
    mparser = argparse.ArgumentParser()
    mirror.setup_parser(mparser)
    margs = mparser.parse_args(
        ['add', '--scope', 'site', 'test-mirror-def', 'file://%s' % mirror_path_def])
    mirror.mirror(mparser, margs)
    margs = mparser.parse_args(['list'])
    mirror.mirror(mparser, margs)

    # setup argument parser
    parser = argparse.ArgumentParser()
    buildcache.setup_parser(parser)

    # Set default buildcache args
    create_args = ['create', '-a', '-u', '-d', str(mirror_path_def),
                   cspec.name]
    install_args = ['install', '-a', '-u', cspec.name]

    # Create a buildcache
    args = parser.parse_args(create_args)
    buildcache.buildcache(parser, args)
    # Test force overwrite create buildcache
    create_args.insert(create_args.index('-a'), '-f')
    args = parser.parse_args(create_args)
    buildcache.buildcache(parser, args)
    # create mirror index
    args = parser.parse_args(['update-index', '-d', 'file://%s' % str(mirror_path_def)])
    buildcache.buildcache(parser, args)
    # list the buildcaches in the mirror
    args = parser.parse_args(['list', '-a', '-l', '-v'])
    buildcache.buildcache(parser, args)

    # Uninstall the package and deps
    uparser = argparse.ArgumentParser()
    uninstall.setup_parser(uparser)
    uargs = uparser.parse_args(['-y', '--dependents', gspec.name])
    uninstall.uninstall(uparser, uargs)

    # test install
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)

    # This gives a warning that the spec is already installed
    buildcache.buildcache(parser, args)

    # test overwrite install
    install_args.insert(install_args.index('-a'), '-f')
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)

    args = parser.parse_args(['keys', '-f'])
    buildcache.buildcache(parser, args)

    args = parser.parse_args(['list'])
    buildcache.buildcache(parser, args)

    args = parser.parse_args(['list', '-a'])
    buildcache.buildcache(parser, args)

    args = parser.parse_args(['list', '-l', '-v'])
    buildcache.buildcache(parser, args)
    bindist._cached_specs = set()
    spack.stage.purge()
    margs = mparser.parse_args(
        ['rm', '--scope', 'site', 'test-mirror-def'])
    mirror.mirror(mparser, margs)


@pytest.mark.requires_executables(
    '/usr/bin/gcc', 'patchelf', 'strings', 'file')
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@pytest.mark.usefixtures('default_config', 'cache_directory',
                         'install_dir_non_default_layout')
def test_default_rpaths_install_nondefault_layout(tmpdir,
                                                  install_mockery):
    """
    Test the creation and installation of buildcaches with default rpaths
    into the non-default directory layout scheme.
    """

    gspec = Spec('garply')
    gspec.concretize()
    cspec = Spec('corge')
    cspec.concretize()

    global mirror_path_def
    mparser = argparse.ArgumentParser()
    mirror.setup_parser(mparser)
    margs = mparser.parse_args(
        ['add', '--scope', 'site', 'test-mirror-def', 'file://%s' % mirror_path_def])
    mirror.mirror(mparser, margs)

    # setup argument parser
    parser = argparse.ArgumentParser()
    buildcache.setup_parser(parser)

    # Set default buildcache args
    install_args = ['install', '-a', '-u', '%s' % cspec.name]

    # Install some packages with dependent packages
    # test install in non-default install path scheme
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)
    # test force install in non-default install path scheme
    install_args.insert(install_args.index('-a'), '-f')
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)

    bindist._cached_specs = set()
    spack.stage.purge()
    margs = mparser.parse_args(
        ['rm', '--scope', 'site', 'test-mirror-def'])
    mirror.mirror(mparser, margs)


@pytest.mark.requires_executables(
    '/usr/bin/gcc', 'patchelf', 'strings', 'file')
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@pytest.mark.usefixtures('default_config', 'cache_directory',
                         'install_dir_default_layout')
def test_relative_rpaths_create_default_layout(tmpdir,
                                               mirror_directory_rel,
                                               install_mockery):
    """
    Test the creation and installation of buildcaches with relative
    rpaths into the default directory layout scheme.
    """

    gspec = Spec('garply')
    gspec.concretize()
    cspec = Spec('corge')
    cspec.concretize()

    global mirror_path_rel
    mirror_path_rel = mirror_directory_rel
    # Install patchelf needed for relocate in linux test environment
    iparser = argparse.ArgumentParser()
    install.setup_parser(iparser)
    # Install some packages with dependent packages
    iargs = iparser.parse_args(['--no-cache', cspec.name])
    install.install(iparser, iargs)

    # setup argument parser
    parser = argparse.ArgumentParser()
    buildcache.setup_parser(parser)

    # set default buildcache args
    create_args = ['create', '-a', '-u', '-r', '-d',
                   str(mirror_path_rel),
                   cspec.name]

    # create build cache with relativized rpaths
    args = parser.parse_args(create_args)
    buildcache.buildcache(parser, args)
    # create mirror index
    args = parser.parse_args(['update-index', '-d', 'file://%s' % str(mirror_path_rel)])
    buildcache.buildcache(parser, args)
    # Uninstall the package and deps
    uparser = argparse.ArgumentParser()
    uninstall.setup_parser(uparser)
    uargs = uparser.parse_args(['-y', '--dependents', gspec.name])
    uninstall.uninstall(uparser, uargs)

    bindist._cached_specs = set()
    spack.stage.purge()


@pytest.mark.requires_executables(
    '/usr/bin/gcc', 'patchelf', 'strings', 'file')
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@pytest.mark.usefixtures('default_config', 'cache_directory',
                         'install_dir_default_layout')
def test_relative_rpaths_install_default_layout(tmpdir,
                                                install_mockery):
    """
    Test the creation and installation of buildcaches with relative
    rpaths into the default directory layout scheme.
    """

    gspec = Spec('garply')
    gspec.concretize()
    cspec = Spec('corge')
    cspec.concretize()

    global mirror_path_rel
    mparser = argparse.ArgumentParser()
    mirror.setup_parser(mparser)
    margs = mparser.parse_args(
        ['add', '--scope', 'site', 'test-mirror-rel', 'file://%s' % mirror_path_rel])
    mirror.mirror(mparser, margs)

    # Install patchelf needed for relocate in linux test environment
    iparser = argparse.ArgumentParser()
    install.setup_parser(iparser)

    # setup argument parser
    parser = argparse.ArgumentParser()
    buildcache.setup_parser(parser)

    # set default buildcache args
    install_args = ['install', '-a', '-u',
                    cspec.name]

    # install buildcache created with relativized rpaths
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)

    # This gives a warning that the spec is already installed
    buildcache.buildcache(parser, args)

    # Uninstall the package and deps
    uparser = argparse.ArgumentParser()
    uninstall.setup_parser(uparser)
    uargs = uparser.parse_args(['-y', '--dependents', gspec.name])
    uninstall.uninstall(uparser, uargs)

    # install build cache
    buildcache.buildcache(parser, args)

    # test overwrite install
    install_args.insert(install_args.index('-a'), '-f')
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)

    bindist._cached_specs = set()
    spack.stage.purge()
    margs = mparser.parse_args(
        ['rm', '--scope', 'site', 'test-mirror-rel'])
    mirror.mirror(mparser, margs)


@pytest.mark.requires_executables(
    '/usr/bin/gcc', 'patchelf', 'strings', 'file')
@pytest.mark.disable_clean_stage_check
@pytest.mark.maybeslow
@pytest.mark.nomockstage
@pytest.mark.usefixtures('default_config', 'cache_directory',
                         'install_dir_non_default_layout')
def test_relative_rpaths_install_nondefault(tmpdir,
                                            install_mockery):
    """
    Test the installation of buildcaches with relativized rpaths
    into the non-default directory layout scheme.
    """

    gspec = Spec('garply')
    gspec.concretize()
    cspec = Spec('corge')
    cspec.concretize()

    global mirror_path_rel

    mparser = argparse.ArgumentParser()
    mirror.setup_parser(mparser)
    margs = mparser.parse_args(
        ['add', '--scope', 'site', 'test-mirror-rel', 'file://%s' % mirror_path_rel])
    mirror.mirror(mparser, margs)

    # Install patchelf needed for relocate in linux test environment
    iparser = argparse.ArgumentParser()
    install.setup_parser(iparser)

    # setup argument parser
    parser = argparse.ArgumentParser()
    buildcache.setup_parser(parser)

    # Set default buildcache args
    install_args = ['install', '-a', '-u', '%s' % cspec.name]

    # test install in non-default install path scheme and relative path
    args = parser.parse_args(install_args)
    buildcache.buildcache(parser, args)

    bindist._cached_specs = set()
    spack.stage.purge()
    margs = mparser.parse_args(
        ['rm', '--scope', 'site', 'test-mirror-rel'])
    mirror.mirror(mparser, margs)
lib/spack/spack/test/cache_fetch.py (new file, 36 lines)
@@ -0,0 +1,36 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import os
import pytest

from llnl.util.filesystem import mkdirp, touch

from spack.stage import Stage
from spack.fetch_strategy import CacheURLFetchStrategy, NoCacheError

def test_fetch_missing_cache(tmpdir):
    """Ensure that fetching a missing cache file raises NoCacheError."""
    testpath = str(tmpdir)

    fetcher = CacheURLFetchStrategy(url='file:///not-a-real-cache-file')
    with Stage(fetcher, path=testpath):
        with pytest.raises(NoCacheError, match=r'No cache'):
            fetcher.fetch()

def test_fetch(tmpdir):
    """Ensure a fetch after expanding is effectively a no-op."""
    testpath = str(tmpdir)
    cache = os.path.join(testpath, 'cache.tar.gz')
    touch(cache)
    url = 'file:///{0}'.format(cache)

    fetcher = CacheURLFetchStrategy(url=url)
    with Stage(fetcher, path=testpath) as stage:
        source_path = stage.source_path
        mkdirp(source_path)
        fetcher.fetch()
@@ -751,7 +751,6 @@ def test_push_mirror_contents(tmpdir, mutable_mock_env_path, env_deactivate,

        logs_dir_list = os.listdir(logs_dir.strpath)

-        assert('spack-build-env.txt' in logs_dir_list)
        assert('spack-build-out.txt' in logs_dir_list)

        # Also just make sure that if something goes wrong with the
@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
+import shutil
+import sys

import pytest

@@ -14,7 +16,7 @@


@pytest.fixture
-def no_compilers_yaml(mutable_config, monkeypatch):
+def no_compilers_yaml(mutable_config):
    """Creates a temporary configuration without compilers.yaml"""

    for scope, local_config in mutable_config.scopes.items():
@@ -64,7 +66,7 @@ def test_compiler_find_without_paths(no_compilers_yaml, working_env, tmpdir):
    with tmpdir.as_cwd():
        with open('gcc', 'w') as f:
            f.write("""\
-#!/bin/bash
+#!/bin/sh
echo "0.0.0"
""")
        os.chmod('gcc', 0o700)
@@ -75,6 +77,33 @@ def test_compiler_find_without_paths(no_compilers_yaml, working_env, tmpdir):
    assert 'gcc' in output


+@pytest.mark.regression('17589')
+def test_compiler_find_no_apple_gcc(no_compilers_yaml, working_env, tmpdir):
+    with tmpdir.as_cwd():
+        # make a script to emulate apple gcc's version args
+        with open('gcc', 'w') as f:
+            f.write("""\
+#!/bin/sh
+if [ "$1" = "-dumpversion" ]; then
+    echo "4.2.1"
+elif [ "$1" = "--version" ]; then
+    echo "Configured with: --prefix=/dummy"
+    echo "Apple clang version 11.0.0 (clang-1100.0.33.16)"
+    echo "Target: x86_64-apple-darwin18.7.0"
+    echo "Thread model: posix"
+    echo "InstalledDir: /dummy"
+else
+    echo "clang: error: no input files"
+fi
+""")
+        os.chmod('gcc', 0o700)
+
+        os.environ['PATH'] = str(tmpdir)
+        output = compiler('find', '--scope=site')
+
+        assert 'gcc' not in output
+
+
def test_compiler_remove(mutable_config, mock_packages):
    args = spack.util.pattern.Bunch(
        all=True, compiler_spec='gcc@4.5.0', add_paths=[], scope=None
@@ -103,3 +132,121 @@ def test_compiler_add(
    new_compiler = new_compilers - old_compilers
    assert any(c.version == spack.version.Version(mock_compiler_version)
               for c in new_compiler)
+
+
+@pytest.fixture
+def clangdir(tmpdir):
+    """Create a directory with some dummy compiler scripts in it.
+
+    Scripts are:
+      - clang
+      - clang++
+      - gcc
+      - g++
+      - gfortran-8
+
+    """
+    with tmpdir.as_cwd():
+        with open('clang', 'w') as f:
+            f.write("""\
+#!/bin/sh
+if [ "$1" = "--version" ]; then
+    echo "clang version 11.0.0 (clang-1100.0.33.16)"
+    echo "Target: x86_64-apple-darwin18.7.0"
+    echo "Thread model: posix"
+    echo "InstalledDir: /dummy"
+else
+    echo "clang: error: no input files"
+    exit 1
+fi
+""")
+        shutil.copy('clang', 'clang++')
+
+        gcc_script = """\
+#!/bin/sh
+if [ "$1" = "-dumpversion" ]; then
+    echo "8"
+elif [ "$1" = "-dumpfullversion" ]; then
+    echo "8.4.0"
+elif [ "$1" = "--version" ]; then
+    echo "{0} (GCC) 8.4.0 20120313 (Red Hat 8.4.0-1)"
+    echo "Copyright (C) 2010 Free Software Foundation, Inc."
+else
+    echo "{1}: fatal error: no input files"
+    echo "compilation terminated."
+    exit 1
+fi
+"""
+        with open('gcc-8', 'w') as f:
+            f.write(gcc_script.format('gcc', 'gcc-8'))
+        with open('g++-8', 'w') as f:
+            f.write(gcc_script.format('g++', 'g++-8'))
+        with open('gfortran-8', 'w') as f:
+            f.write(gcc_script.format('GNU Fortran', 'gfortran-8'))
+        os.chmod('clang', 0o700)
+        os.chmod('clang++', 0o700)
+        os.chmod('gcc-8', 0o700)
+        os.chmod('g++-8', 0o700)
+        os.chmod('gfortran-8', 0o700)
+
+    yield tmpdir
+
+
+@pytest.mark.regression('17590')
+def test_compiler_find_mixed_suffixes(
+        no_compilers_yaml, working_env, clangdir):
+    """Ensure that we'll mix compilers with different suffixes when necessary.
+    """
+    os.environ['PATH'] = str(clangdir)
+    output = compiler('find', '--scope=site')
+
+    assert 'clang@11.0.0' in output
+    assert 'gcc@8.4.0' in output
+
+    config = spack.compilers.get_compiler_config('site', False)
+    clang = next(c['compiler'] for c in config
+                 if c['compiler']['spec'] == 'clang@11.0.0')
+    gcc = next(c['compiler'] for c in config
+               if c['compiler']['spec'] == 'gcc@8.4.0')
+
+    gfortran_path = str(clangdir.join('gfortran-8'))
+
+    assert clang['paths'] == {
+        'cc': str(clangdir.join('clang')),
+        'cxx': str(clangdir.join('clang++')),
+        # we only auto-detect mixed clang on macos
+        'f77': gfortran_path if sys.platform == 'darwin' else None,
+        'fc': gfortran_path if sys.platform == 'darwin' else None,
+    }
+
+    assert gcc['paths'] == {
+        'cc': str(clangdir.join('gcc-8')),
+        'cxx': str(clangdir.join('g++-8')),
+        'f77': gfortran_path,
+        'fc': gfortran_path,
+    }
+
+
+@pytest.mark.regression('17590')
+def test_compiler_find_prefer_no_suffix(
+        no_compilers_yaml, working_env, clangdir):
+    """Ensure that we'll pick 'clang' over 'clang-gpu' when there is a choice.
+    """
+    with clangdir.as_cwd():
+        shutil.copy('clang', 'clang-gpu')
+        shutil.copy('clang++', 'clang++-gpu')
+        os.chmod('clang-gpu', 0o700)
+        os.chmod('clang++-gpu', 0o700)
+
+    os.environ['PATH'] = str(clangdir)
+    output = compiler('find', '--scope=site')
+
+    assert 'clang@11.0.0' in output
+    assert 'gcc@8.4.0' in output
+
+    config = spack.compilers.get_compiler_config('site', False)
+    clang = next(c['compiler'] for c in config
+                 if c['compiler']['spec'] == 'clang@11.0.0')
+
+    assert clang['paths']['cc'] == str(clangdir.join('clang'))
+    assert clang['paths']['cxx'] == str(clangdir.join('clang++'))
@@ -16,7 +16,7 @@
|
||||
|
||||
from spack.cmd.env import _env_create
|
||||
from spack.spec import Spec
|
||||
from spack.main import SpackCommand
|
||||
from spack.main import SpackCommand, SpackCommandError
|
||||
from spack.stage import stage_prefix
|
||||
|
||||
from spack.util.mock_package import MockPackageMultiRepo
|
||||
@@ -284,6 +284,45 @@ def test_environment_status(capsys, tmpdir):
|
||||
assert 'in current directory' in env('status')
|
||||
|
||||
|
||||
def test_env_status_broken_view(
|
||||
mutable_mock_env_path, mock_archive, mock_fetch, mock_packages,
|
||||
install_mockery
|
||||
):
|
||||
with ev.create('test'):
|
||||
install('trivial-install-test-package')
|
||||
|
||||
# switch to a new repo that doesn't include the installed package
|
||||
# test that Spack detects the missing package and warns the user
|
||||
new_repo = MockPackageMultiRepo()
|
||||
with spack.repo.swap(new_repo):
|
||||
output = env('status')
|
||||
assert 'In environment test' in output
|
||||
assert 'Environment test includes out of date' in output
|
||||
|
||||
# Test that the warning goes away when it's fixed
|
||||
output = env('status')
|
||||
assert 'In environment test' in output
|
||||
assert 'Environment test includes out of date' not in output
|
||||
|
||||
|
||||
def test_env_activate_broken_view(
|
||||
mutable_mock_env_path, mock_archive, mock_fetch, mock_packages,
|
||||
install_mockery
|
||||
):
|
||||
with ev.create('test'):
|
||||
install('trivial-install-test-package')
|
||||
|
||||
# switch to a new repo that doesn't include the installed package
|
||||
# test that Spack detects the missing package and fails gracefully
|
||||
new_repo = MockPackageMultiRepo()
|
||||
with spack.repo.swap(new_repo):
|
||||
with pytest.raises(SpackCommandError):
|
||||
env('activate', '--sh', 'test')
|
||||
|
||||
# test replacing repo fixes it
|
||||
env('activate', '--sh', 'test')
|
||||
|
||||
|
||||
def test_to_lockfile_dict():
|
||||
e = ev.create('test')
|
||||
e.add('mpileaks')
|
||||
|
@@ -29,6 +29,9 @@
install = SpackCommand('install')
env = SpackCommand('env')
add = SpackCommand('add')
mirror = SpackCommand('mirror')
uninstall = SpackCommand('uninstall')
buildcache = SpackCommand('buildcache')


@pytest.fixture()
@@ -170,8 +173,8 @@ def test_package_output(tmpdir, capsys, install_mockery, mock_fetch):

    # make sure that output from the actual package file appears in the
    # right place in the build log.
    assert re.search(r"BEFORE INSTALL\n==>( \[.+\])? './configure'", out)
    assert "'install'\nAFTER INSTALL" in out
    assert "BEFORE INSTALL" in out
    assert "AFTER INSTALL" in out


@pytest.mark.disable_clean_stage_check
@@ -217,10 +220,12 @@ def test_show_log_on_error(mock_packages, mock_archive, mock_fetch,
    assert install.error.pkg.name == 'build-error'
    assert 'Full build log:' in out

    # Message shows up for ProcessError (1), ChildError (1), and output (1)
    print(out)

    # Message shows up for ProcessError (1) and output (1)
    errors = [line for line in out.split('\n')
              if 'configure: error: cannot run C compiled programs' in line]
    assert len(errors) == 3
    assert len(errors) == 2


def test_install_overwrite(
@@ -773,6 +778,40 @@ def test_compiler_bootstrap(
    install('a%gcc@2.0')


def test_compiler_bootstrap_from_binary_mirror(
        install_mockery_mutable_config, mock_packages, mock_fetch,
        mock_archive, mutable_config, monkeypatch, tmpdir):
    """Make sure installing compiler from buildcache registers compiler"""

    # Create a temp mirror directory for buildcache usage
    mirror_dir = tmpdir.join('mirror_dir')
    mirror_url = 'file://{0}'.format(mirror_dir.strpath)

    # Install a compiler, because we want to put it in a buildcache
    install('gcc@2.0')

    # Put installed compiler in the buildcache
    buildcache('create', '-u', '-a', '-f', '-d', mirror_dir.strpath, 'gcc@2.0')

    # Now uninstall the compiler
    uninstall('-y', 'gcc@2.0')

    monkeypatch.setattr(spack.concretize.Concretizer,
                        'check_for_compiler_existence', False)
    spack.config.set('config:install_missing_compilers', True)
    assert CompilerSpec('gcc@2.0') not in compilers.all_compiler_specs()

    # Configure the mirror where we put that buildcache w/ the compiler
    mirror('add', 'test-mirror', mirror_url)

    # Now make sure that when the compiler is installed from binary mirror,
    # it also gets configured as a compiler. Test succeeds if it does not
    # raise an error
    install('--no-check-signature', '--cache-only', '--only',
            'dependencies', 'b%gcc@2.0')
    install('--no-cache', '--only', 'package', 'b%gcc@2.0')


@pytest.mark.regression('16221')
def test_compiler_bootstrap_already_installed(
        install_mockery_mutable_config, mock_packages, mock_fetch,
@@ -786,3 +825,27 @@ def test_compiler_bootstrap_already_installed(
    # Test succeeds if it does not raise an error
    install('gcc@2.0')
    install('a%gcc@2.0')


def test_install_fails_no_args(tmpdir):
    # ensure no spack.yaml in directory
    with tmpdir.as_cwd():
        output = install(fail_on_error=False)

    # check we got the short version of the error message with no spack.yaml
    assert 'requires a package argument or active environment' in output
    assert 'spack env activate .' not in output
    assert 'using the `spack.yaml` in this directory' not in output


def test_install_fails_no_args_suggests_env_activation(tmpdir):
    # ensure spack.yaml in directory
    tmpdir.ensure('spack.yaml')

    with tmpdir.as_cwd():
        output = install(fail_on_error=False)

    # check we got the long version of the error message with spack.yaml
    assert 'requires a package argument or active environment' in output
    assert 'spack env activate .' in output
    assert 'using the `spack.yaml` in this directory' in output

@@ -1,7 +0,0 @@
upstreams:
  global:
    install_tree: $spack/opt/spack
    modules:
      tcl: $spack/share/spack/modules
      lmod: $spack/share/spack/lmod
      dotkit: $spack/share/spack/dotkit
@@ -344,10 +344,9 @@ def test_nosource_pkg_install(

    # Make sure install works even though there is no associated code.
    pkg.do_install()

    # Also make sure an error is raised if `do_fetch` is called.
    pkg.do_fetch()
    assert "No fetch required for nosource" in capfd.readouterr()[0]
    out = capfd.readouterr()
    assert "Installing dependency-install" in out[0]
    assert "Missing a source id for nosource" in out[1]


def test_nosource_pkg_install_post_install(
@@ -99,10 +99,21 @@ def test_hms(sec, result):
    assert inst._hms(sec) == result


def test_install_msg():
def test_install_msg(monkeypatch):
    """Test results of call to install_msg based on debug level."""
    name = 'some-package'
    pid = 123456
    expected = "{0}: Installing {1}".format(pid, name)
    install_msg = 'Installing {0}'.format(name)

    monkeypatch.setattr(tty, '_debug', 0)
    assert inst.install_msg(name, pid) == install_msg

    monkeypatch.setattr(tty, '_debug', 1)
    assert inst.install_msg(name, pid) == install_msg

    # Expect the PID to be added at debug level 2
    monkeypatch.setattr(tty, '_debug', 2)
    expected = "{0}: {1}".format(pid, install_msg)
    assert inst.install_msg(name, pid) == expected


@@ -151,7 +162,6 @@ def test_process_external_package_module(install_mockery, monkeypatch, capfd):

    out = capfd.readouterr()[0]
    assert 'has external module in {0}'.format(spec.external_module) in out
    assert 'is actually installed in {0}'.format(spec.external_path) in out


def test_process_binary_cache_tarball_none(install_mockery, monkeypatch,
@@ -180,7 +190,7 @@ def _spec(spec):
    spec = spack.spec.Spec('a').concretized()
    assert inst._process_binary_cache_tarball(spec.package, spec, False, False)

    assert 'Installing a from binary cache' in capfd.readouterr()[0]
    assert 'Extracting a from binary cache' in capfd.readouterr()[0]


def test_try_install_from_binary_cache(install_mockery, mock_packages,
@@ -1143,8 +1143,6 @@ def read():
    assert vals['read'] == 1


@pytest.mark.skipif('macos' in os.environ.get('GITHUB_WORKFLOW', ''),
                    reason="Skip failing test for GA on MacOS")
def test_lock_debug_output(lock_path):
    host = socket.getfqdn()

@@ -111,7 +111,7 @@ def test_log_subproc_and_echo_output_capfd(capfd, tmpdir):
# Tests below use a pseudoterminal to test llnl.util.tty.log
#
def simple_logger(**kwargs):
    """Mock logger (child) process for testing log.keyboard_input."""
    """Mock logger (minion) process for testing log.keyboard_input."""
    def handler(signum, frame):
        running[0] = False
    signal.signal(signal.SIGUSR1, handler)
@@ -125,7 +125,7 @@ def handler(signum, frame):


def mock_shell_fg(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.fg()
    ctl.status()
    ctl.wait_enabled()
@@ -134,7 +134,7 @@ def mock_shell_fg(proc, ctl, **kwargs):


def mock_shell_fg_no_termios(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.fg()
    ctl.status()
    ctl.wait_disabled_fg()
@@ -143,7 +143,7 @@ def mock_shell_fg_no_termios(proc, ctl, **kwargs):


def mock_shell_bg(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.bg()
    ctl.status()
    ctl.wait_disabled()
@@ -152,7 +152,7 @@ def mock_shell_bg(proc, ctl, **kwargs):


def mock_shell_tstp_cont(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.tstp()
    ctl.wait_stopped()

@@ -163,7 +163,7 @@ def mock_shell_tstp_cont(proc, ctl, **kwargs):


def mock_shell_tstp_tstp_cont(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.tstp()
    ctl.wait_stopped()

@@ -177,7 +177,7 @@ def mock_shell_tstp_tstp_cont(proc, ctl, **kwargs):


def mock_shell_tstp_tstp_cont_cont(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.tstp()
    ctl.wait_stopped()

@@ -194,7 +194,7 @@ def mock_shell_tstp_tstp_cont_cont(proc, ctl, **kwargs):


def mock_shell_bg_fg(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.bg()
    ctl.status()
    ctl.wait_disabled()
@@ -207,7 +207,7 @@ def mock_shell_bg_fg(proc, ctl, **kwargs):


def mock_shell_bg_fg_no_termios(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.bg()
    ctl.status()
    ctl.wait_disabled()
@@ -220,7 +220,7 @@ def mock_shell_bg_fg_no_termios(proc, ctl, **kwargs):


def mock_shell_fg_bg(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.fg()
    ctl.status()
    ctl.wait_enabled()
@@ -233,7 +233,7 @@ def mock_shell_fg_bg(proc, ctl, **kwargs):


def mock_shell_fg_bg_no_termios(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background."""
    """PseudoShell controller function for test_foreground_background."""
    ctl.fg()
    ctl.status()
    ctl.wait_disabled_fg()
@@ -299,7 +299,7 @@ def test_foreground_background(test_fn, termios_on_or_off, tmpdir):


def synchronized_logger(**kwargs):
    """Mock logger (child) process for testing log.keyboard_input.
    """Mock logger (minion) process for testing log.keyboard_input.

    This logger synchronizes with the parent process to test that 'v' can
    toggle output. It is used in ``test_foreground_background_output`` below.
@@ -330,7 +330,7 @@ def handler(signum, frame):


def mock_shell_v_v(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background_output."""
    """Controller function for test_foreground_background_output."""
    write_lock = kwargs["write_lock"]
    v_lock = kwargs["v_lock"]

@@ -357,7 +357,7 @@ def mock_shell_v_v(proc, ctl, **kwargs):


def mock_shell_v_v_no_termios(proc, ctl, **kwargs):
    """PseudoShell master function for test_foreground_background_output."""
    """Controller function for test_foreground_background_output."""
    write_lock = kwargs["write_lock"]
    v_lock = kwargs["v_lock"]

@@ -395,9 +395,9 @@ def test_foreground_background_output(
    shell = PseudoShell(test_fn, synchronized_logger)
    log_path = str(tmpdir.join("log.txt"))

    # Locks for synchronizing with child
    write_lock = multiprocessing.Lock()  # must be held by child to write
    v_lock = multiprocessing.Lock()  # held while master is in v mode
    # Locks for synchronizing with minion
    write_lock = multiprocessing.Lock()  # must be held by minion to write
    v_lock = multiprocessing.Lock()  # held while controller is in v mode

    with termios_on_or_off():
        shell.start(
@@ -423,16 +423,16 @@ def test_foreground_background_output(
    with open(log_path) as log:
        log = log.read().strip().split("\n")

    # Master and child process coordinate with locks such that the child
    # Controller and minion process coordinate with locks such that the minion
    # writes "off" when echo is off, and "on" when echo is on. The
    # output should contain mostly "on" lines, but may contain an "off"
    # or two. This is because the master toggles echo by sending "v" on
    # stdin to the child, but this is not synchronized with our locks.
    # or two. This is because the controller toggles echo by sending "v" on
    # stdin to the minion, but this is not synchronized with our locks.
    # It's good enough for a test, though. We allow at most 2 "off"'s in
    # the output to account for the race.
    assert (
        ['forced output', 'on'] == uniq(output) or
        output.count("off") <= 2  # if master_fd is a bit slow
        output.count("off") <= 2  # if controller_fd is a bit slow
    )

    # log should be off for a while, then on, then off

87
lib/spack/spack/test/llnl/util/tty/tty.py
Normal file
@@ -0,0 +1,87 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import os

import pytest
import llnl.util.tty as tty


def test_get_timestamp(monkeypatch):
    """Ensure the results of get_timestamp are reasonable."""

    # Debug disabled should return an empty string
    monkeypatch.setattr(tty, '_debug', 0)
    assert not tty.get_timestamp(False), 'Expected an empty string'

    # Debug disabled but force the timestamp should return a string
    assert tty.get_timestamp(True), 'Expected a timestamp/non-empty string'

    pid_str = ' {0}'.format(os.getpid())

    # Level 1 debugging should return a timestamp WITHOUT the pid
    monkeypatch.setattr(tty, '_debug', 1)
    out_str = tty.get_timestamp(False)
    assert out_str and pid_str not in out_str, 'Expected no PID in results'

    # Level 2 debugging should also return a timestamp WITH the pid
    monkeypatch.setattr(tty, '_debug', 2)
    out_str = tty.get_timestamp(False)
    assert out_str and pid_str in out_str, 'Expected PID in results'


@pytest.mark.parametrize('msg,enabled,trace,newline', [
    ('', False, False, False),  # Nothing is output
    (Exception(''), True, False, True),  # Exception output
    ('trace', True, True, False),  # stacktrace output
    ('newline', True, False, True),  # newline in output
    ('no newline', True, False, False)  # no newline output
])
def test_msg(capfd, monkeypatch, enabled, msg, trace, newline):
    """Ensure the output from msg with options is appropriate."""

    # temporarily use the parameterized settings
    monkeypatch.setattr(tty, '_msg_enabled', enabled)
    monkeypatch.setattr(tty, '_stacktrace', trace)

    expected = [msg if isinstance(msg, str) else 'Exception: ']
    if newline:
        expected[0] = '{0}\n'.format(expected[0])
    if trace:
        expected.insert(0, '.py')

    tty.msg(msg, newline=newline)
    out = capfd.readouterr()[0]
    for msg in expected:
        assert msg in out


@pytest.mark.parametrize('msg,trace,wrap', [
    (Exception(''), False, False),  # Exception output
    ('trace', True, False),  # stacktrace output
    ('wrap', False, True),  # wrap in output
])
def test_info(capfd, monkeypatch, msg, trace, wrap):
    """Ensure the output from info with options is appropriate."""

    # temporarily use the parameterized settings
    monkeypatch.setattr(tty, '_stacktrace', trace)

    expected = [msg if isinstance(msg, str) else 'Exception: ']
    if trace:
        expected.insert(0, '.py')

    extra = 'This extra argument *should* make for a sufficiently long line' \
        ' that needs to be wrapped if the option is enabled.'
    args = [msg, extra]

    num_newlines = 3 if wrap else 2

    tty.info(*args, wrap=wrap, countback=3)
    out = capfd.readouterr()[0]
    for msg in expected:
        assert msg in out

    assert out.count('\n') == num_newlines
@@ -3,6 +3,7 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import os
import pytest

import spack.fetch_strategy as spack_fs
@@ -27,3 +28,19 @@ def test_s3fetchstrategy_bad_url(tmpdir):
    assert fetcher.archive_file is None
    with pytest.raises(spack_fs.FetchError):
        fetcher.fetch()


def test_s3fetchstrategy_downloaded(tmpdir):
    """Ensure fetch with archive file already downloaded is a noop."""
    testpath = str(tmpdir)
    archive = os.path.join(testpath, 's3.tar.gz')

    class Archived_S3FS(spack_fs.S3FetchStrategy):
        @property
        def archive_file(self):
            return archive

    url = 's3:///{0}'.format(archive)
    fetcher = Archived_S3FS(url=url)
    with spack_stage.Stage(fetcher, path=testpath):
        fetcher.fetch()
@@ -6,7 +6,10 @@
import sys
import os

import pytest

import llnl.util.filesystem as fs

import spack
import spack.util.executable as ex
from spack.hooks.sbang import filter_shebangs_in_directory
@@ -35,3 +38,18 @@ def test_read_unicode(tmpdir, working_env):
    # read the unicode back in and see whether things work
    script = ex.Executable('./%s' % script_name)
    assert u'\xc3' == script(output=str).strip()


def test_which(tmpdir):
    os.environ["PATH"] = str(tmpdir)
    assert ex.which("spack-test-exe") is None
    with pytest.raises(ex.CommandNotFoundError):
        ex.which("spack-test-exe", required=True)

    with tmpdir.as_cwd():
        fs.touch("spack-test-exe")
        fs.set_executable('spack-test-exe')

        exe = ex.which("spack-test-exe")
        assert exe is not None
        assert exe.path == str(tmpdir.join("spack-test-exe"))
@@ -14,7 +14,7 @@
NOTAR_EXTS = ["zip", "tgz", "tbz2", "txz"]

# Add PRE_EXTS and EXTS last so that .tar.gz is matched *before* .tar or .gz
ALLOWED_ARCHIVE_TYPES = [".".join(l) for l in product(
ALLOWED_ARCHIVE_TYPES = [".".join(ext) for ext in product(
    PRE_EXTS, EXTS)] + PRE_EXTS + EXTS + NOTAR_EXTS


@@ -36,7 +36,7 @@ def decompressor_for(path, extension=None):
        bunzip2 = which('bunzip2', required=True)
        return bunzip2
    tar = which('tar', required=True)
    tar.add_default_arg('-xf')
    tar.add_default_arg('-oxf')
    return tar

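The rename above (`l` to `ext`) is cosmetic, but the construction it touches is load-bearing: compound extensions must precede the simple ones so the most specific suffix wins. A minimal standalone sketch of that ordering, using simplified stand-in values for PRE_EXTS/EXTS and a hypothetical extension_of() helper (neither is Spack API):

    from itertools import product

    PRE_EXTS = ["tar"]          # stand-in value (assumption)
    EXTS = ["gz", "bz2", "xz"]  # stand-in value (assumption)

    # Compound extensions first, so "foo.tar.gz" matches ".tar.gz" before
    # the bare ".tar" or ".gz" entries that come later in the list.
    ALLOWED = [".".join(ext) for ext in product(PRE_EXTS, EXTS)] + PRE_EXTS + EXTS

    def extension_of(path):
        # Return the first (most specific) matching extension.
        for candidate in ALLOWED:
            if path.endswith("." + candidate):
                return candidate
        return None

    assert extension_of("spack-0.15.3.tar.gz") == "tar.gz"
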
@@ -133,7 +133,7 @@ def __init__(self, hexdigest, **kwargs):
    @property
    def hash_name(self):
        """Get the name of the hash function this Checker is using."""
        return self.hash_fun().name
        return self.hash_fun().name.lower()

    def check(self, filename):
        """Read the file with the specified name and check its checksum
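The `.lower()` added above normalizes the algorithm name reported by hashlib before it is compared or displayed. A small sketch of the idea (hash_name_for is a hypothetical helper, not Spack API):

    import hashlib

    def hash_name_for(hash_fun):
        # Normalize case so comparisons against user-supplied names
        # (e.g. 'MD5' vs 'md5') stay robust across hashlib constructors.
        return hash_fun().name.lower()

    assert hash_name_for(hashlib.sha256) == 'sha256'
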
@@ -239,7 +239,8 @@ def which_string(*args, **kwargs):
            return exe

    if required:
        tty.die("spack requires '%s'. Make sure it is in your path." % args[0])
        raise CommandNotFoundError(
            "spack requires '%s'. Make sure it is in your path." % args[0])

    return None

@@ -266,3 +267,7 @@ def which(*args, **kwargs):

class ProcessError(spack.error.SpackError):
    """ProcessErrors are raised when Executables exit with an error code."""


class CommandNotFoundError(spack.error.SpackError):
    """Raised when ``which()`` can't find a required executable."""

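With the change above, a failed lookup of a required executable becomes a catchable `CommandNotFoundError` rather than an immediate `tty.die`. A short usage sketch against the API introduced here (the tool name is a placeholder):

    from spack.util.executable import which, CommandNotFoundError

    # A missing optional tool still just returns None...
    assert which('some-tool-not-on-the-path') is None

    # ...while a missing *required* tool now raises an exception the
    # caller can handle instead of terminating the process.
    try:
        which('some-tool-not-on-the-path', required=True)
    except CommandNotFoundError as err:
        print('lookup failed:', err)
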
@@ -13,7 +13,7 @@

_gnupg_version_re = r"^gpg \(GnuPG\) (.*)$"

GNUPGHOME = spack.paths.gpg_path
GNUPGHOME = os.getenv('SPACK_GNUPGHOME', spack.paths.gpg_path)


def parse_keys_output(output):
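The `os.getenv` fallback above lets a caller redirect the GPG home per process without editing configuration. The pattern in isolation (the default path below is a stand-in for `spack.paths.gpg_path`, not the real value):

    import os

    DEFAULT_GPG_PATH = '/opt/spack/opt/spack/gpg'  # stand-in (assumption)

    # The environment variable wins when set; otherwise fall back to the
    # packaged default, so callers always see a single GNUPGHOME value.
    GNUPGHOME = os.getenv('SPACK_GNUPGHOME', DEFAULT_GPG_PATH)
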
@@ -77,6 +77,8 @@ def __init__(self):
    def get(self, spec):
        if not isinstance(spec, spack.spec.Spec):
            spec = Spec(spec)
        if spec.name not in self.spec_to_pkg:
            raise spack.repo.UnknownPackageError(spec.fullname)
        return self.spec_to_pkg[spec.name]

    def get_pkg_class(self, name):
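The added guard means `get()` now accepts either a plain string or a `Spec` and fails loudly for unknown packages. The same accept-string-or-object pattern in isolation, with hypothetical names throughout:

    class UnknownPackageError(KeyError):
        """Stand-in for spack.repo.UnknownPackageError."""

    class TinyRepo:
        def __init__(self, pkgs):
            self.pkgs = pkgs  # name -> package object

        def get(self, spec):
            # Coerce plain strings to a name, mirroring the Spec(spec)
            # coercion above, and raise for anything unknown.
            name = spec if isinstance(spec, str) else spec.name
            if name not in self.pkgs:
                raise UnknownPackageError(name)
            return self.pkgs[name]

    repo = TinyRepo({'mpileaks': object()})
    repo.get('mpileaks')      # returns the package object
    try:
        repo.get('nonexistent')
    except UnknownPackageError:
        pass                  # expected for unknown names
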
@@ -9,20 +9,6 @@ ENV DOCKERFILE_BASE=centos \
    CURRENTLY_BUILDING_DOCKER_IMAGE=1 \
    container=docker

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

RUN yum update -y \
 && yum install -y epel-release \
 && yum update -y \
@@ -50,6 +36,20 @@ RUN yum update -y \
 && rm -rf /var/cache/yum \
 && yum clean all

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

RUN mkdir -p /root/.spack \
 && cp $SPACK_ROOT/share/spack/docker/modules.yaml \
        /root/.spack/modules.yaml \
@@ -9,20 +9,6 @@ ENV DOCKERFILE_BASE=centos \
    CURRENTLY_BUILDING_DOCKER_IMAGE=1 \
    container=docker

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

RUN yum update -y \
 && yum install -y epel-release \
 && yum update -y \
@@ -50,6 +36,20 @@ RUN yum update -y \
 && rm -rf /var/cache/yum \
 && yum clean all

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

RUN mkdir -p /root/.spack \
 && cp $SPACK_ROOT/share/spack/docker/modules.yaml \
        /root/.spack/modules.yaml \
@@ -9,20 +9,6 @@ ENV DOCKERFILE_BASE=ubuntu:16.04 \
    CURRENTLY_BUILDING_DOCKER_IMAGE=1 \
    container=docker

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

RUN apt-get -yqq update \
 && apt-get -yqq install --no-install-recommends \
    build-essential \
@@ -48,6 +34,20 @@ RUN apt-get -yqq update \
 && pip3 install boto3 \
 && rm -rf /var/lib/apt/lists/*

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

# Add LANG default to en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
ENV LANG en_US.UTF-8
@@ -9,20 +9,6 @@ ENV DOCKERFILE_BASE=ubuntu \
    CURRENTLY_BUILDING_DOCKER_IMAGE=1 \
    container=docker

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

RUN apt-get -yqq update \
 && apt-get -yqq install --no-install-recommends \
    build-essential \
@@ -48,6 +34,20 @@ RUN apt-get -yqq update \
 && pip3 install boto3 \
 && rm -rf /var/lib/apt/lists/*

COPY bin $SPACK_ROOT/bin
COPY etc $SPACK_ROOT/etc
COPY lib $SPACK_ROOT/lib
COPY share $SPACK_ROOT/share
COPY var $SPACK_ROOT/var
RUN mkdir -p $SPACK_ROOT/opt/spack

RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/local/bin/spack-env

# Add LANG default to en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
ENV LANG en_US.UTF-8
38
share/spack/keys/tutorial.pub
Normal file
@@ -0,0 +1,38 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----

mQENBF1IgqcBCADqSIBM0TT4+6Acv6SUpQ2l1Ql+UVRtJ74VGFOw+8I8aBWcBryB
wNsS/Drxn9M9rX8il2aGtAmwc1dhTh0JvdZO7KqG8Q4vvWOytdLnGSE61LV4147q
S/dJiYH2DCvhMKpOByIsEiuoTrUHzd1EQBnEPSwAQV8oWPrc1++f3iYmRemsOBCT
BldAu7Y5RwjI3qQ6GazoCF5rd1uyiMYrpT4amEKFE91VRe+IG8XfEaSTapOc/hO3
Sw4fzPelA2qD12I+JMj56vM0fQy3TXD5qngIb+leb2jGI+0bTz8RGS0xSMYVvftA
upzQPaQIfzijVBt3tFSayx/NXKR0p+EuCqGBABEBAAG0MFNwYWNrIEJ1aWxkIFBp
cGVsaW5lIChEZW1vIEtleSkgPGtleUBzcGFjay5kZW1vPokBTgQTAQgAOBYhBDHI
4nh6FErErdiO0pX4aBGV4jnYBQJdSIKnAhsvBQsJCAcCBhUKCQgLAgQWAgMBAh4B
AheAAAoJEJX4aBGV4jnYpf0IAJDYEjpm0h1pNswTvmnEhgNVbojCGRfAts7F5uf8
IFXGafKQsekMWZh0Ig0YXVn72jsOuNK/+keErMfXM3DFNTq0Ki7mcFedR9r5EfLf
4YW2n6mphsfMgsg8NwKVLFYWyhQQ4OzhdydPxkGVhEebHwfHNQ3aIcqbFmzkhxnX
CIYh2Flf3T306tKX4lXbhsXKG1L/bLtDiFRaMCBp66HGZ8u9Dbyy/W8aDwyx4duD
MG+y2OrhOf+zEu3ZPFyc/jsjmfnUtIfQVyRajh/8vh+i9fkvFlLaOQittNElt3z1
8+ybGjE9qWY/mvR2ZqnP8SVkGvxSpBVfVXiFFdepvuPAcLu5AQ0EXUiCpwEIAJ2s
npNBAVocDUSdOF/Z/eCRvy3epuYm5f1Ge1ao9K2qWYno2FatnsYxK4qqB5yGRkfj
sEzAGP8JtJvqDSuB5Xk7CIjRNOwoSB3hqvmxWh2h+HsITUhMl11FZ0Cllz+etXcK
APz2ZHSKnA3R8uf4JzIr1cHLS+gDBoj8NgBCZhcyva2b5UC///FLm1+/Lpvekd0U
n7B524hbXhFUG+UMfHO/U1c4TvCMt7RGMoWUtRzfO6XB1VQCwWJBVcVGl8Yy59Zk
3K76VbFWQWOq6fRBE0xHBAga7pOgCc9qrb+FGl1IHUT8aV8CzkxckHlNb3PlntmE
lXZLPcGFWaPtGtuIJVsAEQEAAYkCbAQYAQgAIBYhBDHI4nh6FErErdiO0pX4aBGV
4jnYBQJdSIKnAhsuAUAJEJX4aBGV4jnYwHQgBBkBCAAdFiEEneR3pKqi9Rnivv07
CYCNVr37XP0FAl1IgqcACgkQCYCNVr37XP13RQf/Ttxidgo9upF8jxrWnT5YhM6D
ozzGWzqE+/KDBX+o4f33o6uzozjESRXQUKdclC9ftDJQ84lFTMs3Z+/12ZDqCV2k
2qf0VfXg4e5xMq4tt6hojXUeYSfeGZXNU9LzjURCcMD+amIKjVztFg4kl3KHW3Pi
/aPTr4xWWgy2tZ1FDEuA5J6AZiKKJSVeoSPOGANouPqm4fNj273XFXQepIhQ5wve
4No0abxfXcLt5Yp3y06rNCBC9QdC++19N5+ajn2z9Qd2ZwztPb0mNuqHAok4vrlE
1c4WBWk93Nfy9fKImalGENpPDz0td2H9pNC9IafOWltGSWSINRrU1GeaNXS/uAOT
CADjcDN+emLbDTTReW4FLoQ0mPJ0tACgszGW50PtncTMPSj4uxSktQPWWk41oD9q
gpXm1Vgto4GvPWYs/ewR6Kyd8K0YkBxbRFyYOmycu3/zzYJnry+EHdvtQspwUDPg
QlI/avDrncERzICsbd86Jz0CMY4kzpg5v9dt/N6WnHlSk/S+vv4pPUDSz26Q4Ehh
iDvDavLGyzKSlVzWQ4bzzlQxXbDL6TZyVAQ4DBI4sI+WGtLbfD51EI5G9BfmDsbw
XJ0Dt2yEwRfDUx/lYbAMvhUnWEu2DSpYdJb8GG0GKTGqU4YpvO1JgTCsLSLIAHfT
tQMw04Gs+kORRNbggsdTD4sR
=N5Wp
-----END PGP PUBLIC KEY BLOCK-----
22
share/spack/qa/install_patchelf.sh
Executable file
@@ -0,0 +1,22 @@
#!/bin/sh
#
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

#
# Description:
#     Install patchelf for use in buildcache unit tests
#
# Usage:
#     install-patchelf.sh
#
set -ex
if [ "$TRAVIS_OS_NAME" = "linux" ]; then
    olddir=$PWD
    cd /tmp
    wget https://github.com/NixOS/patchelf/archive/0.10.tar.gz
    tar -xvf 0.10.tar.gz
    cd patchelf-0.10 && ./bootstrap.sh && ./configure --prefix=/usr && make && sudo make install && cd $olddir
fi
@@ -18,7 +18,7 @@
ORIGINAL_PATH="$PATH"

. "$(dirname $0)/setup.sh"
check_dependencies $coverage git hg svn
check_dependencies $coverage kcov git hg svn

# Clean the environment by removing Spack from the path and getting rid of
# the spack shell function
@@ -37,11 +37,7 @@ bin/spack -h
bin/spack help -a

# Profile and print top 20 lines for a simple call to spack spec
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
    spack -p --lines 20 spec openmpi
else
    spack -p --lines 20 spec mpileaks%gcc ^elfutils@0.170
fi
spack -p --lines 20 spec mpileaks%gcc ^elfutils@0.170

#-----------------------------------------------------------
# Run unit tests with code coverage
@@ -26,14 +26,11 @@ if [[ "$COVERAGE" == "true" ]]; then
    coverage=coverage
    coverage_run="coverage run"

    # bash coverage depends on some other factors -- there are issues with
    # kcov for Python 2.6, unit tests, and build tests.
    if [[ $TRAVIS_PYTHON_VERSION != 2.6 ]]; then
        mkdir -p coverage
        cc_script="$SPACK_ROOT/lib/spack/env/cc"
        bashcov=$(realpath ${QA_DIR}/bashcov)
        sed -i~ "s@#\!/bin/bash@#\!${bashcov}@" "$cc_script"
    fi
    # bash coverage depends on some other factors
    mkdir -p coverage
    cc_script="$SPACK_ROOT/lib/spack/env/cc"
    bashcov=$(realpath ${QA_DIR}/bashcov)
    sed -i~ "s@#\!/bin/bash@#\!${bashcov}@" "$cc_script"
fi

#
@@ -74,6 +71,9 @@ check_dependencies() {
            spack_package=mercurial
            pip_package=mercurial
            ;;
        kcov)
            spack_package=kcov
            ;;
        svn)
            spack_package=subversion
            ;;
@@ -12,6 +12,13 @@
#     setenv SPACK_ROOT /path/to/spack
#     source $SPACK_ROOT/share/spack/setup-env.csh
#

# prevent infinite recursion when spack shells out (e.g., on cray for modules)
if ($?_sp_initializing) then
    exit 0
endif
setenv _sp_initializing true

if ($?SPACK_ROOT) then
    set _spack_source_file = $SPACK_ROOT/share/spack/setup-env.csh
    set _spack_share_dir = $SPACK_ROOT/share/spack
@@ -38,3 +45,6 @@ else
    echo "ERROR: Sourcing spack setup-env.csh requires setting SPACK_ROOT to "
    echo "       the root of your spack installation."
endif

# done: unset sentinel variable as we're no longer initializing
unsetenv _sp_initializing
@@ -36,6 +36,12 @@
# to come up with a user-friendly naming scheme for spack dotfiles.
#################################################################################

# prevent infinite recursion when spack shells out (e.g., on cray for modules)
if test -n "$_sp_initializing"
    exit 0
end
set -x _sp_initializing true


#
# Test for STDERR-NOCARET feature: if this is off, fish will redirect stderr to
@@ -721,3 +727,6 @@ sp_multi_pathadd MODULEPATH $_sp_tcl_roots
# [3]: When the test in the if statement fails, the `status` flag is set to 1.
#      `true` here manually resets the value of `status` to 0. Since `set`
#      passes `status` along, we thus avoid the function returning 1 by mistake.

# done: unset sentinel variable as we're no longer initializing
set -e _sp_initializing
@@ -39,6 +39,12 @@
# spack module files.
########################################################################

# prevent infinite recursion when spack shells out (e.g., on cray for modules)
if [ -n "${_sp_initializing:-}" ]; then
    exit 0
fi
export _sp_initializing=true

spack() {
    # Store LD_LIBRARY_PATH variables from spack shell function
    # This is necessary because MacOS System Integrity Protection clears
@@ -357,3 +363,7 @@ _sp_multi_pathadd MODULEPATH "$_sp_tcl_roots"
if [ "$_sp_shell" = bash ]; then
    source $_sp_share_dir/spack-completion.bash
fi

# done: unset sentinel variable as we're no longer initializing
unset _sp_initializing
export _sp_initializing
129
share/spack/setup-tutorial-env.sh
Executable file
@@ -0,0 +1,129 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

###############################################################################
#
# This file is part of Spack and sets up the environment for the Spack tutorial
# It is intended to be run on ubuntu-18.04 or an ubuntu-18.04 container or AWS
# cloud9 environment
#
# Components:
#   1. apt installs for packages used in the tutorial
#      these include compilers and externals used by the tutorial and
#      basic spack requirements like python and curl
#   2. spack configuration files
#      these set the default configuration for Spack to use x86_64 and suppress
#      certain gpg warnings. The gpg warnings are not relevant for the tutorial
#      and the default x86_64 architecture allows us to run the same tutorial on
#      any x86_64 architecture without needing new binary packages.
#   3. aws cloud9 configuration to expand available storage
#      when we run on aws cloud9 we have to expand the storage from 10G to 30G
#      because we install too much software for a default cloud9 instance
###############################################################################

####
# Ensure we're on Ubuntu 18.04
####

if [ -f /etc/os-release ]; then
    . /etc/os-release
fi
if [ x"$UBUNTU_CODENAME" != "xbionic" ]; then
    echo "The tutorial setup script must be run on Ubuntu 18.04."
    return 1 &>/dev/null || exit 1  # works if sourced or run
fi

####
# Install packages needed for tutorial
####

# compilers, basic system components, externals
# There are retries around these because apt fails frequently on new instances,
# due to unattended updates running in the background and taking the lock.
until sudo apt-get update -y; do
    echo "==> apt-get update failed. retrying..."
    sleep 5
done

until sudo apt-get install -y --no-install-recommends \
    autoconf make python3 python3-pip \
    build-essential ca-certificates curl git gnupg2 iproute2 emacs \
    file openssh-server tcl unzip vim wget \
    clang g++ g++-6 gcc gcc-6 gfortran gfortran-6 \
    zlib1g zlib1g-dev mpich; do
    echo "==> apt-get install failed. retrying..."
    sleep 5
done

####
# Upgrade boto3 python package on AWS systems
####
pip3 install --upgrade boto3

####
# Spack configuration settings for tutorial
####

# create spack system config
sudo mkdir -p /etc/spack

# set default arch to x86_64
sudo tee /etc/spack/packages.yaml << EOF > /dev/null
packages:
  all:
    target: [x86_64]
EOF

# suppress gpg warnings
sudo tee /etc/spack/config.yaml << EOF > /dev/null
config:
  suppress_gpg_warnings: true
EOF

####
# AWS set volume size to at least 30G
####

# Hardcode the specified size to 30G
SIZE=30

# Get the ID of the environment host Amazon EC2 instance.
INSTANCEID=$(curl http://169.254.169.254/latest/meta-data//instance-id)

# Get the ID of the Amazon EBS volume associated with the instance.
VOLUMEID=$(aws ec2 describe-instances \
    --instance-id $INSTANCEID \
    --query "Reservations[0].Instances[0].BlockDeviceMappings[0].Ebs.VolumeId" \
    --output text)

# Resize the EBS volume.
aws ec2 modify-volume --volume-id $VOLUMEID --size $SIZE

# Wait for the resize to finish.
while [ \
    "$(aws ec2 describe-volumes-modifications \
        --volume-id $VOLUMEID \
        --filters Name=modification-state,Values="optimizing","completed" \
        --query "length(VolumesModifications)" \
        --output text)" != "1" ]; do
    sleep 1
done

if [ -e /dev/xvda1 ]
then
    # Rewrite the partition table so that the partition takes up all the space that it can.
    sudo growpart /dev/xvda 1

    # Expand the size of the file system.
    sudo resize2fs /dev/xvda1

else
    # Rewrite the partition table so that the partition takes up all the space that it can.
    sudo growpart /dev/nvme0n1 1

    # Expand the size of the file system.
    sudo resize2fs /dev/nvme0n1p1
fi
@@ -639,7 +639,7 @@ _spack_containerize() {
_spack_create() {
    if $list_options
    then
        SPACK_COMPREPLY="-h --help --keep-stage -n --name -t --template -r --repo -N --namespace -f --force --skip-editor"
        SPACK_COMPREPLY="-h --help --keep-stage -n --name -t --template -r --repo -N --namespace -f --force --skip-editor -b --batch"
    else
        SPACK_COMPREPLY=""
    fi
@@ -7,7 +7,7 @@ RUN mkdir {{ paths.environment }} \
    {{ manifest }} > {{ paths.environment }}/spack.yaml

# Install the software, remove unnecessary deps
RUN cd {{ paths.environment }} && spack env activate . && spack install && spack gc -y
RUN cd {{ paths.environment }} && spack env activate . && spack install --fail-fast && spack gc -y
{% if strip %}

# Strip all the binaries
@@ -12,7 +12,7 @@ EOF
# Install all the required software
. /opt/spack/share/spack/setup-env.sh
spack env activate .
spack install
spack install --fail-fast
spack gc -y
spack env deactivate
spack env activate --sh -d . >> {{ paths.environment }}/environment_modifications.sh
155
var/spack/repos/builtin.mock/packages/corge/package.py
Normal file
@@ -0,0 +1,155 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


from spack import *
import os


class Corge(Package):
    """A toy package to test dependencies"""

    homepage = "https://www.example.com"
    url = "https://github.com/gartung/corge/archive/v3.0.0.tar.gz"

    version('3.0.0',
            sha256='5058861c3b887511387c725971984cec665a8307d660158915a04d7786fed6bc')

    depends_on('quux')

    def install(self, spec, prefix):
        corge_cc = '''#include <iostream>
#include <stdexcept>
#include "corge.h"
#include "corge_version.h"
#include "quux/quux.h"

const int Corge::version_major = corge_version_major;
const int Corge::version_minor = corge_version_minor;

Corge::Corge()
{
}

int
Corge::get_version() const
{
    return 10 * version_major + version_minor;
}

int
Corge::corgegate() const
{
    int corge_version = get_version();
    std::cout << "Corge::corgegate version " << corge_version
              << " invoked" << std::endl;
    std::cout << "Corge config directory = %s" <<std::endl;
    Quux quux;
    int quux_version = quux.quuxify();

    if(quux_version != corge_version) {
        throw std::runtime_error(
            "Corge found an incompatible version of Garply.");
    }

    return corge_version;
}
'''
        corge_h = '''#ifndef CORGE_H_

class Corge
{
private:
    static const int version_major;
    static const int version_minor;

public:
    Corge();
    int get_version() const;
    int corgegate() const;
};

#endif // CORGE_H_
'''
        corge_version_h = '''
const int corge_version_major = %s;
const int corge_version_minor = %s;
'''
        corgegator_cc = '''
#include <iostream>
#include "corge.h"

int
main(int argc, char* argv[])
{
    std::cout << "corgerator called with ";
    if (argc == 0) {
        std::cout << "no command-line arguments" << std::endl;
    } else {
        std::cout << "command-line arguments:";
        for (int i = 0; i < argc; ++i) {
            std::cout << " \"" << argv[i] << "\"";
        }
        std::cout << std::endl;
    }
    std::cout << "corgegating.."<<std::endl;
    Corge corge;
    corge.corgegate();
    std::cout << "done."<<std::endl;
    return 0;
}
'''
        mkdirp(prefix.lib64)
        mkdirp('%s/corge' % prefix.include)
        mkdirp('%s/corge' % self.stage.source_path)
        with open('%s/corge_version.h' % self.stage.source_path, 'w') as f:
            f.write(corge_version_h % (self.version[0], self.version[1:]))
        with open('%s/corge/corge.cc' % self.stage.source_path, 'w') as f:
            f.write(corge_cc % prefix.config)
        with open('%s/corge/corge.h' % self.stage.source_path, 'w') as f:
            f.write(corge_h)
        with open('%s/corge/corgegator.cc' % self.stage.source_path, 'w') as f:
            f.write(corgegator_cc)
        gpp = which('/usr/bin/g++')
        gpp('-Dcorge_EXPORTS',
            '-I%s' % self.stage.source_path,
            '-I%s' % spec['quux'].prefix.include,
            '-I%s' % spec['garply'].prefix.include,
            '-O2', '-g', '-DNDEBUG', '-fPIC',
            '-o', 'corge.cc.o',
            '-c', 'corge/corge.cc')
        gpp('-Dcorge_EXPORTS',
            '-I%s' % self.stage.source_path,
            '-I%s' % spec['quux'].prefix.include,
            '-I%s' % spec['garply'].prefix.include,
            '-O2', '-g', '-DNDEBUG', '-fPIC',
            '-o', 'corgegator.cc.o',
            '-c', 'corge/corgegator.cc')
        gpp('-fPIC', '-O2', '-g', '-DNDEBUG', '-shared',
            '-Wl,-soname,libcorge.so', '-o', 'libcorge.so', 'corge.cc.o',
            '-Wl,-rpath,%s:%s::::' %
            (spec['quux'].prefix.lib64, spec['garply'].prefix.lib64),
            '%s/libquux.so' % spec['quux'].prefix.lib64,
            '%s/libgarply.so' % spec['garply'].prefix.lib64)
        gpp('-O2', '-g', '-DNDEBUG', '-rdynamic',
            'corgegator.cc.o', '-o', 'corgegator',
            '-Wl,-rpath,%s:%s:%s:::' % (prefix.lib64,
                                        spec['quux'].prefix.lib64,
                                        spec['garply'].prefix.lib64),
            'libcorge.so',
            '%s/libquux.so' % spec['quux'].prefix.lib64,
            '%s/libgarply.so' % spec['garply'].prefix.lib64)
        copy('corgegator', '%s/corgegator' % prefix.lib64)
        copy('libcorge.so', '%s/libcorge.so' % prefix.lib64)
        copy('%s/corge/corge.h' % self.stage.source_path,
             '%s/corge/corge.h' % prefix.include)
        mkdirp(prefix.bin)
        copy('corge_version.h', '%s/corge_version.h' % prefix.bin)
        os.symlink('%s/corgegator' % prefix.lib64,
                   '%s/corgegator' % prefix.bin)
        os.symlink('%s/quuxifier' % spec['quux'].prefix.lib64,
                   '%s/quuxifier' % prefix.bin)
        os.symlink('%s/garplinator' % spec['garply'].prefix.lib64,
                   '%s/garplinator' % prefix.bin)
112
var/spack/repos/builtin.mock/packages/garply/package.py
Normal file
@@ -0,0 +1,112 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


from spack import *
import os


class Garply(Package):
    """Toy package for testing dependencies"""

    homepage = "https://www.example.com"
    url = "https://github.com/gartung/garply/archive/v3.0.0.tar.gz"

    version('3.0.0',
            sha256='534ac8ba7a6fed7e8bbb543bd43ca04999e65337445a531bd296939f5ac2f33d')

    def install(self, spec, prefix):
        garply_h = '''#ifndef GARPLY_H_

class Garply
{
private:
    static const int version_major;
    static const int version_minor;

public:
    Garply();
    int get_version() const;
    int garplinate() const;
};

#endif // GARPLY_H_
'''
        garply_cc = '''#include "garply.h"
#include "garply_version.h"
#include <iostream>

const int Garply::version_major = garply_version_major;
const int Garply::version_minor = garply_version_minor;

Garply::Garply() {}

int
Garply::get_version() const
{
    return 10 * version_major + version_minor;
}

int
Garply::garplinate() const
{
    std::cout << "Garply::garplinate version " << get_version()
              << " invoked" << std::endl;
    std::cout << "Garply config dir = %s" << std::endl;
    return get_version();
}
'''
        garplinator_cc = '''#include "garply.h"
#include <iostream>

int
main()
{
    Garply garply;
    garply.garplinate();

    return 0;
}
'''
        garply_version_h = '''const int garply_version_major = %s;
const int garply_version_minor = %s;
'''
        mkdirp(prefix.lib64)
        mkdirp('%s/garply' % prefix.include)
        mkdirp('%s/garply' % self.stage.source_path)
        with open('%s/garply_version.h' % self.stage.source_path, 'w') as f:
            f.write(garply_version_h % (self.version[0], self.version[1:]))
        with open('%s/garply/garply.h' % self.stage.source_path, 'w') as f:
            f.write(garply_h)
        with open('%s/garply/garply.cc' % self.stage.source_path, 'w') as f:
            f.write(garply_cc % prefix.config)
        with open('%s/garply/garplinator.cc' %
                  self.stage.source_path, 'w') as f:
            f.write(garplinator_cc)
        gpp = which('/usr/bin/g++')
        gpp('-Dgarply_EXPORTS',
            '-I%s' % self.stage.source_path,
            '-O2', '-g', '-DNDEBUG', '-fPIC',
            '-o', 'garply.cc.o',
            '-c', '%s/garply/garply.cc' % self.stage.source_path)
        gpp('-Dgarply_EXPORTS',
            '-I%s' % self.stage.source_path,
            '-O2', '-g', '-DNDEBUG', '-fPIC',
            '-o', 'garplinator.cc.o',
            '-c', '%s/garply/garplinator.cc' % self.stage.source_path)
        gpp('-fPIC', '-O2', '-g', '-DNDEBUG', '-shared',
            '-Wl,-soname,libgarply.so', '-o', 'libgarply.so', 'garply.cc.o')
        gpp('-O2', '-g', '-DNDEBUG', '-rdynamic',
            'garplinator.cc.o', '-o', 'garplinator',
            '-Wl,-rpath,%s' % prefix.lib64,
            'libgarply.so')
        copy('libgarply.so', '%s/libgarply.so' % prefix.lib64)
        copy('garplinator', '%s/garplinator' % prefix.lib64)
        copy('%s/garply/garply.h' % self.stage.source_path,
             '%s/garply/garply.h' % prefix.include)
        mkdirp(prefix.bin)
        copy('garply_version.h', '%s/garply_version.h' % prefix.bin)
        os.symlink('%s/garplinator' % prefix.lib64,
                   '%s/garplinator' % prefix.bin)
@@ -7,16 +7,17 @@


class Patchelf(AutotoolsPackage):
    """
    PatchELF is a small utility to modify the
    dynamic linker and RPATH of ELF executables.
    """
    """PatchELF is a small utility to modify the dynamic linker and RPATH of
    ELF executables."""

    homepage = "https://nixos.org/patchelf.html"
    url = "http://nixos.org/releases/patchelf/patchelf-0.8/patchelf-0.8.tar.gz"
    list_url = "http://nixos.org/releases/patchelf/"
    url = "https://nixos.org/releases/patchelf/patchelf-0.10/patchelf-0.10.tar.gz"
    list_url = "https://nixos.org/releases/patchelf/"
    list_depth = 1

    version('0.9', '3c265508526760f233620f35d79c79fc')
    version('0.8', '407b229e6a681ffb0e2cdd5915cb2d01')
    version('0.10', sha256='b2deabce05c34ce98558c0efb965f209de592197b2c88e930298d740ead09019')
    version('0.9', sha256='f2aa40a6148cb3b0ca807a1bf836b081793e55ec9e5540a5356d800132be7e0a')
    version('0.8', sha256='14af06a2da688d577d64ff8dac065bb8903bbffbe01d30c62df7af9bf4ce72fe')

    def install(self, spec, prefix):
        install_tree(self.stage.source_path, prefix)
132
var/spack/repos/builtin.mock/packages/quux/package.py
Normal file
@@ -0,0 +1,132 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


from spack import *
import os


class Quux(Package):
    """Toy package for testing dependencies"""

    homepage = "https://www.example.com"
    url = "https://github.com/gartung/quux/archive/v3.0.0.tar.gz"

    version('3.0.0',
            sha256='b91bc96fb746495786bddac2c527039177499f2f76d3fa9dcf0b393859e68484')

    depends_on('garply')

    def install(self, spec, prefix):
        quux_cc = '''#include "quux.h"
#include "garply/garply.h"
#include "quux_version.h"
#include <iostream>
#include <stdexcept>

const int Quux::version_major = quux_version_major;
const int Quux::version_minor = quux_version_minor;

Quux::Quux() {}

int
Quux::get_version() const
{
    return 10 * version_major + version_minor;
}

int
Quux::quuxify() const
{
    int quux_version = get_version();
    std::cout << "Quux::quuxify version " << quux_version
              << " invoked" <<std::endl;
    std::cout << "Quux config directory is %s" <<std::endl;
    Garply garply;
    int garply_version = garply.garplinate();

    if (garply_version != quux_version) {
        throw std::runtime_error(
            "Quux found an incompatible version of Garply.");
    }

    return quux_version;
}
'''
        quux_h = '''#ifndef QUUX_H_

class Quux
{
private:
    static const int version_major;
    static const int version_minor;

public:
    Quux();
    int get_version() const;
    int quuxify() const;
};

#endif // QUUX_H_
'''
        quuxifier_cc = '''
#include "quux.h"
#include <iostream>

int
main()
{
    Quux quux;
    quux.quuxify();

    return 0;
}
'''
        quux_version_h = '''const int quux_version_major = %s;
const int quux_version_minor = %s;
'''
        mkdirp(prefix.lib64)
        mkdirp('%s/quux' % prefix.include)
        with open('%s/quux_version.h' % self.stage.source_path, 'w') as f:
            f.write(quux_version_h % (self.version[0], self.version[1:]))
        with open('%s/quux/quux.cc' % self.stage.source_path, 'w') as f:
            f.write(quux_cc % (prefix.config))
        with open('%s/quux/quux.h' % self.stage.source_path, 'w') as f:
            f.write(quux_h)
        with open('%s/quux/quuxifier.cc' % self.stage.source_path, 'w') as f:
            f.write(quuxifier_cc)
        gpp = which('/usr/bin/g++')
        gpp('-Dquux_EXPORTS',
            '-I%s' % self.stage.source_path,
            '-I%s' % spec['garply'].prefix.include,
            '-O2', '-g', '-DNDEBUG', '-fPIC',
            '-o', 'quux.cc.o',
            '-c', 'quux/quux.cc')
        gpp('-Dquux_EXPORTS',
            '-I%s' % self.stage.source_path,
            '-I%s' % spec['garply'].prefix.include,
            '-O2', '-g', '-DNDEBUG', '-fPIC',
            '-o', 'quuxifier.cc.o',
            '-c', 'quux/quuxifier.cc')
        gpp('-fPIC', '-O2', '-g', '-DNDEBUG', '-shared',
            '-Wl,-soname,libquux.so', '-o', 'libquux.so', 'quux.cc.o',
            '-Wl,-rpath,%s:%s::::' % (prefix.lib64,
                                      spec['garply'].prefix.lib64),
            '%s/libgarply.so' % spec['garply'].prefix.lib64)
        gpp('-O2', '-g', '-DNDEBUG', '-rdynamic',
            'quuxifier.cc.o', '-o', 'quuxifier',
            '-Wl,-rpath,%s:%s::::' % (prefix.lib64,
                                      spec['garply'].prefix.lib64),
            'libquux.so',
            '%s/libgarply.so' % spec['garply'].prefix.lib64)
        copy('libquux.so', '%s/libquux.so' % prefix.lib64)
        copy('quuxifier', '%s/quuxifier' % prefix.lib64)
        copy('%s/quux/quux.h' % self.stage.source_path,
             '%s/quux/quux.h' % prefix.include)
        mkdirp(prefix.bin)
        copy('quux_version.h', '%s/quux_version.h' % prefix.bin)
        os.symlink('%s/quuxifier' % prefix.lib64, '%s/quuxifier' % prefix.bin)
        os.symlink('%s/garplinator' % spec['garply'].prefix.lib64,
                   '%s/garplinator' % prefix.bin)
@@ -56,6 +56,9 @@ class Abinit(AutotoolsPackage):
|
||||
variant('hdf5', default=False,
|
||||
description='Enables HDF5+Netcdf4 with MPI. WARNING: experimental')
|
||||
|
||||
variant('wannier90', default=False,
|
||||
description='Enables the Wannier90 library')
|
||||
|
||||
# Add dependencies
|
||||
# currently one cannot forward options to virtual packages, see #1712.
|
||||
# depends_on('blas', when='~openmp')
|
||||
@@ -84,6 +87,8 @@ class Abinit(AutotoolsPackage):
    # Cannot ask for +scalapack if it does not depend on MPI
    conflicts('+scalapack', when='~mpi')

    depends_on("wannier90+shared", when='+wannier90')

    # Elpa is a substitute for scalapack and needs mpi
    # conflicts('+elpa', when='~mpi')
    # conflicts('+elpa', when='+scalapack')
@@ -95,12 +100,25 @@ def configure_args(self):
        options = []
        oapp = options.append

        if '+wannier90' in spec:
            oapp('--with-wannier90-libs=-L{0}'
                 .format(spec['wannier90'].prefix.lib + ' -lwannier -lm'))
            oapp('--with-wannier90-incs=-I{0}'
                 .format(spec['wannier90'].prefix.modules))
            oapp('--with-wannier90-bins={0}'
                 .format(spec['wannier90'].prefix.bin))
            oapp('--enable-connectors')
            oapp('--with-dft-flavor=wannier90')

        if '+mpi' in spec:
            # MPI version:
            # let the configure script auto-detect MPI support from mpi_prefix
            oapp('--with-mpi-prefix={0}'.format(spec['mpi'].prefix))
            oapp('--enable-mpi=yes')
            oapp('--enable-mpi-io=yes')
            oapp('MPIFC={0}/mpifc'.format(spec['mpi'].prefix.bin))
            if '~wannier90' in spec:
                oapp('--with-dft-flavor=atompaw+libxc')

        # Activate OpenMP in Abinit Fortran code.
        if '+openmp' in spec:
@@ -129,7 +147,6 @@ def configure_args(self):
                '--with-fft-incs=-I%s' % spec['fftw'].prefix.include,
                '--with-fft-libs=-L%s %s' % (spec['fftw'].prefix.lib, fftlibs),
            ])
            oapp('--with-dft-flavor=atompaw+libxc')

        # LibXC library
        libxc = spec['libxc:fortran']
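In an AutotoolsPackage, the list returned by configure_args() becomes extra arguments on the generated ./configure line. A simplified sketch of that plumbing (not Spack's actual implementation, which also injects --prefix and other defaults):

# Simplified sketch: configure_args() output appended after --prefix.
def run_configure(prefix, configure_args):
    cmd = ['./configure', '--prefix={0}'.format(prefix)] + configure_args
    print(' '.join(cmd))
    # subprocess.check_call(cmd)  # would run it inside a real source tree

run_configure('/opt/abinit',
              ['--enable-mpi=yes', '--with-dft-flavor=atompaw+libxc'])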
@@ -10,10 +10,11 @@ class AbseilCpp(CMakePackage):
    """Abseil Common Libraries (C++)"""

    homepage = "https://abseil.io/"
    url = "https://github.com/abseil/abseil-cpp/archive/20180600.tar.gz"
    url = "https://github.com/abseil/abseil-cpp/archive/20200225.2.tar.gz"

    maintainers = ['jcftang']

    version('20200225.2', sha256='f41868f7a938605c92936230081175d1eae87f6ea2c248f41077c8f88316f111')
    version('20200225.1', sha256='0db0d26f43ba6806a8a3338da3e646bb581f0ca5359b3a201d8fb8e4752fd5f8')
    version('20190808', sha256='8100085dada279bf3ee00cd064d43b5f55e5d913be0dfe2906f06f8f28d5b37e')
    version('20181200', sha256='e2b53bfb685f5d4130b84c4f3050c81bf48c497614dc85d91dbd3ed9129bce6d')
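Each version() directive pins a sha256 digest that Spack compares against the fetched archive. A self-contained sketch of the check itself:

# Hedged sketch of the integrity check behind the version() digests above.
import hashlib

def sha256sum(path):
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 16), b''):
            h.update(chunk)
    return h.hexdigest()

# expected = 'f41868f7a938605c92936230081175d1eae87f6ea2c248f41077c8f88316f111'
# assert sha256sum('abseil-cpp-20200225.2.tar.gz') == expected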
@@ -24,6 +24,10 @@ class Acl(AutotoolsPackage):
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('attr')
    depends_on('gettext')

    def setup_build_environment(self, env):
        env.append_flags('LDFLAGS', '-lintl')

    def autoreconf(self, spec, prefix):
        bash = which('bash')
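setup_build_environment receives an EnvironmentModifications object; append_flags records an append to a flag-style variable that Spack applies at build time. A rough standalone approximation of the net effect (the real object queues modifications instead of touching os.environ directly):

import os

def append_flags(var, flags):
    # Approximation only: apply the append immediately.
    os.environ[var] = (os.environ.get(var, '') + ' ' + flags).strip()

append_flags('LDFLAGS', '-lintl')
print(os.environ['LDFLAGS'])  # ... -lintl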
@@ -6,7 +6,7 @@
from spack import *


class Acts(CMakePackage):
class Acts(CMakePackage, CudaPackage):
    """
    A Common Tracking Software (Acts)

@@ -35,6 +35,12 @@ class Acts(CMakePackage):

    # Supported Acts versions
    version('master', branch='master')
    version('0.27.1', commit='8ba3010a532137bc0ab6cf83a38b483cef646a01')
    version('0.27.0', commit='f7b1a1c27d5a95d08bb67236ad0e117fcd1c679f')
    version('0.26.0', commit='cf542b108b31fcc349fc18fb0466f889e4e42aa6')
    version('0.25.2', commit='76bf1f3e4be51d4d27126b473a2caa8d8a72b320')
    version('0.25.1', commit='6e8a1ea6d2c7385a78e3e190efb2a8a0c1fa957f')
    version('0.25.0', commit='0aca171951a214299e8ff573682b1c5ecec63d42')
    version('0.24.0', commit='ef4699c8500bfea59a5fe88bed67fde2f00f0adf')
    version('0.23.0', commit='dc443dd7e663bc4d7fb3c1e3f1f75aaf57ffd4e4')
    version('0.22.1', commit='ca1b8b1645db6b552f44c48d2ff34c8c29618f3a')
@@ -90,10 +96,11 @@ class Acts(CMakePackage):

    # Build dependencies
    depends_on('boost @1.62:1.69.99 +program_options +test', when='@:0.10.3')
    depends_on('boost @1.69: +filesystem +program_options +test', when='@0.10.4:')
    depends_on('boost @1.69: +filesystem +program_options +test', when='@0.10.4:0.25')
    depends_on('boost @1.69: +program_options +test', when='@0.26:')
    depends_on('cmake @3.11:', type='build')
    depends_on('dd4hep @1.10: +xercesc', when='+dd4hep')
    depends_on('dd4hep @1.10: +geant4 +xercesc', when='+dd4hep +geant4')
    depends_on('dd4hep @1.10:', when='+dd4hep')
    depends_on('dd4hep @1.10: +geant4', when='+dd4hep +geant4')
    depends_on('eigen @3.2.9:', type='build')
    depends_on('geant4', when='+geant4')
    depends_on('hepmc3@3.1:', when='+hepmc3')
@@ -141,6 +148,7 @@ def example_cmake_variant(cmake_label, spack_variant):

        args = [
            cmake_variant("BENCHMARKS", "benchmarks"),
            cmake_variant("CUDA_PLUGIN", "cuda"),
            cmake_variant("DD4HEP_PLUGIN", "dd4hep"),
            cmake_variant("DIGITIZATION_PLUGIN", "digitization"),
            cmake_variant("EXAMPLES", "examples"),

@@ -157,6 +165,10 @@ def example_cmake_variant(cmake_label, spack_variant):
            cmake_variant("TGEO_PLUGIN", "tgeo")
        ]

        cuda_arch = spec.variants['cuda_arch'].value
        if cuda_arch != 'none':
            args.append('-DCUDA_FLAGS=-arch=sm_{0}'.format(cuda_arch[0]))

        if 'root' in spec:
            cxxstd = spec['root'].variants['cxxstd'].value
            args.append("-DCMAKE_CXX_STANDARD={0}".format(cxxstd))
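The cmake_variant entries above come from a helper defined earlier in the package, outside this excerpt. A plausible, hedged reconstruction of what such a helper returns (the ACTS_BUILD_ flag prefix is an assumption here):

# Hypothetical reconstruction, not the package's literal definition:
# map a variant's state onto a -DACTS_BUILD_<LABEL>=ON/OFF switch.
def cmake_variant(cmake_label, enabled):
    return '-DACTS_BUILD_{0}={1}'.format(cmake_label,
                                         'ON' if enabled else 'OFF')

print(cmake_variant('BENCHMARKS', True))    # -DACTS_BUILD_BENCHMARKS=ON
print(cmake_variant('CUDA_PLUGIN', False))  # -DACTS_BUILD_CUDA_PLUGIN=OFF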
@@ -15,6 +15,6 @@ class AdeptUtils(CMakePackage):
    version('1.0.1', sha256='259f777aeb368ede3583d3617bb779f0fde778319bf2122fdd216bdf223c015e')
    version('1.0', sha256='fed29366c9bcf5f3799220ae3b351d2cb338e2aa42133d61584ea650aa8d6ff7')

    depends_on('boost')
    depends_on('boost@:1.72.0')
    depends_on('mpi')
    depends_on('cmake@2.8:', type='build')
@@ -21,4 +21,7 @@ class Aegean(MakefilePackage):

    def edit(self, spec, prefix):
        makefile = FileFilter('Makefile')
        if spec.target.family == 'aarch64':
            makefile.filter('-m64', '')

        makefile.filter('/usr/local', prefix)
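FileFilter.filter performs an in-place substitution on the named file; dropping '-m64' keeps the build working on aarch64, where that flag does not exist. A rough standalone equivalent (the Spack version also supports backups and more options):

# Approximation of FileFilter('Makefile').filter(pattern, repl).
import re

def filter_file(pattern, repl, path):
    with open(path) as f:
        text = f.read()
    with open(path, 'w') as f:
        f.write(re.sub(pattern, repl, text))

# filter_file('-m64', '', 'Makefile')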
@@ -9,28 +9,41 @@


class Amber(Package, CudaPackage):
    """Amber is a suite of biomolecular simulation programs.
    """Amber is a suite of biomolecular simulation programs together
    with Amber tools.

    Note: A manual download is required for Amber.
    Spack will search your current directory for the download file.
    Alternatively, add this file to a mirror so that Spack can find it.
    Note: The version number is composed of the Amber version (major)
    and the tools version (minor). A manual download is required for
    both Amber and Amber tools.
    Spack will search your current directory for the download files.
    Alternatively, add the files to a mirror so that Spack can find them.
    For instructions on how to set up a mirror, see
    http://spack.readthedocs.io/en/latest/mirrors.html"""

    homepage = "http://ambermd.org/"
    url = "file://{0}/Amber18.tar.bz2".format(os.getcwd())
    maintainers = ['hseara']

    version('18', sha256='2060897c0b11576082d523fb63a51ba701bc7519ff7be3d299d5ec56e8e6e277')
    version('16', sha256='3b7ef281fd3c46282a51b6a6deed9ed174a1f6d468002649d84bfc8a2577ae5d')
    def url_for_version(self, version):
        url = "file://{0}/Amber{1}.tar.bz2".format(
            os.getcwd(), version.up_to(1))
        return url

    version(
        '18.20', sha256='2060897c0b11576082d523fb63a51ba701bc7519ff7be3d299d5ec56e8e6e277')
    version(
        '18.19', sha256='2060897c0b11576082d523fb63a51ba701bc7519ff7be3d299d5ec56e8e6e277')
    version(
        '16.16', sha256='3b7ef281fd3c46282a51b6a6deed9ed174a1f6d468002649d84bfc8a2577ae5d')

    resources = [
        # [version amber, version ambertools, sha256sum]
        ('18', '20', 'b1e1f8f277c54e88abc9f590e788bbb2f7a49bcff5e8d8a6eacfaf332a4890f9'),
        ('18', '19', '0c86937904854b64e4831e047851f504ec45b42e593db4ded92c1bee5973e699'),
        ('16', '16', '7b876afe566e9dd7eb6a5aa952a955649044360f15c1f5d4d91ba7f41f3105fa'),
    ]
    for ver, ambertools_ver, checksum in resources:
        resource(when='@{0}'.format(ver),
        resource(when='@{0}.{1}'.format(ver, ambertools_ver),
                 name='AmberTools',
                 url='file://{0}/AmberTools{1}.tar.bz2'.format(os.getcwd(),
                                                               ambertools_ver),
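The new url_for_version derives the tarball name from only the major (Amber) component of the combined version. A hedged sketch of that mapping, assuming Spack's spack.version.Version.up_to, which truncates a version to its first n components:

# Sketch: '18.20' means Amber 18 + AmberTools 20; up_to(1) keeps '18',
# so the fetch target stays Amber18.tar.bz2 for both 18.20 and 18.19.
from spack.version import Version

for v in ('18.20', '18.19', '16.16'):
    print("file:///downloads/Amber{0}.tar.bz2".format(Version(v).up_to(1)))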
@@ -100,10 +113,14 @@ class Amber(Package, CudaPackage):
    depends_on('cuda@7.5.18', when='@:16+cuda')

    # conflicts
    conflicts('+x11', when='platform=cray', msg='x11 amber applications not available for cray')
    conflicts('+openmp', when='%clang', msg='OpenMP optimizations not available for the clang compiler')
    conflicts('+openmp', when='%apple-clang', msg='OpenMP optimizations not available for the Apple clang compiler')
    conflicts('+openmp', when='%pgi', msg='OpenMP optimizations not available for the pgi compiler')
    conflicts('+x11', when='platform=cray',
              msg='x11 amber applications not available for cray')
    conflicts('+openmp', when='%clang',
              msg='OpenMP not available for the clang compiler')
    conflicts('+openmp', when='%apple-clang',
              msg='OpenMP not available for the Apple clang compiler')
    conflicts('+openmp', when='%pgi',
              msg='OpenMP not available for the pgi compiler')

    def setup_build_environment(self, env):
        amber_src = self.stage.source_path
@@ -20,4 +20,5 @@ class Amdblis(BlisBase):
    url = "https://github.com/amd/blis/archive/2.1.tar.gz"
    git = "https://github.com/amd/blis.git"

    version('2.2', sha256='e1feb60ac919cf6d233c43c424f6a8a11eab2c62c2c6e3f2652c15ee9063c0c9')
    version('2.1', sha256='3b1d611d46f0f13b3c0917e27012e0f789b23dbefdddcf877b20327552d72fb3')
@@ -18,6 +18,7 @@ class Amrex(CMakePackage):
    maintainers = ['mic84', 'asalmgren']

    version('develop', branch='development')
    version('20.07', sha256='c386f566f4c57ee56b5630f79ce2c6117d5a612a4aab69b7b26e48d577251165')
    version('20.06', sha256='be2f2a5107111fcb8b3928b76024b370c7cb01a9e5dd79484cf7fcf59d0b4858')
    version('20.05', sha256='97d753bb75e845a0a959ec1a044a48e6adb86dd008b5e29ce7a01d49ed276338')
    version('20.04', sha256='a7ece54d5d89cc00fd555551902a0d4d0fb50db15d2600f441353eed0dddd83b')

@@ -57,6 +58,12 @@ class Amrex(CMakePackage):
            values=('Debug', 'Release'))
    variant('sundials', default=False,
            description='Build AMReX with SUNDIALS support')
    variant('hdf5', default=False,
            description='Enable HDF5-based I/O')
    variant('hypre', default=False,
            description='Enable Hypre interfaces')
    variant('petsc', default=False,
            description='Enable PETSc interfaces')

    # Build dependencies
    depends_on('mpi', when='+mpi')

@@ -68,6 +75,24 @@ class Amrex(CMakePackage):
    conflicts('%apple-clang')
    conflicts('%clang')

    # Check options compatibility
    conflicts('+sundials', when='~fortran',
              msg='AMReX SUNDIALS support needs AMReX Fortran API (+fortran)')
    conflicts('+hdf5', when='@:20.06',
              msg='AMReX HDF5 support needs AMReX newer than version 20.06')
    conflicts('+hypre', when='@:20.06',
              msg='AMReX Hypre support needs AMReX newer than version 20.06')
    conflicts('+hypre', when='~fortran',
              msg='AMReX Hypre support needs AMReX Fortran API (+fortran)')
    conflicts('+hypre', when='~linear_solvers',
              msg='AMReX Hypre support needs variant +linear_solvers')
    conflicts('+petsc', when='@:20.06',
              msg='AMReX PETSc support needs AMReX newer than version 20.06')
    conflicts('+petsc', when='~fortran',
              msg='AMReX PETSc support needs AMReX Fortran API (+fortran)')
    conflicts('+petsc', when='~linear_solvers',
              msg='AMReX PETSc support needs variant +linear_solvers')

    def url_for_version(self, version):
        if version >= Version('20.05'):
            url = "https://github.com/AMReX-Codes/amrex/releases/download/{0}/amrex-{0}.tar.gz"

@@ -89,11 +114,16 @@ def cmake_args(self):
            self.spec.variants['precision'].value.upper(),
            '-DENABLE_EB:BOOL=%s' % self.cmake_is_on('+eb'),
            '-DXSDK_ENABLE_Fortran:BOOL=%s' % self.cmake_is_on('+fortran'),
            '-DENABLE_FORTRAN_INTERFACES:BOOL=%s'
            % self.cmake_is_on('+fortran'),
            '-DENABLE_LINEAR_SOLVERS:BOOL=%s' %
            self.cmake_is_on('+linear_solvers'),
            '-DENABLE_AMRDATA:BOOL=%s' % self.cmake_is_on('+amrdata'),
            '-DENABLE_PARTICLES:BOOL=%s' % self.cmake_is_on('+particles'),
            '-DENABLE_SUNDIALS:BOOL=%s' % self.cmake_is_on('+sundials')
            '-DENABLE_SUNDIALS:BOOL=%s' % self.cmake_is_on('+sundials'),
            '-DENABLE_HDF5:BOOL=%s' % self.cmake_is_on('+hdf5'),
            '-DENABLE_HYPRE:BOOL=%s' % self.cmake_is_on('+hypre'),
            '-DENABLE_PETSC:BOOL=%s' % self.cmake_is_on('+petsc'),
        ]
        if self.spec.satisfies('%fj'):
            args.append('-DCMAKE_Fortran_MODDIR_FLAG=-M')
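cmake_is_on converts a spec query into the ON/OFF strings CMake expects; note the hunk above also fixes a missing comma after the original '+sundials' entry. A toy sketch of the idea (the real helper lives on the package class and queries self.spec):

# Toy stand-in for self.cmake_is_on('+petsc') and friends.
def cmake_is_on(spec_tokens, option):
    return 'ON' if option in spec_tokens else 'OFF'

print(cmake_is_on({'+fortran', '+petsc'}, '+petsc'))  # ON
print(cmake_is_on({'+fortran'}, '+hypre'))            # OFF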
var/spack/repos/builtin/packages/apcomp/package.py (new file, 182 lines)
@@ -0,0 +1,182 @@
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


from spack import *
import os
import socket
from os import environ as env
import llnl.util.tty as tty


def cmake_cache_entry(name, value, vtype=None):
    """
    Helper that creates CMake cache entry strings used in
    'host-config' files.
    """
    if vtype is None:
        if value == "ON" or value == "OFF":
            vtype = "BOOL"
        else:
            vtype = "PATH"
    return 'set({0} "{1}" CACHE {2} "")\n\n'.format(name, value, vtype)


class Apcomp(Package):
    """A multi use-case image compositor"""

    homepage = 'https://github.com/Alpine-DAV/ap_compositor'
    git = 'https://github.com/Alpine-DAV/ap_compositor.git'
    url = "https://github.com/Alpine-DAV/ap_compositor/releases/download/v0.0.1/apcomp-v0.0.1.tar.gz"

    maintainers = ['mclarsen', 'cyrush']

    version('master', branch='master', submodules='True')
    version('0.0.1', sha256="cbf85fe58d5d5bc2f468d081386cc8b79861046b3bb7e966edfa3f8e95b998b2")

    variant('openmp', default=True, description='Build with openmp support')
    variant('mpi', default=True, description='Build with MPI support')
    variant('shared', default=True, description='Build Shared Library')

    depends_on('cmake@3.9:', type='build')
    depends_on("mpi", when="+mpi")

    root_cmakelists_dir = 'src'

    def install(self, spec, prefix):
        """
        Build and install APComp
        """
        with working_dir('spack-build', create=True):
            host_cfg_fname = self.create_host_config(spec,
                                                     prefix)
            cmake_args = []
            # if we have a static build, we need to avoid any of
            # spack's default cmake settings related to rpaths
            # (see: https://github.com/LLNL/spack/issues/2658)
            if "+shared" in spec:
                cmake_args.extend(std_cmake_args)
            else:
                for arg in std_cmake_args:
                    if arg.count("RPATH") == 0:
                        cmake_args.append(arg)
            cmake_args.extend(["-C", host_cfg_fname, "../src"])
            print("Configuring APComp...")
            cmake(*cmake_args)
            print("Building APComp...")
            make()
            print("Installing APComp...")
            make("install")
            # install copy of host config for provenance
            install(host_cfg_fname, prefix)

    def create_host_config(self, spec, prefix):
        """
        This method creates a 'host-config' file that specifies
        all of the options used to configure and build apcomp.
        """

        #######################
        # Compiler Info
        #######################
        c_compiler = env["SPACK_CC"]
        cpp_compiler = env["SPACK_CXX"]

        #######################################################################
        # We directly fetch the names of the actual compilers to create a
        # 'host config' file that works outside of the spack install env.
        #######################################################################

        sys_type = spec.architecture
        # if on llnl systems, we can use the SYS_TYPE
        if "SYS_TYPE" in env:
            sys_type = env["SYS_TYPE"]

        ##############################################
        # Find and record what CMake is used
        ##############################################

        if "+cmake" in spec:
            cmake_exe = spec['cmake'].command.path
        else:
            cmake_exe = which("cmake")
            if cmake_exe is None:
                msg = 'failed to find CMake (and cmake variant is off)'
                raise RuntimeError(msg)
            cmake_exe = cmake_exe.path

        host_cfg_fname = "%s-%s-%s-apcomp.cmake" % (socket.gethostname(),
                                                    sys_type,
                                                    spec.compiler)

        cfg = open(host_cfg_fname, "w")
        cfg.write("##################################\n")
        cfg.write("# spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler))
        cfg.write("##################################\n\n")

        # Include path to cmake for reference
        cfg.write("# cmake from spack \n")
        cfg.write("# cmake executable path: %s\n\n" % cmake_exe)

        #######################
        # Compiler Settings
        #######################
        cfg.write("#######\n")
        cfg.write("# using %s compiler spec\n" % spec.compiler)
        cfg.write("#######\n\n")
        cfg.write("# c compiler used by spack\n")
        cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", c_compiler))
        cfg.write("# cpp compiler used by spack\n")
        cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", cpp_compiler))

        # shared vs static libs
        if "+shared" in spec:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "ON"))
        else:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "OFF"))

        if "+openmp" in spec:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "OFF"))

        if "+mpi" in spec:
            mpicc_path = spec['mpi'].mpicc
            mpicxx_path = spec['mpi'].mpicxx
            mpifc_path = spec['mpi'].mpifc
            # if we are using compiler wrappers on cray systems
            # use those for mpi wrappers, b/c spec['mpi'].mpicxx
            # etc may return the spack compiler wrappers
            # which can trip up mpi detection in CMake 3.14
            if cpp_compiler == "CC":
                mpicc_path = "cc"
                mpicxx_path = "CC"
                mpifc_path = "ftn"
            cfg.write(cmake_cache_entry("ENABLE_MPI", "ON"))
            cfg.write(cmake_cache_entry("MPI_C_COMPILER", mpicc_path))
            cfg.write(cmake_cache_entry("MPI_CXX_COMPILER", mpicxx_path))
            cfg.write(cmake_cache_entry("MPI_Fortran_COMPILER", mpifc_path))
            mpiexe_bin = join_path(spec['mpi'].prefix.bin, 'mpiexec')
            if os.path.isfile(mpiexe_bin):
                # starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
                # vs the older versions which expect MPIEXEC
                if self.spec["cmake"].satisfies('@3.10:'):
                    cfg.write(cmake_cache_entry("MPIEXEC_EXECUTABLE",
                                                mpiexe_bin))
                else:
                    cfg.write(cmake_cache_entry("MPIEXEC",
                                                mpiexe_bin))
        else:
            cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))

        cfg.write("##################################\n")
        cfg.write("# end spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.close()

        host_cfg_fname = os.path.abspath(host_cfg_fname)
        tty.info("spack generated apcomp host-config file: " + host_cfg_fname)
        return host_cfg_fname
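A quick usage note for the cmake_cache_entry helper defined at the top of this new file; the expected output follows directly from its format string:

print(cmake_cache_entry("ENABLE_OPENMP", "ON"))
# set(ENABLE_OPENMP "ON" CACHE BOOL "")

print(cmake_cache_entry("CMAKE_C_COMPILER", "/usr/bin/gcc"))
# set(CMAKE_C_COMPILER "/usr/bin/gcc" CACHE PATH "")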
@@ -35,22 +35,21 @@ class Ascent(Package, CudaPackage):

    homepage = "https://github.com/Alpine-DAV/ascent"
    git = "https://github.com/Alpine-DAV/ascent.git"
    url = "https://github.com/Alpine-DAV/ascent/releases/download/v0.5.0/ascent-v0.5.0-src-with-blt.tar.gz"
    url = "https://github.com/Alpine-DAV/ascent/releases/download/v0.5.1/ascent-v0.5.1-src-with-blt.tar.gz"

    maintainers = ['cyrush']

    version('develop',
            branch='develop',
            submodules=True)

    version('0.5.0', sha256='2837b7371db3ac1bcc31a479d7cf0eb62a503cacadfa4187061502b3c4a89fa0')
            submodules=True,
            preferred=True)

    ###########################################################################
    # package variants
    ###########################################################################

    variant("shared", default=True, description="Build Ascent as shared libs")
    variant("test", default=True, description='Enable Ascent unit tests')
    variant('test', default=True, description='Enable Ascent unit tests')

    variant("mpi", default=True, description="Build Ascent MPI Support")
    variant("serial", default=True, description="build serial (non-mpi) libraries")
@@ -68,11 +67,15 @@ class Ascent(Package, CudaPackage):
    variant("cuda", default=False, description="Build cuda support")
    variant("mfem", default=False, description="Build MFEM filter support")
    variant("adios", default=False, description="Build Adios filter support")
    variant("dray", default=False, description="Build with Devil Ray support")

    # variants for dev-tools (docs, etc)
    variant("doc", default=False, description="Build Conduit's documentation")
    variant("doc", default=False, description="Build Ascent's documentation")

    ###########################################################################
    # variant for BabelFlow runtime
    variant("babelflow", default=False, description="Build with BabelFlow")

    ##########################################################################
    # package dependencies
    ###########################################################################
@@ -102,29 +105,54 @@ class Ascent(Package, CudaPackage):
    depends_on("mpi", when="+mpi")
    depends_on("py-mpi4py", when="+mpi+python+shared")

    #######################
    # BabelFlow
    #######################
    depends_on('babelflow@develop', when='+babelflow+mpi')
    depends_on('parallelmergetree@develop', when='+babelflow+mpi')

    #############################
    # TPLs for Runtime Features
    #############################

    depends_on("vtk-h@0.5.0", when="+vtkh")
    depends_on("vtk-h@0.5.0~openmp", when="+vtkh~openmp")
    depends_on("vtk-h@0.5.0+cuda+openmp", when="+vtkh+cuda+openmp")
    depends_on("vtk-h@0.5.0+cuda~openmp", when="+vtkh+cuda~openmp")
    depends_on("vtk-h", when="+vtkh")
    depends_on("vtk-h~openmp", when="+vtkh~openmp")
    depends_on("vtk-h+cuda+openmp", when="+vtkh+cuda+openmp")
    depends_on("vtk-h+cuda~openmp", when="+vtkh+cuda~openmp")

    depends_on("vtk-h@0.5.0~shared", when="~shared+vtkh")
    depends_on("vtk-h@0.5.0~shared~openmp", when="~shared+vtkh~openmp")
    depends_on("vtk-h@0.5.0~shared+cuda", when="~shared+vtkh+cuda")
    depends_on("vtk-h@0.5.0~shared+cuda~openmp", when="~shared+vtkh+cuda~openmp")
    depends_on("vtk-h~shared", when="~shared+vtkh")
    depends_on("vtk-h~shared~openmp", when="~shared+vtkh~openmp")
    depends_on("vtk-h~shared+cuda", when="~shared+vtkh+cuda")
    depends_on("vtk-h~shared+cuda~openmp", when="~shared+vtkh+cuda~openmp")

    # mfem
    depends_on("mfem+threadsafe+shared+mpi+conduit", when="+shared+mfem+mpi")
    depends_on("mfem+threadsafe~shared+mpi+conduit", when="~shared+mfem+mpi")
    depends_on("mfem~threadsafe~openmp+shared+mpi+conduit", when="+shared+mfem+mpi")
    depends_on("mfem~threadsafe~openmp~shared+mpi+conduit", when="~shared+mfem+mpi")

    depends_on("mfem+threadsafe+shared~mpi+conduit", when="+shared+mfem~mpi")
    depends_on("mfem+threadsafe~shared~mpi+conduit", when="~shared+mfem~mpi")
    depends_on("mfem~threadsafe~openmp+shared~mpi+conduit", when="+shared+mfem~mpi")
    depends_on("mfem~threadsafe~openmp~shared~mpi+conduit", when="~shared+mfem~mpi")

    depends_on("adios", when="+adios")

    # devil ray variants with mpi
    # we have to specify both because mfem makes us
    depends_on("dray@develop+mpi~test~utils+shared+cuda", when="+dray+mpi+cuda+shared")
    depends_on("dray@develop+mpi~test~utils+shared+openmp", when="+dray+mpi+openmp+shared")
    depends_on("dray@develop+mpi~test~utils+shared~openmp~cuda", when="+dray+mpi~openmp~cuda+shared")

    depends_on("dray@develop+mpi~test~utils~shared+cuda", when="+dray+mpi+cuda~shared")
    depends_on("dray@develop+mpi~test~utils~shared+openmp", when="+dray+mpi+openmp~shared")
    depends_on("dray@develop+mpi~test~utils~shared~openmp~cuda", when="+dray+mpi~openmp~cuda~shared")

    # devil ray variants without mpi
    depends_on("dray@develop~mpi~test~utils+shared+cuda", when="+dray~mpi+cuda+shared")
    depends_on("dray@develop~mpi~test~utils+shared+openmp", when="+dray~mpi+openmp+shared")
    depends_on("dray@develop~mpi~test~utils+shared~openmp~cuda", when="+dray~mpi~openmp~cuda+shared")

    depends_on("dray@develop~mpi~test~utils~shared+cuda", when="+dray~mpi+cuda~shared")
    depends_on("dray@develop~mpi~test~utils~shared+openmp", when="+dray~mpi+openmp~shared")
    depends_on("dray@develop~mpi~test~utils~shared~openmp~cuda", when="+dray~mpi~openmp~cuda~shared")

    #######################
    # Documentation related
    #######################
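The twelve dray stanzas above enumerate every combination of mpi, shared, and accelerator backend by hand. A hedged sketch of generating the same matrix programmatically (illustrative only; packages usually keep the explicit form for readability, and depends_on is printed rather than called because this runs outside a package class):

# Reproduces the dray dependency matrix shown above.
for mpi in ('+mpi', '~mpi'):
    for shared in ('+shared', '~shared'):
        for accel in ('+cuda', '+openmp', '~openmp~cuda'):
            dep = 'dray@develop{0}~test~utils{1}{2}'.format(mpi, shared, accel)
            when = '+dray{0}{1}{2}'.format(mpi, accel, shared)
            print('depends_on("{0}", when="{1}")'.format(dep, when))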
@@ -398,6 +426,16 @@ def create_host_config(self, spec, prefix, py_site_pkgs_dir=None):
                else:
                    cfg.write(cmake_cache_entry("MPIEXEC",
                                                mpiexe_bin))

            ###################################
            # BABELFLOW (also depends on mpi)
            ###################################
            if "+babelflow" in spec:
                cfg.write(cmake_cache_entry("ENABLE_BABELFLOW", "ON"))
                cfg.write(cmake_cache_entry("BabelFlow_DIR",
                                            spec['babelflow'].prefix))
                cfg.write(cmake_cache_entry("PMT_DIR",
                                            spec['parallelmergetree'].prefix))
        else:
            cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))
@@ -449,6 +487,15 @@ def create_host_config(self, spec, prefix, py_site_pkgs_dir=None):
        else:
            cfg.write("# mfem not built by spack \n")

        #######################
        # Devil Ray
        #######################
        if "+dray" in spec:
            cfg.write("# devil ray from spack \n")
            cfg.write(cmake_cache_entry("DRAY_DIR", spec['dray'].prefix))
        else:
            cfg.write("# devil ray not built by spack \n")

        #######################
        # Adios
        #######################
Some files were not shown because too many files have changed in this diff.