Compare commits
185 Commits
features/k
...
v0.15.3-sh
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
008cf2ee15 | ||
|
|
474a077143 | ||
|
|
d386c59de9 | ||
|
|
08926b5b12 | ||
|
|
aaee0bcb7e | ||
|
|
f2a35a767a | ||
|
|
2b05d2bf5c | ||
|
|
cc00619929 | ||
|
|
188a371595 | ||
|
|
dce7be9932 | ||
|
|
4bb26802ed | ||
|
|
1965e1e606 | ||
|
|
6ed3db6c14 | ||
|
|
a8fbc96271 | ||
|
|
d8956a3bbe | ||
|
|
a807b95081 | ||
|
|
1b608d6041 | ||
|
|
be143d7dff | ||
|
|
05fe92e086 | ||
|
|
cd54fb95b8 | ||
|
|
8b63c4555c | ||
|
|
ec78160569 | ||
|
|
e1379f132d | ||
|
|
cdcd3dcedd | ||
|
|
7c1083916a | ||
|
|
c07bbe1a25 | ||
|
|
85032c6224 | ||
|
|
7b7898a69c | ||
|
|
84c5d76eae | ||
|
|
bcd47f0bd6 | ||
|
|
cb6a959cdb | ||
|
|
32cd12bff7 | ||
|
|
7021965159 | ||
|
|
5c5743ca33 | ||
|
|
034a7662ac | ||
|
|
e6b6ac5898 | ||
|
|
35037bf088 | ||
|
|
d14c245411 | ||
|
|
6e2ad01f20 | ||
|
|
ef9b5a8f74 | ||
|
|
4921ed29d5 | ||
|
|
f4c720e902 | ||
|
|
0a71b1d5ac | ||
|
|
3593a7be6a | ||
|
|
e4d2cf4441 | ||
|
|
911e51bd89 | ||
|
|
6ec8aea6f7 | ||
|
|
5b11f7aa4c | ||
|
|
97e46981b9 | ||
|
|
873ac5e890 | ||
|
|
4d7dae5716 | ||
|
|
b19f0fafcc | ||
|
|
11b1bdd119 | ||
|
|
f749821dc2 | ||
|
|
5abb20dcab | ||
|
|
0c233bdd0f | ||
|
|
0f171c7ded | ||
|
|
b4c7520dd8 | ||
|
|
9ab7d8f01d | ||
|
|
a7ad344c2a | ||
|
|
deb2d3745c | ||
|
|
ff96ec430b | ||
|
|
d4a959736a | ||
|
|
5ba51a0be0 | ||
|
|
27e1140df7 | ||
|
|
7ab6af8a3b | ||
|
|
0e6e93eaac | ||
|
|
38f8bdd2bb | ||
|
|
8e45a3fc2f | ||
|
|
c22af99b04 | ||
|
|
fc3a909fbc | ||
|
|
9665754eae | ||
|
|
0f9f9f3a85 | ||
|
|
777a5682a6 | ||
|
|
8994b4aab6 | ||
|
|
98ec366470 | ||
|
|
c61f4d7c82 | ||
|
|
811b304230 | ||
|
|
8f0c9ad409 | ||
|
|
6a423a5d8a | ||
|
|
23c37063bd | ||
|
|
478f3a5a99 | ||
|
|
02afb30990 | ||
|
|
06e3f15e47 | ||
|
|
f13ce3540d | ||
|
|
7ae34087e3 | ||
|
|
f0fea97e88 | ||
|
|
54893197ed | ||
|
|
80da1d50d1 | ||
|
|
944c5d75cd | ||
|
|
9ef4bc9d50 | ||
|
|
a2af432833 | ||
|
|
aefed311af | ||
|
|
6ffacddcf4 | ||
|
|
e17824f82f | ||
|
|
57ca47f035 | ||
|
|
4532a56b4e | ||
|
|
86e69a48a2 | ||
|
|
2508295d81 | ||
|
|
1a041c051a | ||
|
|
2262ca2e67 | ||
|
|
2269771a91 | ||
|
|
7f32574dd8 | ||
|
|
d15ac30f62 | ||
|
|
1f41347ab8 | ||
|
|
1f4f01103b | ||
|
|
8f46fcb512 | ||
|
|
2d3b973ebc | ||
|
|
7e62e0f27f | ||
|
|
ea0db4c0f9 | ||
|
|
0afc68e60b | ||
|
|
8ad25d5013 | ||
|
|
e90db68321 | ||
|
|
9e96b89f02 | ||
|
|
b4dae1b7fd | ||
|
|
9e9adf1d2f | ||
|
|
de9255247a | ||
|
|
de5d3e3229 | ||
|
|
e621aafc77 | ||
|
|
c53427c98d | ||
|
|
7a75148d1b | ||
|
|
4210520c9d | ||
|
|
4f3fb50ae7 | ||
|
|
7660659107 | ||
|
|
fcca2a518b | ||
|
|
23e1cd7775 | ||
|
|
58e794e95a | ||
|
|
7ed59ed835 | ||
|
|
512726ae5b | ||
|
|
20851a6e6c | ||
|
|
92bbbb9659 | ||
|
|
5f2f2bfb84 | ||
|
|
9b63f72d6b | ||
|
|
4c60f01bae | ||
|
|
cd08308463 | ||
|
|
fe69997043 | ||
|
|
1584a6e3c6 | ||
|
|
c393880852 | ||
|
|
bbe9e6bf54 | ||
|
|
d7a00b71d4 | ||
|
|
6775d2546a | ||
|
|
8a154333f2 | ||
|
|
5e637a04fd | ||
|
|
0213869439 | ||
|
|
22e9a9792a | ||
|
|
4f23da9d26 | ||
|
|
f9430e2fd4 | ||
|
|
a2f86d5d18 | ||
|
|
0efab6637c | ||
|
|
2b11694b94 | ||
|
|
088798a727 | ||
|
|
bddbb1c22e | ||
|
|
92f447cf1c | ||
|
|
96f266c3e3 | ||
|
|
d5093c20c5 | ||
|
|
2064241c37 | ||
|
|
721742b764 | ||
|
|
c45bf153d8 | ||
|
|
b98e5e66e7 | ||
|
|
3d18bf345f | ||
|
|
f8e9cf4081 | ||
|
|
98e0f8b89b | ||
|
|
263275b7ea | ||
|
|
3e13002d7f | ||
|
|
654e5cc924 | ||
|
|
04a72c1834 | ||
|
|
53cf6eb194 | ||
|
|
5a7f186176 | ||
|
|
987adfa9c9 | ||
|
|
e476bb1400 | ||
|
|
dc12233610 | ||
|
|
29d21a0a5d | ||
|
|
762f505da5 | ||
|
|
8e1c326174 | ||
|
|
0bac5d527d | ||
|
|
79256eeb5c | ||
|
|
de760942f2 | ||
|
|
860641bfab | ||
|
|
673e55f14d | ||
|
|
54777a4f3e | ||
|
|
db36e66592 | ||
|
|
0d36e94407 | ||
|
|
92c3b5b8b2 | ||
|
|
71220a3656 | ||
|
|
09bd29d816 |
@@ -4,8 +4,7 @@ coverage:
|
||||
range: 60...90
|
||||
status:
|
||||
project:
|
||||
default:
|
||||
threshold: 0.2%
|
||||
default: yes
|
||||
|
||||
ignore:
|
||||
- lib/spack/spack/test/.*
|
||||
@@ -14,8 +13,3 @@ ignore:
|
||||
- share/spack/qa/.*
|
||||
|
||||
comment: off
|
||||
|
||||
# Inline codecov annotations make the code hard to read, and they add
|
||||
# annotations in files that seemingly have nothing to do with the PR.
|
||||
github_checks:
|
||||
annotations: false
|
||||
|
||||
@@ -4,9 +4,7 @@
|
||||
parallel = True
|
||||
concurrency = multiprocessing
|
||||
branch = True
|
||||
source =
|
||||
bin
|
||||
lib
|
||||
source = lib
|
||||
omit =
|
||||
lib/spack/spack/test/*
|
||||
lib/spack/docs/*
|
||||
|
||||
@@ -8,4 +8,4 @@ share/spack/dotkit/*
|
||||
share/spack/lmod/*
|
||||
share/spack/modules/*
|
||||
lib/spack/spack/test/*
|
||||
var/spack/cache/*
|
||||
|
||||
|
||||
45
.flake8
45
.flake8
@@ -27,47 +27,6 @@
|
||||
# - N813: camelcase imported as lowercase
|
||||
# - N814: camelcase imported as constant
|
||||
#
|
||||
# F4: pyflakes import checks, these are now checked by mypy more precisely
|
||||
# - F403: from module import *
|
||||
# - F405: undefined name or from *
|
||||
#
|
||||
# Black ignores, these are incompatible with black style and do not follow PEP-8
|
||||
# - E203: white space around slice operators can be required, ignore : warn
|
||||
# - W503: see above, already ignored for line-breaks
|
||||
#
|
||||
[flake8]
|
||||
ignore = E129,E221,E241,E272,E731,W503,W504,F999,N801,N813,N814,F403,F405
|
||||
max-line-length = 88
|
||||
|
||||
# F4: Import
|
||||
# - F405: `name` may be undefined, or undefined from star imports: `module`
|
||||
#
|
||||
# F8: Name
|
||||
# - F821: undefined name `name`
|
||||
#
|
||||
per-file-ignores =
|
||||
var/spack/repos/*/package.py:F405,F821
|
||||
|
||||
# exclude things we usually do not want linting for.
|
||||
# These still get linted when passed explicitly, as when spack flake8 passes
|
||||
# them on the command line.
|
||||
exclude =
|
||||
.git
|
||||
etc/
|
||||
opt/
|
||||
share/
|
||||
var/spack/cache/
|
||||
var/spack/gpg*/
|
||||
var/spack/junit-report/
|
||||
var/spack/mock-configs/
|
||||
lib/spack/external
|
||||
__pycache__
|
||||
var
|
||||
|
||||
format = spack
|
||||
|
||||
[flake8:local-plugins]
|
||||
report =
|
||||
spack = flake8_formatter:SpackFormatter
|
||||
paths =
|
||||
./share/spack/qa/
|
||||
ignore = E129,E221,E241,E272,E731,W503,W504,F999,N801,N813,N814
|
||||
max-line-length = 79
|
||||
|
||||
24
.flake8_packages
Normal file
24
.flake8_packages
Normal file
@@ -0,0 +1,24 @@
|
||||
# -*- conf -*-
|
||||
# flake8 settings for Spack package files.
|
||||
#
|
||||
# This should include all the same exceptions that we use for core files.
|
||||
#
|
||||
# In Spack packages, we also allow the single `from spack import *`
|
||||
# wildcard import and dependencies can set globals for their
|
||||
# dependents. So we add exceptions for checks related to undefined names.
|
||||
#
|
||||
# Note that we also add *per-line* exemptions for certain patterns in the
|
||||
# `spack flake8` command. This is where F403 for `from spack import *`
|
||||
# is added (because we *only* allow that wildcard).
|
||||
#
|
||||
# See .flake8 for regular exceptions.
|
||||
#
|
||||
# F4: Import
|
||||
# - F405: `name` may be undefined, or undefined from star imports: `module`
|
||||
#
|
||||
# F8: Name
|
||||
# - F821: undefined name `name`
|
||||
#
|
||||
[flake8]
|
||||
ignore = E129,E221,E241,E272,E731,W503,W504,F405,F821,F999,N801,N813,N814
|
||||
max-line-length = 79
|
||||
2
.gitattributes
vendored
2
.gitattributes
vendored
@@ -1,3 +1 @@
|
||||
*.py diff=python
|
||||
*.lp linguist-language=Prolog
|
||||
lib/spack/external/* linguist-vendored
|
||||
|
||||
6
.github/actions/add-maintainers-as-reviewers/Dockerfile
vendored
Normal file
6
.github/actions/add-maintainers-as-reviewers/Dockerfile
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
FROM python:3.7-alpine
|
||||
|
||||
RUN pip install pygithub
|
||||
|
||||
ADD entrypoint.py /entrypoint.py
|
||||
ENTRYPOINT ["/entrypoint.py"]
|
||||
85
.github/actions/add-maintainers-as-reviewers/entrypoint.py
vendored
Executable file
85
.github/actions/add-maintainers-as-reviewers/entrypoint.py
vendored
Executable file
@@ -0,0 +1,85 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
"""Maintainer review action.
|
||||
|
||||
This action checks which packages have changed in a PR, and adds their
|
||||
maintainers to the pull request for review.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
from github import Github
|
||||
|
||||
|
||||
def spack(*args):
|
||||
"""Run the spack executable with arguments, and return the output split.
|
||||
|
||||
This does just enough to run `spack pkg` and `spack maintainers`, the
|
||||
two commands used by this action.
|
||||
"""
|
||||
github_workspace = os.environ['GITHUB_WORKSPACE']
|
||||
spack = os.path.join(github_workspace, 'bin', 'spack')
|
||||
output = subprocess.check_output([spack] + list(args))
|
||||
split = re.split(r'\s*', output.decode('utf-8').strip())
|
||||
return [s for s in split if s]
|
||||
|
||||
|
||||
def main():
|
||||
# get these first so that we'll fail early
|
||||
token = os.environ['GITHUB_TOKEN']
|
||||
event_path = os.environ['GITHUB_EVENT_PATH']
|
||||
|
||||
with open(event_path) as file:
|
||||
data = json.load(file)
|
||||
|
||||
# make sure it's a pull_request event
|
||||
assert 'pull_request' in data
|
||||
|
||||
# only request reviews on open, edit, or reopen
|
||||
action = data['action']
|
||||
if action not in ('opened', 'edited', 'reopened'):
|
||||
return
|
||||
|
||||
# get data from the event payload
|
||||
pr_data = data['pull_request']
|
||||
base_branch_name = pr_data['base']['ref']
|
||||
full_repo_name = pr_data['base']['repo']['full_name']
|
||||
pr_number = pr_data['number']
|
||||
requested_reviewers = pr_data['requested_reviewers']
|
||||
author = pr_data['user']['login']
|
||||
|
||||
# get a list of packages that this PR modified
|
||||
changed_pkgs = spack(
|
||||
'pkg', 'changed', '--type', 'ac', '%s...' % base_branch_name)
|
||||
|
||||
# get maintainers for all modified packages
|
||||
maintainers = set()
|
||||
for pkg in changed_pkgs:
|
||||
pkg_maintainers = set(spack('maintainers', pkg))
|
||||
maintainers |= pkg_maintainers
|
||||
|
||||
# remove any maintainers who are already on the PR, and the author,
|
||||
# as you can't review your own PR)
|
||||
maintainers -= set(requested_reviewers)
|
||||
maintainers -= set([author])
|
||||
|
||||
if not maintainers:
|
||||
return
|
||||
|
||||
# request reviews from each maintainer
|
||||
gh = Github(token)
|
||||
repo = gh.get_repo(full_repo_name)
|
||||
pr = repo.get_pull(pr_number)
|
||||
pr.create_review_request(list(maintainers))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
7
.github/dependabot.yml
vendored
7
.github/dependabot.yml
vendored
@@ -1,7 +0,0 @@
|
||||
version: 2
|
||||
updates:
|
||||
# Maintain dependencies for GitHub Actions
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
35
.github/workflows/linux_build_tests.yaml
vendored
35
.github/workflows/linux_build_tests.yaml
vendored
@@ -5,18 +5,6 @@ on:
|
||||
branches:
|
||||
- develop
|
||||
- releases/**
|
||||
paths-ignore:
|
||||
# Don't run if we only modified packages in the built-in repository
|
||||
- 'var/spack/repos/builtin/**'
|
||||
- '!var/spack/repos/builtin/packages/lz4/**'
|
||||
- '!var/spack/repos/builtin/packages/mpich/**'
|
||||
- '!var/spack/repos/builtin/packages/tut/**'
|
||||
- '!var/spack/repos/builtin/packages/py-setuptools/**'
|
||||
- '!var/spack/repos/builtin/packages/openjpeg/**'
|
||||
- '!var/spack/repos/builtin/packages/r-rcpp/**'
|
||||
- '!var/spack/repos/builtin/packages/ruby-rake/**'
|
||||
# Don't run if we only modified documentation
|
||||
- 'lib/spack/docs/**'
|
||||
pull_request:
|
||||
branches:
|
||||
- develop
|
||||
@@ -30,41 +18,36 @@ on:
|
||||
- '!var/spack/repos/builtin/packages/py-setuptools/**'
|
||||
- '!var/spack/repos/builtin/packages/openjpeg/**'
|
||||
- '!var/spack/repos/builtin/packages/r-rcpp/**'
|
||||
- '!var/spack/repos/builtin/packages/ruby-rake/**'
|
||||
# Don't run if we only modified documentation
|
||||
- 'lib/spack/docs/**'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
max-parallel: 4
|
||||
matrix:
|
||||
package:
|
||||
- lz4 # MakefilePackage
|
||||
- mpich~fortran # AutotoolsPackage
|
||||
- 'tut%gcc@:10.99.99' # WafPackage
|
||||
- py-setuptools # PythonPackage
|
||||
- openjpeg # CMakePackage
|
||||
- r-rcpp # RPackage
|
||||
- ruby-rake # RubyPackage
|
||||
package: [lz4, mpich, tut, py-setuptools, openjpeg, r-rcpp]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/cache@v2.1.6
|
||||
- name: Cache ccache's store
|
||||
uses: actions/cache@v1
|
||||
with:
|
||||
path: ~/.ccache
|
||||
key: ccache-build-${{ matrix.package }}
|
||||
restore-keys: |
|
||||
ccache-build-${{ matrix.package }}
|
||||
- uses: actions/setup-python@v2
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: 3.9
|
||||
python-version: 3.8
|
||||
- name: Install System Packages
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get -yqq install ccache gfortran perl perl-base r-base r-base-core r-base-dev ruby findutils openssl libssl-dev libpciaccess-dev
|
||||
sudo apt-get -yqq install ccache gfortran perl perl-base r-base r-base-core r-base-dev findutils openssl libssl-dev libpciaccess-dev
|
||||
R --version
|
||||
perl --version
|
||||
ruby --version
|
||||
- name: Copy Configuration
|
||||
run: |
|
||||
ccache -M 300M && ccache -z
|
||||
|
||||
142
.github/workflows/linux_unit_tests.yaml
vendored
Normal file
142
.github/workflows/linux_unit_tests.yaml
vendored
Normal file
@@ -0,0 +1,142 @@
|
||||
name: linux tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- develop
|
||||
- releases/**
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
- develop
|
||||
jobs:
|
||||
unittests:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: [2.7, 3.5, 3.6, 3.7, 3.8]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Setup Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install System packages
|
||||
run: |
|
||||
sudo apt-get -y update
|
||||
sudo apt-get install -y coreutils gfortran graphviz gnupg2 mercurial ninja-build patchelf
|
||||
# Needed for kcov
|
||||
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev zlib1g-dev libdw-dev libiberty-dev
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
pip install --upgrade pip six setuptools codecov coverage
|
||||
- name: Setup git configuration
|
||||
run: |
|
||||
# Need this for the git tests to succeed.
|
||||
git --version
|
||||
git config --global user.email "spack@example.com"
|
||||
git config --global user.name "Test User"
|
||||
git fetch -u origin develop:develop
|
||||
- name: Install kcov for bash script coverage
|
||||
env:
|
||||
KCOV_VERSION: 34
|
||||
run: |
|
||||
KCOV_ROOT=$(mktemp -d)
|
||||
wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
|
||||
tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
|
||||
mkdir -p ${KCOV_ROOT}/build
|
||||
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
|
||||
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
|
||||
- name: Run unit tests
|
||||
env:
|
||||
COVERAGE: true
|
||||
run: |
|
||||
share/spack/qa/run-unit-tests
|
||||
coverage combine
|
||||
coverage xml
|
||||
- name: Upload to codecov.io
|
||||
uses: codecov/codecov-action@v1
|
||||
with:
|
||||
flags: unittests,linux
|
||||
flake8:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.8
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
pip install --upgrade pip six setuptools flake8
|
||||
- name: Setup git configuration
|
||||
run: |
|
||||
# Need this for the git tests to succeed.
|
||||
git --version
|
||||
git config --global user.email "spack@example.com"
|
||||
git config --global user.name "Test User"
|
||||
git fetch -u origin develop:develop
|
||||
- name: Run flake8 tests
|
||||
run: |
|
||||
share/spack/qa/run-flake8-tests
|
||||
shell:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.8
|
||||
- name: Install System packages
|
||||
run: |
|
||||
sudo apt-get -y update
|
||||
sudo apt-get install -y coreutils gfortran gnupg2 mercurial ninja-build patchelf zsh fish
|
||||
# Needed for kcov
|
||||
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev zlib1g-dev libdw-dev libiberty-dev
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
pip install --upgrade pip six setuptools codecov coverage
|
||||
- name: Setup git configuration
|
||||
run: |
|
||||
# Need this for the git tests to succeed.
|
||||
git --version
|
||||
git config --global user.email "spack@example.com"
|
||||
git config --global user.name "Test User"
|
||||
git fetch -u origin develop:develop
|
||||
- name: Install kcov for bash script coverage
|
||||
env:
|
||||
KCOV_VERSION: 38
|
||||
run: |
|
||||
KCOV_ROOT=$(mktemp -d)
|
||||
wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
|
||||
tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
|
||||
mkdir -p ${KCOV_ROOT}/build
|
||||
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
|
||||
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
|
||||
- name: Run shell tests
|
||||
env:
|
||||
COVERAGE: true
|
||||
run: |
|
||||
share/spack/qa/run-shell-tests
|
||||
- name: Upload to codecov.io
|
||||
uses: codecov/codecov-action@v1
|
||||
with:
|
||||
flags: shelltests,linux
|
||||
documentation:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.8
|
||||
- name: Install System packages
|
||||
run: |
|
||||
sudo apt-get -y update
|
||||
sudo apt-get install -y coreutils ninja-build graphviz
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
pip install --upgrade pip six setuptools
|
||||
pip install --upgrade -r lib/spack/docs/requirements.txt
|
||||
- name: Build documentation
|
||||
run: |
|
||||
share/spack/qa/run-doc-tests
|
||||
15
.github/workflows/macos_python.yml
vendored
15
.github/workflows/macos_python.yml
vendored
@@ -25,9 +25,6 @@ jobs:
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.9
|
||||
- name: spack install
|
||||
run: |
|
||||
. .github/workflows/install_spack.sh
|
||||
@@ -40,22 +37,17 @@ jobs:
|
||||
timeout-minutes: 700
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.9
|
||||
- name: spack install
|
||||
run: |
|
||||
. .github/workflows/install_spack.sh
|
||||
spack install -v --fail-fast py-jupyterlab %apple-clang
|
||||
spack config add packages:opengl:paths:opengl@4.1:/usr/X11R6
|
||||
spack install -v --fail-fast py-jupyter %apple-clang
|
||||
|
||||
install_scipy_clang:
|
||||
name: scipy, mpl, pd
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.9
|
||||
- name: spack install
|
||||
run: |
|
||||
. .github/workflows/install_spack.sh
|
||||
@@ -68,9 +60,6 @@ jobs:
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.9
|
||||
- name: spack install
|
||||
run: |
|
||||
. .github/workflows/install_spack.sh
|
||||
|
||||
48
.github/workflows/macos_unit_tests.yaml
vendored
Normal file
48
.github/workflows/macos_unit_tests.yaml
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
name: macos tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- develop
|
||||
- releases/**
|
||||
pull_request:
|
||||
branches:
|
||||
- develop
|
||||
- releases/**
|
||||
jobs:
|
||||
build:
|
||||
|
||||
runs-on: macos-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: [3.7]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Setup Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
pip install --upgrade pip six setuptools
|
||||
pip install --upgrade codecov coverage
|
||||
pip install --upgrade flake8 pep8-naming
|
||||
- name: Setup Homebrew packages
|
||||
run: |
|
||||
brew update
|
||||
brew upgrade
|
||||
brew install gcc gnupg2 dash kcov
|
||||
- name: Run unit tests
|
||||
run: |
|
||||
git --version
|
||||
git fetch -u origin develop:develop
|
||||
. share/spack/setup-env.sh
|
||||
coverage run $(which spack) test
|
||||
coverage combine
|
||||
coverage xml
|
||||
- name: Upload to codecov.io
|
||||
uses: codecov/codecov-action@v1
|
||||
with:
|
||||
file: ./coverage.xml
|
||||
flags: unittests,macos
|
||||
30
.github/workflows/minimum_python_versions.yaml
vendored
Normal file
30
.github/workflows/minimum_python_versions.yaml
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
name: python version check
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- develop
|
||||
- releases/**
|
||||
pull_request:
|
||||
branches:
|
||||
- develop
|
||||
- releases/**
|
||||
jobs:
|
||||
validate:
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: 3.7
|
||||
- name: Install Python Packages
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
pip install --upgrade vermin
|
||||
- name: Minimum Version (Spack's Core)
|
||||
run: vermin --backport argparse -t=2.6- -t=3.5- -v lib/spack/spack/ lib/spack/llnl/ bin/
|
||||
- name: Minimum Version (Repositories)
|
||||
run: vermin --backport argparse -t=2.6- -t=3.5- -v var/spack/repos
|
||||
9
.github/workflows/setup_git.sh
vendored
9
.github/workflows/setup_git.sh
vendored
@@ -1,9 +0,0 @@
|
||||
#!/usr/bin/env sh
|
||||
git config --global user.email "spack@example.com"
|
||||
git config --global user.name "Test User"
|
||||
# With fetch-depth: 0 we have a remote develop
|
||||
# but not a local branch. Don't do this on develop
|
||||
if [ "$(git branch --show-current)" != "develop" ]
|
||||
then
|
||||
git branch develop origin/develop
|
||||
fi
|
||||
395
.github/workflows/unit_tests.yaml
vendored
395
.github/workflows/unit_tests.yaml
vendored
@@ -1,395 +0,0 @@
|
||||
name: linux tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- develop
|
||||
- releases/**
|
||||
pull_request:
|
||||
branches:
|
||||
- develop
|
||||
- releases/**
|
||||
jobs:
|
||||
# Validate that the code can be run on all the Python versions
|
||||
# supported by Spack
|
||||
validate:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.9
|
||||
- name: Install Python Packages
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
pip install --upgrade vermin
|
||||
- name: vermin (Spack's Core)
|
||||
run: vermin --backport argparse --violations --backport typing -t=2.6- -t=3.5- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
|
||||
- name: vermin (Repositories)
|
||||
run: vermin --backport argparse --violations --backport typing -t=2.6- -t=3.5- -vvv var/spack/repos
|
||||
# Run style checks on the files that have been changed
|
||||
style:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.9
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
pip install --upgrade pip six setuptools flake8 isort>=4.3.5 mypy>=0.800 black types-six
|
||||
- name: Setup git configuration
|
||||
run: |
|
||||
# Need this for the git tests to succeed.
|
||||
git --version
|
||||
. .github/workflows/setup_git.sh
|
||||
- name: Run style tests
|
||||
run: |
|
||||
share/spack/qa/run-style-tests
|
||||
# Build the documentation
|
||||
documentation:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.9
|
||||
- name: Install System packages
|
||||
run: |
|
||||
sudo apt-get -y update
|
||||
sudo apt-get install -y coreutils ninja-build graphviz
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
pip install --upgrade pip six setuptools
|
||||
pip install --upgrade -r lib/spack/docs/requirements.txt
|
||||
- name: Build documentation
|
||||
run: |
|
||||
share/spack/qa/run-doc-tests
|
||||
|
||||
# Check which files have been updated by the PR
|
||||
changes:
|
||||
runs-on: ubuntu-latest
|
||||
# Set job outputs to values from filter step
|
||||
outputs:
|
||||
core: ${{ steps.filter.outputs.core }}
|
||||
packages: ${{ steps.filter.outputs.packages }}
|
||||
with_coverage: ${{ steps.coverage.outputs.with_coverage }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
if: ${{ github.event_name == 'push' }}
|
||||
with:
|
||||
fetch-depth: 0
|
||||
# For pull requests it's not necessary to checkout the code
|
||||
- uses: dorny/paths-filter@v2
|
||||
id: filter
|
||||
with:
|
||||
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
|
||||
filters: |
|
||||
core:
|
||||
- './!(var/**)/**'
|
||||
packages:
|
||||
- 'var/**'
|
||||
# Some links for easier reference:
|
||||
#
|
||||
# "github" context: https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
|
||||
# job outputs: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idoutputs
|
||||
# setting environment variables from earlier steps: https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-environment-variable
|
||||
#
|
||||
- id: coverage
|
||||
# Run the subsequent jobs with coverage if core has been modified,
|
||||
# regardless of whether this is a pull request or a push to a branch
|
||||
run: |
|
||||
echo Core changes: ${{ steps.filter.outputs.core }}
|
||||
echo Event name: ${{ github.event_name }}
|
||||
if [ "${{ steps.filter.outputs.core }}" == "true" ]
|
||||
then
|
||||
echo "::set-output name=with_coverage::true"
|
||||
else
|
||||
echo "::set-output name=with_coverage::false"
|
||||
fi
|
||||
|
||||
# Run unit tests with different configurations on linux
|
||||
unittests:
|
||||
needs: [ validate, style, documentation, changes ]
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: [2.7, 3.5, 3.6, 3.7, 3.8, 3.9]
|
||||
concretizer: ['original', 'clingo']
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install System packages
|
||||
run: |
|
||||
sudo apt-get -y update
|
||||
# Needed for unit tests
|
||||
sudo apt-get -y install \
|
||||
coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build \
|
||||
patchelf
|
||||
# Needed for kcov
|
||||
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
|
||||
sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
pip install --upgrade pip six setuptools codecov coverage
|
||||
- name: Setup git configuration
|
||||
run: |
|
||||
# Need this for the git tests to succeed.
|
||||
git --version
|
||||
. .github/workflows/setup_git.sh
|
||||
- name: Install kcov for bash script coverage
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
env:
|
||||
KCOV_VERSION: 34
|
||||
run: |
|
||||
KCOV_ROOT=$(mktemp -d)
|
||||
wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
|
||||
tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
|
||||
mkdir -p ${KCOV_ROOT}/build
|
||||
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
|
||||
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
|
||||
- name: Bootstrap clingo from sources
|
||||
if: ${{ matrix.concretizer == 'clingo' }}
|
||||
env:
|
||||
SPACK_PYTHON: python
|
||||
run: |
|
||||
. share/spack/setup-env.sh
|
||||
spack external find --not-buildable cmake bison
|
||||
spack -v solve zlib
|
||||
- name: Run unit tests (full suite with coverage)
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
env:
|
||||
SPACK_PYTHON: python
|
||||
COVERAGE: true
|
||||
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
|
||||
run: |
|
||||
share/spack/qa/run-unit-tests
|
||||
coverage combine
|
||||
coverage xml
|
||||
- name: Run unit tests (reduced suite without coverage)
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
|
||||
env:
|
||||
SPACK_PYTHON: python
|
||||
ONLY_PACKAGES: true
|
||||
SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
|
||||
run: |
|
||||
share/spack/qa/run-unit-tests
|
||||
- uses: codecov/codecov-action@v1
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
with:
|
||||
flags: unittests,linux,${{ matrix.concretizer }}
|
||||
# Test shell integration
|
||||
shell:
|
||||
needs: [ validate, style, documentation, changes ]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.9
|
||||
- name: Install System packages
|
||||
run: |
|
||||
sudo apt-get -y update
|
||||
# Needed for shell tests
|
||||
sudo apt-get install -y coreutils csh zsh tcsh fish dash bash
|
||||
# Needed for kcov
|
||||
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
|
||||
sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
pip install --upgrade pip six setuptools codecov coverage
|
||||
- name: Setup git configuration
|
||||
run: |
|
||||
# Need this for the git tests to succeed.
|
||||
git --version
|
||||
. .github/workflows/setup_git.sh
|
||||
- name: Install kcov for bash script coverage
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
env:
|
||||
KCOV_VERSION: 38
|
||||
run: |
|
||||
KCOV_ROOT=$(mktemp -d)
|
||||
wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
|
||||
tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
|
||||
mkdir -p ${KCOV_ROOT}/build
|
||||
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
|
||||
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
|
||||
- name: Run shell tests (without coverage)
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
|
||||
run: |
|
||||
share/spack/qa/run-shell-tests
|
||||
- name: Run shell tests (with coverage)
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
env:
|
||||
COVERAGE: true
|
||||
run: |
|
||||
share/spack/qa/run-shell-tests
|
||||
- uses: codecov/codecov-action@v1
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
with:
|
||||
flags: shelltests,linux
|
||||
# Test for Python2.6 run on Centos 6
|
||||
centos6:
|
||||
needs: [ validate, style, documentation, changes ]
|
||||
runs-on: ubuntu-latest
|
||||
container: spack/github-actions:centos6
|
||||
steps:
|
||||
- name: Run unit tests (full test-suite)
|
||||
# The CentOS 6 container doesn't run with coverage, but
|
||||
# under the same conditions it runs the full test suite
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
env:
|
||||
HOME: /home/spack-test
|
||||
run: |
|
||||
whoami && echo $HOME && cd $HOME
|
||||
git clone https://github.com/spack/spack.git && cd spack
|
||||
git fetch origin ${{ github.ref }}:test-branch
|
||||
git checkout test-branch
|
||||
share/spack/qa/run-unit-tests
|
||||
- name: Run unit tests (only package tests)
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
|
||||
env:
|
||||
HOME: /home/spack-test
|
||||
ONLY_PACKAGES: true
|
||||
run: |
|
||||
whoami && echo $HOME && cd $HOME
|
||||
git clone https://github.com/spack/spack.git && cd spack
|
||||
git fetch origin ${{ github.ref }}:test-branch
|
||||
git checkout test-branch
|
||||
share/spack/qa/run-unit-tests
|
||||
|
||||
# Test RHEL8 UBI with platform Python. This job is run
|
||||
# only on PRs modifying core Spack
|
||||
rhel8-platform-python:
|
||||
needs: [ validate, style, documentation, changes ]
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
container: registry.access.redhat.com/ubi8/ubi
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
dnf install -y \
|
||||
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
|
||||
make patch tcl unzip which xz
|
||||
- uses: actions/checkout@v2
|
||||
- name: Setup repo and non-root user
|
||||
run: |
|
||||
git --version
|
||||
git fetch --unshallow
|
||||
. .github/workflows/setup_git.sh
|
||||
useradd spack-test
|
||||
chown -R spack-test .
|
||||
- name: Run unit tests
|
||||
shell: runuser -u spack-test -- bash {0}
|
||||
run: |
|
||||
source share/spack/setup-env.sh
|
||||
spack unit-test -k 'not cvs and not svn and not hg' -x --verbose
|
||||
# Test for the clingo based solver (using clingo-cffi)
|
||||
clingo-cffi:
|
||||
needs: [ validate, style, documentation, changes ]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.9
|
||||
- name: Install System packages
|
||||
run: |
|
||||
sudo apt-get -y update
|
||||
# Needed for unit tests
|
||||
sudo apt-get -y install \
|
||||
coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build \
|
||||
patchelf
|
||||
# Needed for kcov
|
||||
sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
|
||||
sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
|
||||
- name: Install kcov for bash script coverage
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
env:
|
||||
KCOV_VERSION: 34
|
||||
run: |
|
||||
KCOV_ROOT=$(mktemp -d)
|
||||
wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
|
||||
tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
|
||||
mkdir -p ${KCOV_ROOT}/build
|
||||
cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
|
||||
make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
pip install --upgrade pip six setuptools codecov coverage clingo
|
||||
- name: Setup git configuration
|
||||
run: |
|
||||
# Need this for the git tests to succeed.
|
||||
git --version
|
||||
. .github/workflows/setup_git.sh
|
||||
- name: Run unit tests (full suite with coverage)
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
env:
|
||||
COVERAGE: true
|
||||
SPACK_TEST_SOLVER: clingo
|
||||
run: |
|
||||
share/spack/qa/run-unit-tests
|
||||
coverage combine
|
||||
coverage xml
|
||||
- name: Run unit tests (reduced suite without coverage)
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'false' }}
|
||||
env:
|
||||
ONLY_PACKAGES: true
|
||||
SPACK_TEST_SOLVER: clingo
|
||||
run: |
|
||||
share/spack/qa/run-unit-tests
|
||||
- uses: codecov/codecov-action@v1
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
with:
|
||||
flags: unittests,linux,clingo
|
||||
# Run unit tests on MacOS
|
||||
build:
|
||||
needs: [ validate, style, documentation, changes ]
|
||||
runs-on: macos-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: [3.8]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
pip install --upgrade pip six setuptools
|
||||
pip install --upgrade codecov coverage
|
||||
pip install --upgrade flake8 isort>=4.3.5 mypy>=0.800
|
||||
- name: Setup Homebrew packages
|
||||
run: |
|
||||
brew install dash fish gcc gnupg2 kcov
|
||||
- name: Run unit tests
|
||||
run: |
|
||||
git --version
|
||||
. .github/workflows/setup_git.sh
|
||||
. share/spack/setup-env.sh
|
||||
if [ "${{ needs.changes.outputs.with_coverage }}" == "true" ]
|
||||
then
|
||||
coverage run $(which spack) unit-test -x
|
||||
coverage combine
|
||||
coverage xml
|
||||
else
|
||||
echo "ONLY PACKAGE RECIPES CHANGED [skipping coverage]"
|
||||
$(which spack) unit-test -x -m "not maybeslow" -k "package_sanity"
|
||||
fi
|
||||
- uses: codecov/codecov-action@v1
|
||||
if: ${{ needs.changes.outputs.with_coverage == 'true' }}
|
||||
with:
|
||||
file: ./coverage.xml
|
||||
flags: unittests,macos
|
||||
519
.gitignore
vendored
519
.gitignore
vendored
@@ -1,511 +1,40 @@
|
||||
##########################
|
||||
# Spack-specific ignores #
|
||||
##########################
|
||||
|
||||
/db
|
||||
/var/spack/stage
|
||||
/var/spack/cache
|
||||
/var/spack/environments
|
||||
/var/spack/repos/*/index.yaml
|
||||
/var/spack/repos/*/lock
|
||||
__pycache__/
|
||||
*.pyc
|
||||
/opt
|
||||
*~
|
||||
.DS_Store
|
||||
.idea
|
||||
# Ignore everything in /etc/spack except /etc/spack/defaults
|
||||
/etc/spack/*
|
||||
!/etc/spack/defaults
|
||||
/etc/spackconfig
|
||||
/share/spack/dotkit
|
||||
/share/spack/modules
|
||||
/share/spack/lmod
|
||||
# Debug logs
|
||||
spack-db.*
|
||||
/TAGS
|
||||
*.swp
|
||||
/htmlcov
|
||||
.coverage
|
||||
\#*
|
||||
.#*
|
||||
.cache
|
||||
lib/spack/spack/test/.cache
|
||||
/bin/spackc
|
||||
*.in.log
|
||||
*.out.log
|
||||
*.orig
|
||||
|
||||
###########################
|
||||
# Python-specific ignores #
|
||||
###########################
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
#lib/
|
||||
#lib64/
|
||||
parts/
|
||||
sdist/
|
||||
#var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
########################
|
||||
# Vim-specific ignores #
|
||||
########################
|
||||
|
||||
# Swap
|
||||
[._]*.s[a-v][a-z]
|
||||
!*.svg # comment out if you don't need vector files
|
||||
[._]*.sw[a-p]
|
||||
[._]s[a-rt-v][a-z]
|
||||
[._]ss[a-gi-z]
|
||||
[._]sw[a-p]
|
||||
|
||||
# Session
|
||||
Session.vim
|
||||
Sessionx.vim
|
||||
|
||||
# Temporary
|
||||
.netrwhist
|
||||
*~
|
||||
# Auto-generated tag files
|
||||
tags
|
||||
# Persistent undo
|
||||
[._]*.un~
|
||||
|
||||
##########################
|
||||
# Emacs-specific ignores #
|
||||
##########################
|
||||
|
||||
*~
|
||||
\#*\#
|
||||
/.emacs.desktop
|
||||
/.emacs.desktop.lock
|
||||
*.elc
|
||||
auto-save-list
|
||||
tramp
|
||||
.\#*
|
||||
|
||||
# Org-mode
|
||||
.org-id-locations
|
||||
*_archive
|
||||
|
||||
# flymake-mode
|
||||
*_flymake.*
|
||||
|
||||
# eshell files
|
||||
/eshell/history
|
||||
/eshell/lastdir
|
||||
|
||||
# elpa packages
|
||||
/elpa/
|
||||
|
||||
# reftex files
|
||||
*.rel
|
||||
|
||||
# AUCTeX auto folder
|
||||
/auto/
|
||||
|
||||
# cask packages
|
||||
.cask/
|
||||
dist/
|
||||
|
||||
# Flycheck
|
||||
flycheck_*.el
|
||||
|
||||
# server auth directory
|
||||
/server/
|
||||
|
||||
# projectiles files
|
||||
.projectile
|
||||
|
||||
# directory configuration
|
||||
.dir-locals.el
|
||||
|
||||
# network security
|
||||
/network-security.data
|
||||
|
||||
############################
|
||||
# Eclipse-specific ignores #
|
||||
############################
|
||||
|
||||
.metadata
|
||||
#bin/
|
||||
tmp/
|
||||
*.tmp
|
||||
*.bak
|
||||
*.swp
|
||||
*~.nib
|
||||
local.properties
|
||||
.settings/
|
||||
.loadpath
|
||||
.recommenders
|
||||
|
||||
# External tool builders
|
||||
.externalToolBuilders/
|
||||
|
||||
# Locally stored "Eclipse launch configurations"
|
||||
*.launch
|
||||
|
||||
# PyDev specific (Python IDE for Eclipse)
|
||||
*.pydevproject
|
||||
|
||||
# CDT-specific (C/C++ Development Tooling)
|
||||
# Eclipse files
|
||||
.project
|
||||
.cproject
|
||||
.pydevproject
|
||||
|
||||
# CDT- autotools
|
||||
.autotools
|
||||
|
||||
# Java annotation processor (APT)
|
||||
.factorypath
|
||||
|
||||
# PDT-specific (PHP Development Tools)
|
||||
.buildpath
|
||||
|
||||
# sbteclipse plugin
|
||||
.target
|
||||
|
||||
# Tern plugin
|
||||
.tern-project
|
||||
|
||||
# TeXlipse plugin
|
||||
.texlipse
|
||||
|
||||
# STS (Spring Tool Suite)
|
||||
.springBeans
|
||||
|
||||
# Code Recommenders
|
||||
.recommenders/
|
||||
|
||||
# Annotation Processing
|
||||
.apt_generated/
|
||||
.apt_generated_test/
|
||||
|
||||
# Scala IDE specific (Scala & Java development for Eclipse)
|
||||
.cache-main
|
||||
.scala_dependencies
|
||||
.worksheet
|
||||
|
||||
# Uncomment this line if you wish to ignore the project description file.
|
||||
# Typically, this file would be tracked if it contains build/dependency configurations:
|
||||
#.project
|
||||
|
||||
##################################
|
||||
# Visual Studio-specific ignores #
|
||||
##################################
|
||||
|
||||
.vscode/*
|
||||
!.vscode/settings.json
|
||||
!.vscode/tasks.json
|
||||
!.vscode/launch.json
|
||||
!.vscode/extensions.json
|
||||
*.code-workspace
|
||||
|
||||
# Local History for Visual Studio Code
|
||||
.history/
|
||||
|
||||
#################################
|
||||
# Sublime Text-specific ignores #
|
||||
#################################
|
||||
|
||||
# Cache files for Sublime Text
|
||||
*.tmlanguage.cache
|
||||
*.tmPreferences.cache
|
||||
*.stTheme.cache
|
||||
|
||||
# Workspace files are user-specific
|
||||
*.sublime-workspace
|
||||
|
||||
# Project files should be checked into the repository, unless a significant
|
||||
# proportion of contributors will probably not be using Sublime Text
|
||||
# *.sublime-project
|
||||
|
||||
# SFTP configuration file
|
||||
sftp-config.json
|
||||
sftp-config-alt*.json
|
||||
|
||||
# Package control specific files
|
||||
Package Control.last-run
|
||||
Package Control.ca-list
|
||||
Package Control.ca-bundle
|
||||
Package Control.system-ca-bundle
|
||||
Package Control.cache/
|
||||
Package Control.ca-certs/
|
||||
Package Control.merged-ca-bundle
|
||||
Package Control.user-ca-bundle
|
||||
oscrypto-ca-bundle.crt
|
||||
bh_unicode_properties.cache
|
||||
|
||||
# Sublime-github package stores a github token in this file
|
||||
# https://packagecontrol.io/packages/sublime-github
|
||||
GitHub.sublime-settings
|
||||
|
||||
##############################
|
||||
# JetBrains-specific ignores #
|
||||
##############################
|
||||
|
||||
# Ignore the entire folder since it may conatin more files than
|
||||
# just the ones listed below
|
||||
.idea/
|
||||
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
|
||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||
|
||||
# User-specific stuff
|
||||
.idea/**/workspace.xml
|
||||
.idea/**/tasks.xml
|
||||
.idea/**/usage.statistics.xml
|
||||
.idea/**/dictionaries
|
||||
.idea/**/shelf
|
||||
|
||||
# Generated files
|
||||
.idea/**/contentModel.xml
|
||||
|
||||
# Sensitive or high-churn files
|
||||
.idea/**/dataSources/
|
||||
.idea/**/dataSources.ids
|
||||
.idea/**/dataSources.local.xml
|
||||
.idea/**/sqlDataSources.xml
|
||||
.idea/**/dynamic.xml
|
||||
.idea/**/uiDesigner.xml
|
||||
.idea/**/dbnavigator.xml
|
||||
|
||||
# Gradle
|
||||
.idea/**/gradle.xml
|
||||
.idea/**/libraries
|
||||
|
||||
# Gradle and Maven with auto-import
|
||||
# When using Gradle or Maven with auto-import, you should exclude module files,
|
||||
# since they will be recreated, and may cause churn. Uncomment if using
|
||||
# auto-import.
|
||||
# .idea/artifacts
|
||||
# .idea/compiler.xml
|
||||
# .idea/jarRepositories.xml
|
||||
# .idea/modules.xml
|
||||
# .idea/*.iml
|
||||
# .idea/modules
|
||||
# *.iml
|
||||
# *.ipr
|
||||
|
||||
# CMake
|
||||
cmake-build-*/
|
||||
|
||||
# Mongo Explorer plugin
|
||||
.idea/**/mongoSettings.xml
|
||||
|
||||
# File-based project format
|
||||
*.iws
|
||||
|
||||
# IntelliJ
|
||||
out/
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
.idea_modules/
|
||||
|
||||
# JIRA plugin
|
||||
atlassian-ide-plugin.xml
|
||||
|
||||
# Cursive Clojure plugin
|
||||
.idea/replstate.xml
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
com_crashlytics_export_strings.xml
|
||||
crashlytics.properties
|
||||
crashlytics-build.properties
|
||||
fabric.properties
|
||||
|
||||
# Editor-based Rest Client
|
||||
.idea/httpRequests
|
||||
|
||||
# Android studio 3.1+ serialized cache file
|
||||
.idea/caches/build_file_checksums.ser
|
||||
|
||||
##########################
|
||||
# macOS-specific ignores #
|
||||
##########################
|
||||
|
||||
# General
|
||||
.DS_Store
|
||||
.AppleDouble
|
||||
.LSOverride
|
||||
|
||||
# Icon must end with two \r
|
||||
Icon
|
||||
|
||||
# Thumbnails
|
||||
._*
|
||||
|
||||
# Files that might appear in the root of a volume
|
||||
.DocumentRevisions-V100
|
||||
.fseventsd
|
||||
.Spotlight-V100
|
||||
.TemporaryItems
|
||||
.Trashes
|
||||
.VolumeIcon.icns
|
||||
.com.apple.timemachine.donotpresent
|
||||
|
||||
# Directories potentially created on remote AFP share
|
||||
.AppleDB
|
||||
.AppleDesktop
|
||||
Network Trash Folder
|
||||
Temporary Items
|
||||
.apdisk
|
||||
|
||||
##########################
|
||||
# Linux-specific ignores #
|
||||
##########################
|
||||
|
||||
*~
|
||||
|
||||
# temporary files which can be created if a process still has a handle open of a deleted file
|
||||
.fuse_hidden*
|
||||
|
||||
# KDE directory preferences
|
||||
.directory
|
||||
|
||||
# Linux trash folder which might appear on any partition or disk
|
||||
.Trash-*
|
||||
|
||||
# .nfs files are created when an open file is removed but is still being accessed
|
||||
.nfs*
|
||||
|
||||
############################
|
||||
# Windows-specific ignores #
|
||||
############################
|
||||
|
||||
# Windows thumbnail cache files
|
||||
Thumbs.db
|
||||
Thumbs.db:encryptable
|
||||
ehthumbs.db
|
||||
ehthumbs_vista.db
|
||||
|
||||
# Dump file
|
||||
*.stackdump
|
||||
|
||||
# Folder config file
|
||||
[Dd]esktop.ini
|
||||
|
||||
# Recycle Bin used on file shares
|
||||
$RECYCLE.BIN/
|
||||
|
||||
# Windows Installer files
|
||||
*.cab
|
||||
*.msi
|
||||
*.msix
|
||||
*.msm
|
||||
*.msp
|
||||
|
||||
# Windows shortcuts
|
||||
*.lnk
|
||||
# VSCode files
|
||||
.vscode
|
||||
.devcontainer
|
||||
|
||||
7
.mailmap
7
.mailmap
@@ -3,8 +3,7 @@ Adam Moody <moody20@llnl.gov> Adam T. Moody
|
||||
Alfredo Gimenez <gimenez1@llnl.gov> Alfredo Gimenez <alfredo.gimenez@gmail.com>
|
||||
Alfredo Gimenez <gimenez1@llnl.gov> Alfredo Adolfo Gimenez <alfredo.gimenez@gmail.com>
|
||||
Andrew Williams <williamsa89@cardiff.ac.uk> Andrew Williams <andrew@alshain.org.uk>
|
||||
Axel Huebl <axelhuebl@lbl.gov> Axel Huebl <a.huebl@hzdr.de>
|
||||
Axel Huebl <axelhuebl@lbl.gov> Axel Huebl <axel.huebl@plasma.ninja>
|
||||
Axel Huebl <a.huebl@hzdr.de> Axel Huebl <axel.huebl@plasma.ninja>
|
||||
Ben Boeckel <ben.boeckel@kitware.com> Ben Boeckel <mathstuf@gmail.com>
|
||||
Ben Boeckel <ben.boeckel@kitware.com> Ben Boeckel <mathstuf@users.noreply.github.com>
|
||||
Benedikt Hegner <hegner@cern.ch> Benedikt Hegner <benedikt.hegner@cern.ch>
|
||||
@@ -21,8 +20,8 @@ Geoffrey Oxberry <oxberry1@llnl.gov> Geoffrey Oxberry
|
||||
Glenn Johnson <glenn-johnson@uiowa.edu> Glenn Johnson <gjohnson@argon-ohpc.hpc.uiowa.edu>
|
||||
Glenn Johnson <glenn-johnson@uiowa.edu> Glenn Johnson <glennpj@gmail.com>
|
||||
Gregory Becker <becker33@llnl.gov> Gregory Becker <becker33.llnl.gov>
|
||||
Gregory Becker <becker33@llnl.gov> Gregory Becker <becker33.llnl.gov>
|
||||
Gregory Becker <becker33@llnl.gov> Gregory Becker <becker33@llnl.gov>
|
||||
Gregory Becker <becker33@llnl.gov> becker33 <becker33.llnl.gov>
|
||||
Gregory Becker <becker33@llnl.gov> becker33 <becker33@llnl.gov>
|
||||
Gregory L. Lee <lee218@llnl.gov> Greg Lee <lee218@llnl.gov>
|
||||
Gregory L. Lee <lee218@llnl.gov> Gregory L. Lee <lee218@cab687.llnl.gov>
|
||||
Gregory L. Lee <lee218@llnl.gov> Gregory L. Lee <lee218@cab690.llnl.gov>
|
||||
|
||||
35
.mypy.ini
35
.mypy.ini
@@ -1,35 +0,0 @@
|
||||
[mypy]
|
||||
python_version = 3.7
|
||||
files=lib/spack/llnl/**/*.py,lib/spack/spack/**/*.py
|
||||
mypy_path=bin,lib/spack,lib/spack/external,var/spack/repos/builtin
|
||||
# This and a generated import file allows supporting packages
|
||||
namespace_packages=True
|
||||
# To avoid re-factoring all the externals, ignore errors and missing imports
|
||||
# globally, then turn back on in spack and spack submodules
|
||||
ignore_errors=True
|
||||
ignore_missing_imports=True
|
||||
|
||||
[mypy-spack.*]
|
||||
ignore_errors=False
|
||||
ignore_missing_imports=False
|
||||
|
||||
[mypy-packages.*]
|
||||
ignore_errors=False
|
||||
ignore_missing_imports=False
|
||||
|
||||
[mypy-llnl.*]
|
||||
ignore_errors=False
|
||||
ignore_missing_imports=False
|
||||
|
||||
[mypy-spack.test.packages]
|
||||
ignore_errors=True
|
||||
|
||||
# ignore errors in fake import path for packages
|
||||
[mypy-spack.pkg.*]
|
||||
ignore_errors=True
|
||||
ignore_missing_imports=True
|
||||
|
||||
# jinja has syntax in it that requires python3 and causes a parse error
|
||||
# skip importing it
|
||||
[mypy-jinja2]
|
||||
follow_imports=skip
|
||||
60
.travis.yml
Normal file
60
.travis.yml
Normal file
@@ -0,0 +1,60 @@
|
||||
# Only build releases and develop on push; do not build every branch.
|
||||
branches:
|
||||
only:
|
||||
- develop
|
||||
- /^releases\/.*$/
|
||||
|
||||
language: python
|
||||
python: '2.6'
|
||||
dist: trusty
|
||||
os: linux
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- gfortran
|
||||
- graphviz
|
||||
- gnupg2
|
||||
- kcov
|
||||
- mercurial
|
||||
- ninja-build
|
||||
- realpath
|
||||
- zsh
|
||||
- fish
|
||||
|
||||
before_install:
|
||||
- if [[ "$TRAVIS_DIST" == "trusty" ]]; then
|
||||
share/spack/qa/install_patchelf.sh;
|
||||
else
|
||||
sudo apt-get update;
|
||||
sudo apt-get -y install patchelf;
|
||||
fi
|
||||
|
||||
# Install various dependencies
|
||||
install:
|
||||
- pip install --upgrade pip
|
||||
- pip install --upgrade six
|
||||
- pip install --upgrade setuptools
|
||||
- pip install --upgrade flake8
|
||||
- pip install --upgrade pep8-naming
|
||||
|
||||
before_script:
|
||||
# Need this for the git tests to succeed.
|
||||
- git config --global user.email "spack@example.com"
|
||||
- git config --global user.name "Test User"
|
||||
|
||||
# Need this to be able to compute the list of changed files
|
||||
- git fetch origin ${TRAVIS_BRANCH}:${TRAVIS_BRANCH}
|
||||
|
||||
script:
|
||||
- python bin/spack -h
|
||||
- python bin/spack help -a
|
||||
- python bin/spack -p --lines 20 spec mpileaks%gcc ^elfutils@0.170
|
||||
- python bin/spack test -x --verbose
|
||||
|
||||
notifications:
|
||||
email:
|
||||
recipients:
|
||||
- tgamblin@llnl.gov
|
||||
- massimiliano.culpo@gmail.com
|
||||
on_success: change
|
||||
on_failure: always
|
||||
185
CHANGELOG.md
185
CHANGELOG.md
@@ -1,186 +1,3 @@
|
||||
# v0.16.2 (2021-05-22)
|
||||
|
||||
* Major performance improvement for `spack load` and other commands. (#23661)
|
||||
* `spack fetch` is now environment-aware. (#19166)
|
||||
* Numerous fixes for the new, `clingo`-based concretizer. (#23016, #23307,
|
||||
#23090, #22896, #22534, #20644, #20537, #21148)
|
||||
* Supoprt for automatically bootstrapping `clingo` from source. (#20652, #20657
|
||||
#21364, #21446, #21913, #22354, #22444, #22460, #22489, #22610, #22631)
|
||||
* Python 3.10 support: `collections.abc` (#20441)
|
||||
* Fix import issues by using `__import__` instead of Spack package importe.
|
||||
(#23288, #23290)
|
||||
* Bugfixes and `--source-dir` argument for `spack location`. (#22755, #22348,
|
||||
#22321)
|
||||
* Better support for externals in shared prefixes. (#22653)
|
||||
* `spack build-env` now prefers specs defined in the active environment.
|
||||
(#21642)
|
||||
* Remove erroneous warnings about quotes in `from_sourcing_files`. (#22767)
|
||||
* Fix clearing cache of `InternalConfigScope`. (#22609)
|
||||
* Bugfix for active when pkg is already active error. (#22587)
|
||||
* Make `SingleFileScope` able to repopulate the cache after clearing it.
|
||||
(#22559)
|
||||
* Channelflow: Fix the package. (#22483)
|
||||
* More descriptive error message for bugs in `package.py` (#21811)
|
||||
* Use package-supplied `autogen.sh`. (#20319)
|
||||
* Respect `-k/verify-ssl-false` in `_existing_url` method. (#21864)
|
||||
|
||||
|
||||
# v0.16.1 (2021-02-22)
|
||||
|
||||
This minor release includes a new feature and associated fixes:
|
||||
* intel-oneapi support through new packages (#20411, #20686, #20693, #20717,
|
||||
#20732, #20808, #21377, #21448)
|
||||
|
||||
This release also contains bug fixes/enhancements for:
|
||||
* HIP/ROCm support (#19715, #20095)
|
||||
* concretization (#19988, #20020, #20082, #20086, #20099, #20102, #20128,
|
||||
#20182, #20193, #20194, #20196, #20203, #20247, #20259, #20307, #20362,
|
||||
#20383, #20423, #20473, #20506, #20507, #20604, #20638, #20649, #20677,
|
||||
#20680, #20790)
|
||||
* environment install reporting fix (#20004)
|
||||
* avoid import in ABI compatibility info (#20236)
|
||||
* restore ability of dev-build to skip patches (#20351)
|
||||
* spack find -d spec grouping (#20028)
|
||||
* spack smoke test support (#19987, #20298)
|
||||
* macOS fixes (#20038, #21662)
|
||||
* abstract spec comparisons (#20341)
|
||||
* continuous integration (#17563)
|
||||
* performance improvements for binary relocation (#19690, #20768)
|
||||
* additional sanity checks for variants in builtin packages (#20373)
|
||||
* do not pollute auto-generated configuration files with empty lists or
|
||||
dicts (#20526)
|
||||
|
||||
plus assorted documentation (#20021, #20174) and package bug fixes/enhancements
|
||||
(#19617, #19933, #19986, #20006, #20097, #20198, #20794, #20906, #21411).
|
||||
|
||||
|
||||
# v0.16.0 (2020-11-18)
|
||||
|
||||
`v0.16.0` is a major feature release.
|
||||
|
||||
## Major features in this release
|
||||
|
||||
1. **New concretizer (experimental)** Our new backtracking concretizer is
|
||||
now in Spack as an experimental feature. You will need to install
|
||||
`clingo@master+python` and set `concretizer: clingo` in `config.yaml`
|
||||
to use it. The original concretizer is not exhaustive and is not
|
||||
guaranteed to find a solution if one exists. We encourage you to use
|
||||
the new concretizer and to report any bugs you find with it. We
|
||||
anticipate making the new concretizer the default and including all
|
||||
required dependencies for it in Spack `v0.17`. For more details, see
|
||||
#19501.
|
||||
|
||||
2. **spack test (experimental)** Users can add `test()` methods to their
|
||||
packages to run smoke tests on installations with the new `spack test`
|
||||
command (the old `spack test` is now `spack unit-test`). `spack test`
|
||||
is environment-aware, so you can `spack install` an environment and
|
||||
`spack test run` smoke tests on all of its packages. Historical test
|
||||
logs can be perused with `spack test results`. Generic smoke tests for
|
||||
MPI implementations, C, C++, and Fortran compilers as well as specific
|
||||
smoke tests for 18 packages are included. This is marked experimental because the
|
||||
test API (`self.run_test()`) is likely to change, but we encourage
|
||||
users to upstream tests, and we will maintain and refactor any that
|
||||
are added to mainline packages (#15702).
|
||||
|
||||
3. **spack develop** New `spack develop` command allows you to develop
|
||||
several packages at once within a Spack environment. Running
|
||||
`spack develop foo@v1` and `spack develop bar@v2` will check
|
||||
out specific versions of `foo` and `bar` into subdirectories, which you
|
||||
can then build incrementally with `spack install` (#15256).
|
||||
|
||||
4. **More parallelism** Spack previously installed the dependencies of a
|
||||
_single_ spec in parallel. Entire environments can now be installed in
|
||||
parallel, greatly accelerating builds of large environments (#18131).
|
||||
|
||||
5. **Customizable base images for spack containerize**
|
||||
`spack containerize` previously only output a `Dockerfile` based
|
||||
on `ubuntu`. You may now specify any base image of your choosing (#15028).
|
||||
|
||||
6. **More external finding** `spack external find` was added in `v0.15`,
|
||||
but only `cmake` had support. `spack external find` can now find
|
||||
`bison`, `cuda`, `findutils`, `flex`, `git`, `lustre`, `m4`, `mpich`,
|
||||
`mvapich2`, `ncurses`, `openmpi`, `perl`, `spectrum-mpi`, `tar`, and
|
||||
`texinfo` on your system and add them automatically to
|
||||
`packages.yaml`.
|
||||
|
||||
7. **Support aocc, nvhpc, and oneapi compilers** We are aggressively
|
||||
pursuing support for the newest vendor compilers, especially those for
|
||||
the U.S. exascale and pre-exascale systems. Compiler classes and
|
||||
auto-detection for `aocc`, `nvhpc`, `oneapi` are now in Spack (#19345,
|
||||
#19294, #19330).
|
||||
|
||||
## Additional new features of note
|
||||
|
||||
* New `spack mark` command can be used to designate packages as explicitly
|
||||
installed, so that `spack gc` will not garbage-collect them (#16662).
|
||||
* `install_tree` can be customized with Spack's projection format (#18341)
|
||||
* `sbang` now lives in the `install_tree` so that all users can access it (#11598)
|
||||
* `csh` and `tcsh` users no longer need to set `SPACK_ROOT` before
|
||||
sourcing `setup-env.csh` (#18225)
|
||||
* Spec syntax now supports `variant=*` syntax for finding any package
|
||||
that has a particular variant (#19381).
|
||||
* Spack respects `SPACK_GNUPGHOME` variable for custom GPG directories (#17139)
|
||||
* Spack now recognizes Graviton chips
|
||||
|
||||
## Major refactors
|
||||
|
||||
* Use spawn instead of fork on Python >= 3.8 on macOS (#18205)
|
||||
* Use indexes for public build caches (#19101, #19117, #19132, #19141, #19209)
|
||||
* `sbang` is an external package now (https://github.com/spack/sbang, #19582)
|
||||
* `archspec` is an external package now (https://github.com/archspec/archspec, #19600)
|
||||
|
||||
## Deprecations and Removals
|
||||
|
||||
* `spack bootstrap` was deprecated in v0.14.0, and has now been removed.
|
||||
* `spack setup` is deprecated as of v0.16.0.
|
||||
* What was `spack test` is now called `spack unit-test`. `spack test` is
|
||||
now the smoke testing feature in (2) above.
|
||||
|
||||
## Bugfixes
|
||||
|
||||
Some of the most notable bugfixes in this release include:
|
||||
|
||||
* Better warning messages for deprecated syntax in `packages.yaml` (#18013)
|
||||
* `buildcache list --allarch` now works properly (#17827)
|
||||
* Many fixes and tests for buildcaches and binary relocation (#15687,
|
||||
  #17455, #17418, #18110)
|
||||
|
||||
## Package Improvements
|
||||
|
||||
Spack now has 5050 total packages, 720 of which were added since `v0.15`.
|
||||
|
||||
* ROCm packages (`hip`, `aomp`, more) added by AMD (#19957, #19832, others)
|
||||
* Many improvements for ARM support
|
||||
* `llvm-flang`, `flang`, and `f18` removed, as `llvm` has real `flang`
|
||||
support since Flang was merged to LLVM mainline
|
||||
* Emerging support for `spack external find` and `spack test` in packages.
|
||||
|
||||
## Infrastructure
|
||||
|
||||
* Major infrastructure improvements to pipelines on `gitlab.spack.io`
|
||||
* Support for testing PRs from forks (#19248) is being enabled for all
|
||||
forks to enable rolling, up-to-date binary builds on `develop`
|
||||
|
||||
|
||||
# v0.15.4 (2020-08-12)
|
||||
|
||||
This release contains one feature addition:
|
||||
|
||||
* Users can set `SPACK_GNUPGHOME` to override Spack's GPG path (#17139)
|
||||
|
||||
Several bugfixes for CUDA, binary packaging, and `spack -V`:
|
||||
|
||||
* CUDA package's `.libs` method searches for `libcudart` instead of `libcuda` (#18000)
|
||||
* Don't set `CUDAHOSTCXX` in environments that contain CUDA (#17826)
|
||||
* `buildcache create`: `NoOverwriteException` is a warning, not an error (#17832)
|
||||
* Fix `spack buildcache list --allarch` (#17884)
|
||||
* `spack -V` works with `releases/latest` tag and shallow clones (#17884)
|
||||
|
||||
And fixes for GitHub Actions and tests to ensure that CI passes on the
|
||||
release branch (#15687, #17279, #17328, #17377, #17732).
|
||||
|
||||
# v0.15.3 (2020-07-28)
|
||||
|
||||
This release contains the following bugfixes:
|
||||
@@ -772,4 +589,4 @@ version of all the changes since `v0.9.1`.
|
||||
- Switched from `nose` to `pytest` for unit tests.
|
||||
- Unit tests take 1 minute now instead of 8
|
||||
- Massively expanded documentation
|
||||
- Docs are now hosted on [spack.readthedocs.io](https://spack.readthedocs.io)
|
||||
- Docs are now hosted on [spack.readthedocs.io](http://spack.readthedocs.io)
|
||||
|
||||
12
COPYRIGHT
12
COPYRIGHT
@@ -28,11 +28,9 @@ text in the license header:
|
||||
|
||||
External Packages
|
||||
-------------------
|
||||
|
||||
Spack bundles most external dependencies in lib/spack/external. It also
|
||||
includes the sbang tool directly in bin/sbang. These packages are covered
|
||||
by various permissive licenses. A summary listing follows. See the
|
||||
license included with each package for full details.
|
||||
Spack bundles its external dependencies in lib/spack/external. These
|
||||
packages are covered by various permissive licenses. A summary listing
|
||||
follows. See the license included with each package for full details.
|
||||
|
||||
PackageName: argparse
|
||||
PackageHomePage: https://pypi.python.org/pypi/argparse
|
||||
@@ -78,10 +76,6 @@ PackageName: ruamel.yaml
|
||||
PackageHomePage: https://yaml.readthedocs.io/
|
||||
PackageLicenseDeclared: MIT
|
||||
|
||||
PackageName: sbang
|
||||
PackageHomePage: https://github.com/spack/sbang
|
||||
PackageLicenseDeclared: Apache-2.0 OR MIT
|
||||
|
||||
PackageName: six
|
||||
PackageHomePage: https://pypi.python.org/pypi/six
|
||||
PackageLicenseDeclared: MIT
|
||||
|
||||
27
LICENSE-MIT
27
LICENSE-MIT
@@ -1,21 +1,20 @@
|
||||
MIT License
|
||||
Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
Copyright (c) 2013-2020 LLNS, LLC and other Spack Project Developers.
|
||||
Permission is hereby granted, free of charge, to any person obtaining a
|
||||
copy of this software and associated documentation files (the "Software"),
|
||||
to deal in the Software without restriction, including without limitation
|
||||
the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
and/or sell copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
|
||||
22
README.md
22
README.md
@@ -1,11 +1,13 @@
|
||||
# <img src="https://cdn.rawgit.com/spack/spack/develop/share/spack/logo/spack-logo.svg" width="64" valign="middle" alt="Spack"/> Spack
|
||||
|
||||
[](https://github.com/spack/spack/actions)
|
||||
[](https://github.com/spack/spack/actions)
|
||||
[](https://github.com/spack/spack/actions)
|
||||
[](https://github.com/spack/spack/actions)
|
||||
[](https://github.com/spack/spack/actions?query=workflow%3A%22macOS+builds+nightly%22)
|
||||
[](https://travis-ci.com/spack/spack)
|
||||
[](https://codecov.io/gh/spack/spack)
|
||||
[](https://spack.readthedocs.io)
|
||||
[](https://slack.spack.io)
|
||||
[](https://spackpm.herokuapp.com)
|
||||
|
||||
Spack is a multi-platform package manager that builds and installs
|
||||
multiple versions and configurations of software. It works on Linux,
|
||||
@@ -20,7 +22,7 @@ builds of the same package. With Spack, you can build your software
|
||||
*all* the ways you want to.
|
||||
|
||||
See the
|
||||
[Feature Overview](https://spack.readthedocs.io/en/latest/features.html)
|
||||
[Feature Overview](http://spack.readthedocs.io/en/latest/features.html)
|
||||
for examples and highlights.
|
||||
|
||||
To install spack and your first package, make sure you have Python.
|
||||
@@ -33,14 +35,14 @@ Then:
|
||||
Documentation
|
||||
----------------
|
||||
|
||||
[**Full documentation**](https://spack.readthedocs.io/) is available, or
|
||||
[**Full documentation**](http://spack.readthedocs.io/) is available, or
|
||||
run `spack help` or `spack help --all`.
|
||||
|
||||
Tutorial
|
||||
----------------
|
||||
|
||||
We maintain a
|
||||
[**hands-on tutorial**](https://spack.readthedocs.io/en/latest/tutorial.html).
|
||||
[**hands-on tutorial**](http://spack.readthedocs.io/en/latest/tutorial.html).
|
||||
It covers basic to advanced usage, packaging, developer features, and large HPC
|
||||
deployments. You can do all of the exercises on your own laptop using a
|
||||
Docker container.
|
||||
@@ -58,7 +60,7 @@ packages to bugfixes, documentation, or even new core features.
|
||||
Resources:
|
||||
|
||||
* **Slack workspace**: [spackpm.slack.com](https://spackpm.slack.com).
|
||||
To get an invitation, visit [slack.spack.io](https://slack.spack.io).
|
||||
To get an invitation, [**click here**](https://spackpm.herokuapp.com).
|
||||
* **Mailing list**: [groups.google.com/d/forum/spack](https://groups.google.com/d/forum/spack)
|
||||
* **Twitter**: [@spackpm](https://twitter.com/spackpm). Be sure to
|
||||
`@mention` us!
|
||||
@@ -72,9 +74,9 @@ When you send your request, make ``develop`` the destination branch on the
|
||||
|
||||
Your PR must pass Spack's unit tests and documentation tests, and must be
|
||||
[PEP 8](https://www.python.org/dev/peps/pep-0008/) compliant. We enforce
|
||||
these guidelines with our CI process. To run these tests locally, and for
|
||||
helpful tips on git, see our
|
||||
[Contribution Guide](https://spack.readthedocs.io/en/latest/contribution_guide.html).
|
||||
these guidelines with [Travis CI](https://travis-ci.org/spack/spack). To
|
||||
run these tests locally, and for helpful tips on git, see our
|
||||
[Contribution Guide](http://spack.readthedocs.io/en/latest/contribution_guide.html).
|
||||
|
||||
Spack's `develop` branch has the latest contributions. Pull requests
|
||||
should target `develop`, and users who want the latest package versions,
|
||||
@@ -119,7 +121,7 @@ If you are referencing Spack in a publication, please cite the following paper:
|
||||
|
||||
* Todd Gamblin, Matthew P. LeGendre, Michael R. Collette, Gregory L. Lee,
|
||||
Adam Moody, Bronis R. de Supinski, and W. Scott Futral.
|
||||
[**The Spack Package Manager: Bringing Order to HPC Software Chaos**](https://www.computer.org/csdl/proceedings/sc/2015/3723/00/2807623.pdf).
|
||||
[**The Spack Package Manager: Bringing Order to HPC Software Chaos**](http://www.computer.org/csdl/proceedings/sc/2015/3723/00/2807623.pdf).
|
||||
In *Supercomputing 2015 (SC’15)*, Austin, Texas, November 15-20 2015. LLNL-CONF-669890.
|
||||
|
||||
License
|
||||
|
||||
165
bin/sbang
165
bin/sbang
@@ -1,103 +1,114 @@
|
||||
#!/bin/sh
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# sbang project developers. See the top-level COPYRIGHT file for details.
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
#
|
||||
# `sbang`: Run scripts with long shebang lines.
|
||||
#
|
||||
# Many operating systems limit the length and number of possible
|
||||
# arguments in shebang lines, making it hard to use interpreters that are
|
||||
# deep in the directory hierarchy or require special arguments.
|
||||
# Many operating systems limit the length of shebang lines, making it
|
||||
# hard to use interpreters that are deep in the directory hierarchy.
|
||||
# `sbang` can run such scripts, either as a shebang interpreter, or
|
||||
# directly on the command line.
|
||||
#
|
||||
# To use, put the long shebang on the second line of your script, and
|
||||
# make sbang the interpreter, like this:
|
||||
# Usage
|
||||
# -----------------------------
|
||||
# Suppose you have a script, long-shebang.sh, like this:
|
||||
#
|
||||
# #!/bin/sh /path/to/sbang
|
||||
# #!/long/path/to/real/interpreter with arguments
|
||||
# 1 #!/very/long/path/to/some/interpreter
|
||||
# 2
|
||||
# 3 echo "success!"
|
||||
#
|
||||
# `sbang` will run the real interpreter with the script as its argument.
|
||||
# Invoking this script will result in an error on some OS's. On
|
||||
# Linux, you get this:
|
||||
#
|
||||
# See https://github.com/spack/sbang for more details.
|
||||
# $ ./long-shebang.sh
|
||||
# -bash: ./long: /very/long/path/to/some/interp: bad interpreter:
|
||||
# No such file or directory
|
||||
#
|
||||
# On Mac OS X, the system simply assumes the interpreter is the shell
|
||||
# and tries to run with it, which is likely not what you want.
|
||||
#
|
||||
#
|
||||
# `sbang` on the command line
|
||||
# -----------------------------
|
||||
# You can use `sbang` in two ways. The first is to use it directly,
|
||||
# from the command line, like this:
|
||||
#
|
||||
# $ sbang ./long-shebang.sh
|
||||
# success!
|
||||
#
|
||||
#
|
||||
# `sbang` as the interpreter
|
||||
# -----------------------------
|
||||
# You can also use `sbang` *as* the interpreter for your script. Put
|
||||
# `#!/bin/bash /path/to/sbang` on line 1, and move the original
|
||||
# shebang to line 2 of the script:
|
||||
#
|
||||
# 1 #!/bin/bash /path/to/sbang
|
||||
# 2 #!/long/path/to/real/interpreter with arguments
|
||||
# 3
|
||||
# 4 echo "success!"
|
||||
#
|
||||
# $ ./long-shebang.sh
|
||||
# success!
|
||||
#
|
||||
# On Linux, you could shorten line 1 to `#!/path/to/sbang`, but other
|
||||
# operating systems like Mac OS X require the interpreter to be a
|
||||
# binary, so it's best to use `sbang` as a `bash` argument.
|
||||
# Obviously, for this to work, `sbang` needs to have a short enough
|
||||
# path that *it* will run without hitting OS limits.
|
||||
#
|
||||
# For Lua scripts, the second line can't start with #!, as # is not
|
||||
# the comment character in lua (even though lua ignores #! on the
|
||||
# *first* line of a script). So, instrument a lua script like this,
|
||||
# using -- instead of # on the second line:
|
||||
#
|
||||
# 1 #!/bin/bash /path/to/sbang
|
||||
# 2 --!/long/path/to/lua with arguments
|
||||
# 3
|
||||
# 4 print "success!"
|
||||
#
|
||||
# How it works
|
||||
# -----------------------------
|
||||
# `sbang` is a very simple bash script. It looks at the first two
|
||||
# lines of a script argument and runs the last line starting with
|
||||
# `#!`, with the script as an argument. It also forwards arguments.
|
||||
#
|
||||
|
||||
# Generic error handling
|
||||
die() {
|
||||
echo "$@" 1>&2;
|
||||
exit 1
|
||||
}
|
||||
|
||||
# set SBANG_DEBUG to make the script print what would normally be executed.
|
||||
exec="exec"
|
||||
if [ -n "${SBANG_DEBUG}" ]; then
|
||||
exec="echo "
|
||||
fi
|
||||
|
||||
# First argument is the script we want to actually run.
|
||||
script="$1"
|
||||
|
||||
# ensure that the script actually exists
|
||||
if [ -z "$script" ]; then
|
||||
die "error: sbang requires exactly one argument"
|
||||
elif [ ! -f "$script" ]; then
|
||||
die "$script: no such file or directory"
|
||||
fi
|
||||
|
||||
# Search the first two lines of script for interpreters.
|
||||
lines=0
|
||||
while read -r line && [ $lines -ne 2 ]; do
|
||||
if [ "${line#\#!}" != "$line" ]; then
|
||||
shebang_line="${line#\#!}"
|
||||
elif [ "${line#//!}" != "$line" ]; then # // comments
|
||||
shebang_line="${line#//!}"
|
||||
elif [ "${line#--!}" != "$line" ]; then # -- lua comments
|
||||
shebang_line="${line#--!}"
|
||||
elif [ "${line#<?php\ }" != "$line" ]; then # php comments
|
||||
shebang_line="${line#<?php\ \#!}"
|
||||
shebang_line="${shebang_line%\ ?>}"
|
||||
while read line && ((lines < 2)) ; do
|
||||
if [[ "$line" = '#!'* ]]; then
|
||||
interpreter="${line#\#!}"
|
||||
elif [[ "$line" = '//!'*node* ]]; then
|
||||
interpreter="${line#//!}"
|
||||
elif [[ "$line" = '--!'*lua* ]]; then
|
||||
interpreter="${line#--!}"
|
||||
fi
|
||||
lines=$((lines+1))
|
||||
done < "$script"
|
||||
# this is needed for scripts with an sbang parameter
|
||||
# like ones in intltool
|
||||
# #!/<spack-long-path>/perl -w
|
||||
# this is the interpreter line with all the parameters as a vector
|
||||
interpreter_v=(${interpreter})
|
||||
# this is the single interpreter path
|
||||
interpreter_f="${interpreter_v[0]}"
|
||||
|
||||
# error if we did not find any interpreter
|
||||
if [ -z "$shebang_line" ]; then
|
||||
die "error: sbang found no interpreter in $script"
|
||||
fi
|
||||
|
||||
# parse out the interpreter and first argument
|
||||
IFS=' ' read -r interpreter arg1 rest <<EOF
|
||||
$shebang_line
|
||||
EOF
|
||||
|
||||
# Determine if the interpreter is a particular program, accounting for the
|
||||
# '#!/usr/bin/env PROGRAM' convention. So:
|
||||
#
|
||||
# interpreter_is perl
|
||||
#
|
||||
# will be true for '#!/usr/bin/perl' and '#!/usr/bin/env perl'
|
||||
interpreter_is() {
|
||||
if [ "${interpreter##*/}" = "$1" ]; then
|
||||
return 0
|
||||
elif [ "$interpreter" = "/usr/bin/env" ] && [ "$arg1" = "$1" ]; then
|
||||
return 0
|
||||
# Invoke any interpreter found, or raise an error if none was found.
|
||||
if [[ -n "$interpreter_f" ]]; then
|
||||
if [[ "${interpreter_f##*/}" = "perl"* ]]; then
|
||||
exec $interpreter -x "$@"
|
||||
else
|
||||
return 1
|
||||
exec $interpreter "$@"
|
||||
fi
|
||||
}
|
||||
|
||||
if interpreter_is "sbang"; then
|
||||
die "error: refusing to re-execute sbang to avoid infinite loop."
|
||||
fi
|
||||
|
||||
# Finally invoke the real shebang line
|
||||
# ruby and perl need -x to ignore the first line of input (the sbang line)
|
||||
#
|
||||
if interpreter_is perl || interpreter_is ruby; then
|
||||
# shellcheck disable=SC2086
|
||||
$exec $shebang_line -x "$@"
|
||||
else
|
||||
# shellcheck disable=SC2086
|
||||
$exec $shebang_line "$@"
|
||||
echo "error: sbang found no interpreter in $script"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
41
bin/spack
41
bin/spack
@@ -1,7 +1,7 @@
|
||||
#!/bin/sh
|
||||
# -*- python -*-
|
||||
#
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -10,13 +10,9 @@
|
||||
# Following line is a shell no-op, and starts a multi-line Python comment.
|
||||
# See https://stackoverflow.com/a/47886254
|
||||
""":"
|
||||
# prefer SPACK_PYTHON environment variable, python3, python, then python2
|
||||
SPACK_PREFERRED_PYTHONS="python3 python python2 /usr/libexec/platform-python"
|
||||
for cmd in "${SPACK_PYTHON:-}" ${SPACK_PREFERRED_PYTHONS}; do
|
||||
if command -v > /dev/null "$cmd"; then
|
||||
export SPACK_PYTHON="$(command -v "$cmd")"
|
||||
exec "${SPACK_PYTHON}" "$0" "$@"
|
||||
fi
|
||||
# prefer python3, then python, then python2
|
||||
for cmd in python3 python python2; do
|
||||
command -v > /dev/null $cmd && exec $cmd $0 "$@"
|
||||
done
|
||||
|
||||
echo "==> Error: spack could not find a python interpreter!" >&2
|
||||
@@ -30,15 +26,10 @@ from __future__ import print_function
|
||||
import os
|
||||
import sys
|
||||
|
||||
min_python3 = (3, 5)
|
||||
|
||||
if sys.version_info[:2] < (2, 6) or (
|
||||
sys.version_info[:2] >= (3, 0) and sys.version_info[:2] < min_python3
|
||||
):
|
||||
if sys.version_info[:2] < (2, 6):
|
||||
v_info = sys.version_info[:3]
|
||||
msg = "Spack requires Python 2.6, 2.7 or %d.%d or higher " % min_python3
|
||||
msg += "You are running spack with Python %d.%d.%d." % v_info
|
||||
sys.exit(msg)
|
||||
sys.exit("Spack requires Python 2.6 or higher."
|
||||
"This is Python %d.%d.%d." % v_info)
|
||||
|
||||
# Find spack's location and its prefix.
|
||||
spack_file = os.path.realpath(os.path.expanduser(__file__))
|
||||
@@ -51,10 +42,8 @@ sys.path.insert(0, spack_lib_path)
|
||||
# Add external libs
|
||||
spack_external_libs = os.path.join(spack_lib_path, "external")
|
||||
|
||||
if sys.version_info[:2] <= (2, 7):
|
||||
sys.path.insert(0, os.path.join(spack_external_libs, "py2"))
|
||||
if sys.version_info[:2] == (2, 6):
|
||||
sys.path.insert(0, os.path.join(spack_external_libs, "py26"))
|
||||
sys.path.insert(0, os.path.join(spack_external_libs, 'py26'))
|
||||
|
||||
sys.path.insert(0, spack_external_libs)
|
||||
|
||||
@@ -64,14 +53,12 @@ sys.path.insert(0, spack_external_libs)
|
||||
# Briefly: ruamel.yaml produces a .pth file when installed with pip that
|
||||
# makes the site installed package the preferred one, even though sys.path
|
||||
# is modified to point to another version of ruamel.yaml.
|
||||
if "ruamel.yaml" in sys.modules:
|
||||
del sys.modules["ruamel.yaml"]
|
||||
if 'ruamel.yaml' in sys.modules:
|
||||
del sys.modules['ruamel.yaml']
|
||||
|
||||
if "ruamel" in sys.modules:
|
||||
del sys.modules["ruamel"]
|
||||
|
||||
import spack.main # noqa
|
||||
if 'ruamel' in sys.modules:
|
||||
del sys.modules['ruamel']
|
||||
|
||||
# Once we've set up the system path, run the spack main method
|
||||
if __name__ == "__main__":
|
||||
sys.exit(spack.main.main())
|
||||
import spack.main # noqa
|
||||
sys.exit(spack.main.main())
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
@@ -16,23 +16,24 @@
|
||||
config:
|
||||
# This is the path to the root of the Spack install tree.
|
||||
# You can use $spack here to refer to the root of the spack instance.
|
||||
install_tree:
|
||||
root: $spack/opt/spack
|
||||
projections:
|
||||
all: "${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}"
|
||||
# install_tree can include an optional padded length (int or boolean)
|
||||
# default is False (do not pad)
|
||||
# if padded_length is True, Spack will pad as close to the system max path
|
||||
# length as possible
|
||||
# if padded_length is an integer, Spack will pad to that many characters,
|
||||
# assuming it is higher than the length of the install_tree root.
|
||||
# padded_length: 128
|
||||
install_tree: ~/.spack/opt/spack
|
||||
|
||||
|
||||
# Locations where templates should be found
|
||||
template_dirs:
|
||||
- $spack/share/spack/templates
|
||||
|
||||
|
||||
# Default directory layout
|
||||
install_path_scheme: "${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}"
|
||||
|
||||
|
||||
# Locations where different types of modules should be installed.
|
||||
module_roots:
|
||||
tcl: ~/.spack/share/spack/modules
|
||||
lmod: ~/.spack/share/spack/lmod
|
||||
|
||||
|
||||
# Temporary locations Spack can try to use for builds.
|
||||
#
|
||||
# Recommended options are given below.
|
||||
@@ -63,14 +64,10 @@ config:
|
||||
- ~/.spack/stage
|
||||
# - $spack/var/spack/stage
|
||||
|
||||
# Directory in which to run tests and store test results.
|
||||
# Tests will be stored in directories named by date/time and package
|
||||
# name/hash.
|
||||
test_stage: ~/.spack/test
|
||||
|
||||
# Cache directory for already downloaded source tarballs and archived
|
||||
# repositories. This can be purged with `spack clean --downloads`.
|
||||
source_cache: $spack/var/spack/cache
|
||||
source_cache: ~/.spack/var/spack/cache
|
||||
|
||||
|
||||
# Cache directory for miscellaneous files, like the package index.
|
||||
@@ -100,7 +97,7 @@ config:
|
||||
# If set to true, Spack will attempt to build any compiler on the spec
|
||||
# that is not already available. If set to False, Spack will only use
|
||||
# compilers already configured in compilers.yaml
|
||||
install_missing_compilers: false
|
||||
install_missing_compilers: False
|
||||
|
||||
|
||||
# If set to true, Spack will always check checksums after downloading
|
||||
@@ -108,11 +105,6 @@ config:
|
||||
checksum: true
|
||||
|
||||
|
||||
# If set to true, Spack will fetch deprecated versions without warning.
|
||||
# If false, Spack will raise an error when trying to install a deprecated version.
|
||||
deprecated: false
|
||||
|
||||
|
||||
# If set to true, `spack install` and friends will NOT clean
|
||||
# potentially harmful variables from the build environment. Use wisely.
|
||||
dirty: false
|
||||
@@ -135,13 +127,11 @@ config:
|
||||
locks: true
|
||||
|
||||
|
||||
# The maximum number of jobs to use for the build system (e.g. `make`), when
|
||||
# the -j flag is not given on the command line. Defaults to 16 when not set.
|
||||
# Note that the maximum number of jobs is limited by the number of cores
|
||||
# available, taking thread affinity into account when supported. For instance:
|
||||
# - With `build_jobs: 16` and 4 cores available `spack install` will run `make -j4`
|
||||
# - With `build_jobs: 16` and 32 cores available `spack install` will run `make -j16`
|
||||
# - With `build_jobs: 2` and 4 cores available `spack install -j6` will run `make -j6`
|
||||
# The maximum number of jobs to use when running `make` in parallel,
|
||||
# always limited by the number of cores available. For instance:
|
||||
# - If set to 16 on a 4 cores machine `spack install` will run `make -j4`
|
||||
# - If set to 16 on a 18 cores machine `spack install` will run `make -j16`
|
||||
# If not set, Spack will use all available cores up to 16.
|
||||
# build_jobs: 16
|
||||
|
||||
|
||||
@@ -149,20 +139,6 @@ config:
|
||||
ccache: false
|
||||
|
||||
|
||||
# The concretization algorithm to use in Spack. Options are:
|
||||
#
|
||||
# 'original': Spack's original greedy, fixed-point concretizer. This
|
||||
# algorithm can make decisions too early and will not backtrack
|
||||
# sufficiently for many specs.
|
||||
#
|
||||
# 'clingo': Uses a logic solver under the hood to solve DAGs with full
|
||||
# backtracking and optimization for user preferences.
|
||||
#
|
||||
# 'clingo' currently requires the clingo ASP solver to be installed and
|
||||
# built with python bindings. 'original' is built in.
|
||||
concretizer: original
|
||||
|
||||
|
||||
# How long to wait to lock the Spack installation database. This lock is used
|
||||
# when Spack needs to manage its own package metadata and all operations are
|
||||
# expected to complete within the default time limit. The timeout should
|
||||
@@ -177,13 +153,11 @@ config:
|
||||
# never succeed.
|
||||
package_lock_timeout: null
|
||||
|
||||
|
||||
# Control whether Spack embeds RPATH or RUNPATH attributes in ELF binaries.
|
||||
# Has no effect on macOS. DO NOT MIX these within the same install tree.
|
||||
# See the Spack documentation for details.
|
||||
shared_linking: 'rpath'
|
||||
|
||||
|
||||
# Set to 'false' to allow installation on filesystems that don't allow setgid bit
|
||||
# manipulation by unprivileged users (e.g. AFS)
|
||||
allow_sgid: true
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
# -------------------------------------------------------------------------
|
||||
# This is the default configuration for Spack's module file generation.
|
||||
#
|
||||
# Settings here are versioned with Spack and are intended to provide
|
||||
# sensible defaults out of the box. Spack maintainers should edit this
|
||||
# file to keep it current.
|
||||
#
|
||||
# Users can override these settings by editing the following files.
|
||||
#
|
||||
# Per-spack-instance settings (overrides defaults):
|
||||
# $SPACK_ROOT/etc/spack/modules.yaml
|
||||
#
|
||||
# Per-user settings (overrides default and site settings):
|
||||
# ~/.spack/modules.yaml
|
||||
# -------------------------------------------------------------------------
|
||||
modules:
|
||||
prefix_inspections:
|
||||
lib:
|
||||
- LD_LIBRARY_PATH
|
||||
lib64:
|
||||
- LD_LIBRARY_PATH
|
||||
@@ -22,20 +22,10 @@ packages:
|
||||
- intel
|
||||
providers:
|
||||
elf: [libelf]
|
||||
fuse: [macfuse]
|
||||
unwind: [apple-libunwind]
|
||||
uuid: [apple-libuuid]
|
||||
apple-libunwind:
|
||||
buildable: false
|
||||
externals:
|
||||
paths:
|
||||
# Apple bundles libunwind version 35.3 with macOS 10.9 and later,
|
||||
# although the version number used here isn't critical
|
||||
- spec: apple-libunwind@35.3
|
||||
prefix: /usr
|
||||
apple-libuuid:
|
||||
buildable: false
|
||||
externals:
|
||||
# Apple bundles libuuid in libsystem_c version 1353.100.2,
|
||||
# although the version number used here isn't critical
|
||||
- spec: apple-libuuid@1353.100.2
|
||||
prefix: /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk
|
||||
apple-libunwind@35.3: /usr
|
||||
buildable: False
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
mirrors:
|
||||
spack-public: https://mirror.spack.io
|
||||
spack-public: https://spack-llnl-mirror.s3-us-west-2.amazonaws.com/
|
||||
|
||||
@@ -14,7 +14,8 @@
|
||||
# ~/.spack/modules.yaml
|
||||
# -------------------------------------------------------------------------
|
||||
modules:
|
||||
# Paths to check when creating modules for all module sets
|
||||
enable:
|
||||
- tcl
|
||||
prefix_inspections:
|
||||
bin:
|
||||
- PATH
|
||||
@@ -24,6 +25,16 @@ modules:
|
||||
- MANPATH
|
||||
share/aclocal:
|
||||
- ACLOCAL_PATH
|
||||
lib:
|
||||
- LIBRARY_PATH
|
||||
lib64:
|
||||
- LIBRARY_PATH
|
||||
include:
|
||||
- C_INCLUDE_PATH
|
||||
- CPLUS_INCLUDE_PATH
|
||||
# The INCLUDE env variable specifies paths to look for
|
||||
# .mod file for Intel Fortran compilers
|
||||
- INCLUDE
|
||||
lib/pkgconfig:
|
||||
- PKG_CONFIG_PATH
|
||||
lib64/pkgconfig:
|
||||
@@ -33,20 +44,6 @@ modules:
|
||||
'':
|
||||
- CMAKE_PREFIX_PATH
|
||||
|
||||
# These are configurations for the module set named "default"
|
||||
default:
|
||||
# These values are defaulted in the code. They are not defaulted here so
|
||||
# that we can enable backwards compatibility with the old syntax more
|
||||
# easily (old value is in the config yaml, config:module_roots)
|
||||
# Where to install modules
|
||||
# roots:
|
||||
# tcl: $spack/share/spack/modules
|
||||
# lmod: $spack/share/spack/lmod
|
||||
# What type of modules to use
|
||||
enable:
|
||||
- tcl
|
||||
|
||||
# Default configurations if lmod is enabled
|
||||
lmod:
|
||||
hierarchy:
|
||||
- mpi
|
||||
lmod:
|
||||
hierarchy:
|
||||
- mpi
|
||||
|
||||
@@ -15,46 +15,42 @@
|
||||
# -------------------------------------------------------------------------
|
||||
packages:
|
||||
all:
|
||||
compiler: [gcc, intel, pgi, clang, xl, nag, fj, aocc]
|
||||
compiler: [gcc, intel, pgi, clang, xl, nag, fj]
|
||||
providers:
|
||||
awk: [gawk]
|
||||
blas: [openblas, amdblis]
|
||||
D: [ldc]
|
||||
awk: [gawk]
|
||||
blas: [openblas]
|
||||
daal: [intel-daal]
|
||||
elf: [elfutils]
|
||||
fftw-api: [fftw, amdfftw]
|
||||
flame: [libflame, amdlibflame]
|
||||
fuse: [libfuse]
|
||||
gl: [mesa+opengl, mesa18, opengl]
|
||||
fftw-api: [fftw]
|
||||
gl: [libglvnd-fe, mesa+opengl~glvnd, opengl~glvnd]
|
||||
glx: [libglvnd-fe+glx, mesa+glx~glvnd, opengl+glx~glvnd]
|
||||
egl: [libglvnd-fe+egl, opengl+egl~glvnd]
|
||||
libglvnd-be-gl: [mesa+glvnd, opengl+glvnd]
|
||||
libglvnd-be-glx: [mesa+glx+glvnd, opengl+glx+glvnd]
|
||||
libglvnd-be-egl: [opengl+egl+glvnd]
|
||||
glu: [mesa-glu, openglu]
|
||||
glx: [mesa+glx, mesa18+glx, opengl]
|
||||
golang: [gcc]
|
||||
iconv: [libiconv]
|
||||
ipp: [intel-ipp]
|
||||
java: [openjdk, jdk, ibm-java]
|
||||
jpeg: [libjpeg-turbo, libjpeg]
|
||||
lapack: [openblas, amdlibflame]
|
||||
lua-lang: [lua, lua-luajit]
|
||||
lapack: [openblas]
|
||||
mariadb-client: [mariadb-c-client, mariadb]
|
||||
mkl: [intel-mkl]
|
||||
mpe: [mpe2]
|
||||
mpi: [openmpi, mpich]
|
||||
mysql-client: [mysql, mariadb-c-client]
|
||||
opencl: [pocl]
|
||||
onedal: [intel-oneapi-dal]
|
||||
osmesa: [mesa+osmesa, mesa18+osmesa]
|
||||
pil: [py-pillow]
|
||||
pkgconfig: [pkgconf, pkg-config]
|
||||
rpc: [libtirpc]
|
||||
scalapack: [netlib-scalapack, amdscalapack]
|
||||
scalapack: [netlib-scalapack]
|
||||
sycl: [hipsycl]
|
||||
szip: [libaec, libszip]
|
||||
szip: [libszip, libaec]
|
||||
tbb: [intel-tbb]
|
||||
unwind: [libunwind]
|
||||
uuid: [util-linux-uuid, libuuid]
|
||||
xxd: [xxd-standalone, vim]
|
||||
yacc: [bison, byacc]
|
||||
ziglang: [zig]
|
||||
sycl: [hipsycl]
|
||||
permissions:
|
||||
read: world
|
||||
write: user
|
||||
|
||||
7
etc/spack/defaults/upstreams.yaml
Normal file
7
etc/spack/defaults/upstreams.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
upstreams:
|
||||
global:
|
||||
install_tree: $spack/opt/spack
|
||||
modules:
|
||||
tcl: $spack/share/spack/modules
|
||||
lmod: $spack/share/spack/lmod
|
||||
dotkit: $spack/share/spack/dotkit
|
||||
@@ -1,10 +1,10 @@
|
||||
<html>
|
||||
<head>
|
||||
<meta http-equiv="refresh" content="0; url=https://spack.readthedocs.io/" />
|
||||
<meta http-equiv="refresh" content="0; url=http://spack.readthedocs.io/" />
|
||||
</head>
|
||||
<body>
|
||||
<p>
|
||||
This page has moved to <a href="https://spack.readthedocs.io/">https://spack.readthedocs.io/</a>
|
||||
This page has moved to <a href="http://spack.readthedocs.io/">http://spack.readthedocs.io/</a>
|
||||
</p>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -1,162 +0,0 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
.. _analyze:
|
||||
|
||||
=======
|
||||
Analyze
|
||||
=======
|
||||
|
||||
|
||||
The analyze command is a front-end to various tools that let us analyze
|
||||
package installations. Each analyzer is a module for a different kind
|
||||
of analysis that can be done on a package installation, including (but not
|
||||
limited to) binary, log, or text analysis. Thus, the analyze command group
|
||||
allows you to take an existing package install, choose an analyzer,
|
||||
and extract some output for the package using it.
|
||||
|
||||
|
||||
-----------------
|
||||
Analyzer Metadata
|
||||
-----------------
|
||||
|
||||
For all analyzers, we write to an ``analyzers`` folder in ``~/.spack``, or the
|
||||
value that you specify in your spack config at ``config:analyzers_dir``.
|
||||
For example, here we see the results of running an analysis on zlib:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ tree ~/.spack/analyzers/
|
||||
└── linux-ubuntu20.04-skylake
|
||||
└── gcc-9.3.0
|
||||
└── zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2
|
||||
├── environment_variables
|
||||
│ └── spack-analyzer-environment-variables.json
|
||||
├── install_files
|
||||
│ └── spack-analyzer-install-files.json
|
||||
└── libabigail
|
||||
└── spack-analyzer-libabigail-libz.so.1.2.11.xml
|
||||
|
||||
|
||||
This means that you can always find analyzer output in this folder, and it
|
||||
is organized with the same logic as the package install it was run for.
|
||||
If you want to customize this top level folder, simply provide the ``--path``
|
||||
argument to ``spack analyze run``. The nested organization will be maintained
|
||||
within your custom root.
|
||||
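For example, a minimal sketch of redirecting output to a custom root (the path below is hypothetical):

.. code-block:: console

   # write analyzer results under /tmp/spack-analysis instead of ~/.spack/analyzers
   $ spack analyze run --path /tmp/spack-analysis zlib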
|
||||
-----------------
|
||||
Listing Analyzers
|
||||
-----------------
|
||||
|
||||
If you aren't familiar with Spack's analyzers, you can quickly list those that
|
||||
are available:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack analyze list-analyzers
|
||||
install_files : install file listing read from install_manifest.json
|
||||
environment_variables : environment variables parsed from spack-build-env.txt
|
||||
config_args : config args loaded from spack-configure-args.txt
|
||||
abigail : Application Binary Interface (ABI) features for objects
|
||||
|
||||
|
||||
In the above, the first three are fairly simple - parsing metadata files from
|
||||
a package install directory and saving the results as analyzer output.
|
||||
|
||||
-------------------
|
||||
Analyzing a Package
|
||||
-------------------
|
||||
|
||||
The analyze command, akin to install, will accept a package spec to perform
|
||||
an analysis for. The package must be installed. Let's walk through an example
|
||||
with zlib. We first ask to analyze it. However, since we have more than one
|
||||
install, we are asked to disambiguate:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack analyze run zlib
|
||||
==> Error: zlib matches multiple packages.
|
||||
Matching packages:
|
||||
fz2bs56 zlib@1.2.11%gcc@7.5.0 arch=linux-ubuntu18.04-skylake
|
||||
sl7m27m zlib@1.2.11%gcc@9.3.0 arch=linux-ubuntu20.04-skylake
|
||||
Use a more specific spec.
|
||||
|
||||
|
||||
We can then specify the spec version that we want to analyze:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack analyze run zlib/fz2bs56
|
||||
|
||||
If you don't provide any specific analyzer names, by default all analyzers
|
||||
(shown in the ``list-analyzers`` subcommand list) will be run. If an analyzer does not
|
||||
have any result, it will be skipped. For example, here is a result running for
|
||||
zlib:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ls ~/.spack/analyzers/linux-ubuntu20.04-skylake/gcc-9.3.0/zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2/
|
||||
spack-analyzer-environment-variables.json
|
||||
spack-analyzer-install-files.json
|
||||
spack-analyzer-libabigail-libz.so.1.2.11.xml
|
||||
|
||||
If you want to run a specific analyzer, ask for it with `--analyzer`. Here we run
|
||||
spack analyze on libabigail (already installed) _using_ libabigail:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack analyze run --analyzer abigail libabigail
|
||||
|
||||
|
||||
.. _analyze_monitoring:
|
||||
|
||||
----------------------
|
||||
Monitoring An Analysis
|
||||
----------------------
|
||||
|
||||
For any kind of analysis, you can
|
||||
use a `spack monitor <https://github.com/spack/spack-monitor>`_ "Spackmon"
|
||||
as a server to upload the same run metadata to. You can
|
||||
follow the instructions in the `spack monitor documentation <https://spack-monitor.readthedocs.org>`_
|
||||
to first create a server along with a username and token for yourself.
|
||||
You can then use this guide to interact with the server.
|
||||
|
||||
You should first export your spack monitor token and username to the environment:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438
|
||||
$ export SPACKMON_USER=spacky
|
||||
|
||||
|
||||
By default, the host for your server is expected to be at ``http://127.0.0.1``
|
||||
with a prefix of ``ms1``, and if this is the case, you can simply add the
|
||||
``--monitor`` flag to the analyze command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack analyze run --monitor wget
|
||||
|
||||
If you need to customize the host or the prefix, you can do that as well:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack analyze run --monitor --monitor-prefix monitor --monitor-host https://monitor-service.io wget
|
||||
|
||||
If your server doesn't have authentication, you can skip it:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack analyze run --monitor --monitor-disable-auth wget
|
||||
|
||||
Regardless of your choice, when you run analyze on an installed package (whether
|
||||
it was installed with ``--monitor`` or not), you'll see the results generated as they did
|
||||
before, and a message that the monitor server was pinged:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack analyze run --monitor wget
|
||||
...
|
||||
==> Sending result for wget bin/wget to monitor.
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -27,17 +27,11 @@ It is recommended that the following be put in your ``.bashrc`` file:
|
||||
|
||||
If you do not see colorized output when using ``less -R`` it is because color
|
||||
is being disabled in the piped output. In this case, tell spack to force
|
||||
colorized output with a flag
|
||||
colorized output.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack --color always | less -R
|
||||
|
||||
or an environment variable
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ SPACK_COLOR=always spack | less -R
|
||||
$ spack --color always | less -R
|
||||
|
||||
--------------------------
|
||||
Listing available packages
|
||||
@@ -138,27 +132,32 @@ If ``mpileaks`` depends on other packages, Spack will install the
|
||||
dependencies first. It then fetches the ``mpileaks`` tarball, expands
|
||||
it, verifies that it was downloaded without errors, builds it, and
|
||||
installs it in its own directory under ``$SPACK_ROOT/opt``. You'll see
|
||||
a number of messages from Spack, a lot of build output, and a message
|
||||
that the package is installed.
|
||||
a number of messages from spack, a lot of build output, and a message
|
||||
that the packages is installed:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install mpileaks
|
||||
... dependency build output ...
|
||||
==> Installing mpileaks-1.0-ph7pbnhl334wuhogmugriohcwempqry2
|
||||
==> No binary for mpileaks-1.0-ph7pbnhl334wuhogmugriohcwempqry2 found: installing from source
|
||||
==> mpileaks: Executing phase: 'autoreconf'
|
||||
==> mpileaks: Executing phase: 'configure'
|
||||
==> mpileaks: Executing phase: 'build'
|
||||
==> mpileaks: Executing phase: 'install'
|
||||
[+] ~/spack/opt/linux-rhel7-broadwell/gcc-8.1.0/mpileaks-1.0-ph7pbnhl334wuhogmugriohcwempqry2
|
||||
==> Installing mpileaks
|
||||
==> mpich is already installed in ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4.
|
||||
==> callpath is already installed in ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/callpath@1.0.2-5dce4318.
|
||||
==> adept-utils is already installed in ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/adept-utils@1.0-5adef8da.
|
||||
==> Trying to fetch from https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz
|
||||
######################################################################## 100.0%
|
||||
==> Staging archive: ~/spack/var/spack/stage/mpileaks@1.0%gcc@4.4.7 arch=linux-debian7-x86_64-59f6ad23/mpileaks-1.0.tar.gz
|
||||
==> Created stage in ~/spack/var/spack/stage/mpileaks@1.0%gcc@4.4.7 arch=linux-debian7-x86_64-59f6ad23.
|
||||
==> No patches needed for mpileaks.
|
||||
==> Building mpileaks.
|
||||
|
||||
... build output ...
|
||||
|
||||
==> Successfully installed mpileaks.
|
||||
Fetch: 2.16s. Build: 9.82s. Total: 11.98s.
|
||||
[+] ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpileaks@1.0-59f6ad23
|
||||
|
||||
The last line, with the ``[+]``, indicates where the package is
|
||||
installed.
|
||||
|
||||
Add the Spack debug option (one or more times) -- ``spack -d install
|
||||
mpileaks`` -- to get additional (and even more verbose) output.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Building a specific version
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@@ -285,102 +284,6 @@ and removed everything that is not either:
|
||||
You can check :ref:`cmd-spack-find-metadata` to see how to query for explicitly installed packages
|
||||
or :ref:`dependency-types` for a more thorough treatment of dependency types.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Marking packages explicit or implicit
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
By default, Spack will mark packages a user installs as explicitly installed,
|
||||
while all of its dependencies will be marked as implicitly installed. Packages
|
||||
can be marked manually as explicitly or implicitly installed by using
|
||||
``spack mark``. This can be used in combination with ``spack gc`` to clean up
|
||||
packages that are no longer required.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install m4
|
||||
==> 29005: Installing libsigsegv
|
||||
[...]
|
||||
==> 29005: Installing m4
|
||||
[...]
|
||||
|
||||
$ spack install m4 ^libsigsegv@2.11
|
||||
==> 39798: Installing libsigsegv
|
||||
[...]
|
||||
==> 39798: Installing m4
|
||||
[...]
|
||||
|
||||
$ spack find -d
|
||||
==> 4 installed packages
|
||||
-- linux-fedora32-haswell / gcc@10.1.1 --------------------------
|
||||
libsigsegv@2.11
|
||||
|
||||
libsigsegv@2.12
|
||||
|
||||
m4@1.4.18
|
||||
libsigsegv@2.12
|
||||
|
||||
m4@1.4.18
|
||||
libsigsegv@2.11
|
||||
|
||||
$ spack gc
|
||||
==> There are no unused specs. Spack's store is clean.
|
||||
|
||||
$ spack mark -i m4 ^libsigsegv@2.11
|
||||
==> m4@1.4.18 : marking the package implicit
|
||||
|
||||
$ spack gc
|
||||
==> The following packages will be uninstalled:
|
||||
|
||||
-- linux-fedora32-haswell / gcc@10.1.1 --------------------------
|
||||
5fj7p2o libsigsegv@2.11 c6ensc6 m4@1.4.18
|
||||
|
||||
==> Do you want to proceed? [y/N]
|
||||
|
||||
In the example above, we ended up with two versions of ``m4`` since they depend
|
||||
on different versions of ``libsigsegv``. ``spack gc`` will not remove any of
|
||||
the packages since both versions of ``m4`` have been installed explicitly
|
||||
and both versions of ``libsigsegv`` are required by the ``m4`` packages.
|
||||
|
||||
``spack mark`` can also be used to implement upgrade workflows. The following
|
||||
example demonstrates how the ``spack mark`` and ``spack gc`` can be used to
|
||||
only keep the current version of a package installed.
|
||||
|
||||
When updating Spack via ``git pull``, new versions for either ``libsigsegv``
|
||||
or ``m4`` might be introduced. This will cause Spack to install duplicates.
|
||||
Since we only want to keep one version, we mark everything as implicitly
|
||||
installed before updating Spack. If there is no new version for either of the
|
||||
packages, ``spack install`` will simply mark them as explicitly installed and
|
||||
``spack gc`` will not remove them.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install m4
|
||||
==> 62843: Installing libsigsegv
|
||||
[...]
|
||||
==> 62843: Installing m4
|
||||
[...]
|
||||
|
||||
$ spack mark -i -a
|
||||
==> m4@1.4.18 : marking the package implicit
|
||||
|
||||
$ git pull
|
||||
[...]
|
||||
|
||||
$ spack install m4
|
||||
[...]
|
||||
==> m4@1.4.18 : marking the package explicit
|
||||
[...]
|
||||
|
||||
$ spack gc
|
||||
==> There are no unused specs. Spack's store is clean.
|
||||
|
||||
When using this workflow for installations that contain more packages, care
|
||||
has to be taken to either only mark selected packages or issue ``spack install``
|
||||
for all packages that should be kept.
|
||||
|
||||
You can check :ref:`cmd-spack-find-metadata` to see how to query for explicitly
|
||||
or implicitly installed packages.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Non-Downloadable Tarballs
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@@ -426,6 +329,85 @@ the tarballs in question to it (see :ref:`mirrors`):
|
||||
|
||||
$ spack install galahad
|
||||
|
||||
-----------------------------
|
||||
Deprecating insecure packages
|
||||
-----------------------------
|
||||
|
||||
``spack deprecate`` allows for the removal of insecure packages with
|
||||
minimal impact to their dependents.
|
||||
|
||||
.. warning::
|
||||
|
||||
The ``spack deprecate`` command is designed for use only in
|
||||
extraordinary circumstances. This is a VERY big hammer to be used
|
||||
with care.
|
||||
|
||||
The ``spack deprecate`` command will remove one package and replace it
|
||||
with another by replacing the deprecated package's prefix with a link
|
||||
to the deprecator package's prefix.
|
||||
|
||||
.. warning::
|
||||
|
||||
The ``spack deprecate`` command makes no promises about binary
|
||||
compatibility. It is up to the user to ensure the deprecator is
|
||||
suitable for the deprecated package.
|
||||
|
||||
Spack tracks concrete deprecated specs and ensures that no future packages
|
||||
concretize to a deprecated spec.
|
||||
|
||||
The first spec given to the ``spack deprecate`` command is the package
|
||||
to deprecate. It is an abstract spec that must describe a single
|
||||
installed package. The second spec argument is the deprecator
|
||||
spec. By default it must be an abstract spec that describes a single
|
||||
installed package, but with the ``-i/--install-deprecator`` it can be
|
||||
any abstract spec that Spack will install and then use as the
|
||||
deprecator. The ``-I/--no-install-deprecator`` option will ensure
|
||||
the default behavior.
|
||||
|
||||
By default, ``spack deprecate`` will deprecate all dependencies of the
|
||||
deprecated spec, replacing each by the dependency of the same name in
|
||||
the deprecator spec. The ``-d/--dependencies`` option will ensure the
|
||||
default, while the ``-D/--no-dependencies`` option will deprecate only
|
||||
the root of the deprecate spec in favor of the root of the deprecator
|
||||
spec.
|
||||
|
||||
``spack deprecate`` can use symbolic links or hard links. The default
|
||||
behavior is symbolic links, but the ``-l/--link-type`` flag can take
|
||||
options ``hard`` or ``soft``.
|
||||
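As a quick, hedged illustration of the options described above (the ``openssl`` versions are hypothetical):

.. code-block:: console

   # replace an insecure install with a patched one, deprecating only the
   # root spec and using hard links instead of symlinks
   $ spack deprecate --no-dependencies --link-type hard openssl@1.0.2g openssl@1.0.2k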
|
||||
-----------------------
|
||||
Verifying installations
|
||||
-----------------------
|
||||
|
||||
The ``spack verify`` command can be used to verify the validity of
|
||||
Spack-installed packages any time after installation.
|
||||
|
||||
At installation time, Spack creates a manifest of every file in the
|
||||
installation prefix. For links, Spack tracks the mode, ownership, and
|
||||
destination. For directories, Spack tracks the mode, and
|
||||
ownership. For files, Spack tracks the mode, ownership, modification
|
||||
time, hash, and size. The Spack verify command will check, for every
|
||||
file in each package, whether any of those attributes have changed. It
|
||||
will also check for newly added files or deleted files from the
|
||||
installation prefix. Spack can either check all installed packages
|
||||
using the `-a,--all` or accept specs listed on the command line to
|
||||
verify.
|
||||
|
||||
The ``spack verify`` command can also verify for individual files that
|
||||
they haven't been altered since installation time. If the given file
|
||||
is not in a Spack installation prefix, Spack will report that it is
|
||||
not owned by any package. To check individual files instead of specs,
|
||||
use the ``-f,--files`` option.
|
||||
|
||||
Spack installation manifests are part of the tarball signed by Spack
|
||||
for binary package distribution. When installed from a binary package,
|
||||
Spack uses the packaged installation manifest instead of creating one
|
||||
at install time.
|
||||
|
||||
The ``spack verify`` command also accepts the ``-l,--local`` option to
|
||||
check only local packages (as opposed to those used transparently from
|
||||
``upstream`` spack instances) and the ``-j,--json`` option to output
|
||||
machine-readable json data for any errors.
|
||||
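A short sketch of the options just described (the file path is hypothetical):

.. code-block:: console

   # verify every installed package
   $ spack verify -a

   # check individual files instead of whole specs
   $ spack verify -f /path/to/prefix/lib/libz.so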
|
||||
-------------------------
|
||||
Seeing installed packages
|
||||
@@ -694,95 +676,6 @@ structured the way you want:
|
||||
"hash": "zvaa4lhlhilypw5quj3akyd3apbq5gap"
|
||||
}
|
||||
|
||||
|
||||
------------------------
|
||||
Using installed packages
|
||||
------------------------
|
||||
|
||||
There are several different ways to use Spack packages once you have
|
||||
installed them. As you've seen, spack packages are installed into long
|
||||
paths with hashes, and you need a way to get them into your path. The
|
||||
easiest way is to use :ref:`spack load <cmd-spack-load>`, which is
|
||||
described in the next section.
|
||||
|
||||
Some more advanced ways to use Spack packages include:
|
||||
|
||||
* :ref:`environments <environments>`, which you can use to bundle a
|
||||
number of related packages to "activate" all at once, and
|
||||
* :ref:`environment modules <modules>`, which are commonly used on
|
||||
supercomputing clusters. Spack generates module files for every
|
||||
installation automatically, and you can customize how this is done.
|
||||
|
||||
.. _cmd-spack-load:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
``spack load / unload``
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you have :ref:`shell support <shell-support>` enabled you can use the
|
||||
``spack load`` command to quickly get a package on your ``PATH``.
|
||||
|
||||
For example this will add the ``mpich`` package built with ``gcc`` to
|
||||
your path:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install mpich %gcc@4.4.7
|
||||
|
||||
# ... wait for install ...
|
||||
|
||||
$ spack load mpich %gcc@4.4.7
|
||||
$ which mpicc
|
||||
~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4/bin/mpicc
|
||||
|
||||
These commands will add appropriate directories to your ``PATH``,
|
||||
``MANPATH``, ``CPATH``, and ``LD_LIBRARY_PATH`` according to the
|
||||
:ref:`prefix inspections <customize-env-modifications>` defined in your
|
||||
modules configuration. When you no longer want to use a package, you
|
||||
can type ``spack unload`` similarly:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack unload mpich %gcc@4.4.7
|
||||
|
||||
|
||||
"""""""""""""""
|
||||
Ambiguous specs
|
||||
"""""""""""""""
|
||||
|
||||
If a spec used with load/unload is ambiguous (i.e. more than one
|
||||
installed package matches it), then Spack will warn you:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack load libelf
|
||||
==> Error: libelf matches multiple packages.
|
||||
Matching packages:
|
||||
qmm4kso libelf@0.8.13%gcc@4.4.7 arch=linux-debian7-x86_64
|
||||
cd2u6jt libelf@0.8.13%intel@15.0.0 arch=linux-debian7-x86_64
|
||||
Use a more specific spec
|
||||
|
||||
You can either type the ``spack load`` command again with a fully
|
||||
qualified argument, or you can add just enough extra constraints to
|
||||
identify one package. For example, above, the key differentiator is
|
||||
that one ``libelf`` is built with the Intel compiler, while the other
|
||||
used ``gcc``. You could therefore just type:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack load libelf %intel
|
||||
|
||||
This identifies just the one built with the Intel compiler. If you want to be
|
||||
*very* specific, you can load it by its hash. For example, to load the
|
||||
first ``libelf`` above, you would run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack load /qmm4kso
|
||||
|
||||
We'll learn more about Spack's spec syntax in the next section.
|
||||
|
||||
|
||||
.. _sec-specs:
|
||||
|
||||
--------------------
|
||||
@@ -802,11 +695,11 @@ Here is an example of a much longer spec than we've seen thus far:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
mpileaks @1.2:1.4 %gcc@4.7.5 +debug -qt target=x86_64 ^callpath @1.1 %gcc@4.7.2
|
||||
mpileaks @1.2:1.4 %gcc@4.7.5 +debug -qt arch=bgq_os ^callpath @1.1 %gcc@4.7.2
|
||||
|
||||
If provided to ``spack install``, this will install the ``mpileaks``
|
||||
library at some version between ``1.2`` and ``1.4`` (inclusive),
|
||||
built using ``gcc`` at version 4.7.5 for a generic ``x86_64`` architecture,
|
||||
built using ``gcc`` at version 4.7.5 for the Blue Gene/Q architecture,
|
||||
with debug options enabled, and without Qt support. Additionally, it
|
||||
says to link it with the ``callpath`` library (which it depends on),
|
||||
and to build callpath with ``gcc`` 4.7.2. Most specs will not be as
|
||||
@@ -969,7 +862,7 @@ Variants are named options associated with a particular package. They are
|
||||
optional, as each package must provide default values for each variant it
|
||||
makes available. Variants can be specified using
|
||||
a flexible parameter syntax ``name=<value>``. For example,
|
||||
``spack install mercury debug=True`` will install mercury built with debug
|
||||
``spack install libelf debug=True`` will install libelf built with debug
|
||||
flags. The names of particular variants available for a package depend on
|
||||
what was provided by the package author. ``spack info <package>`` will
|
||||
provide information on what build variants are available.
|
||||
@@ -977,11 +870,11 @@ provide information on what build variants are available.
|
||||
For compatibility with earlier versions, variants which happen to be
|
||||
boolean in nature can be specified by a syntax that represents turning
|
||||
options on and off. For example, in the previous spec we could have
|
||||
supplied ``mercury +debug`` with the same effect of enabling the debug
|
||||
supplied ``libelf +debug`` with the same effect of enabling the debug
|
||||
compile-time option for the ``libelf`` package.
|
||||
|
||||
Depending on the package a variant may have any default value. For
|
||||
``mercury`` here, ``debug`` is ``False`` by default, and we turned it on
|
||||
``libelf`` here, ``debug`` is ``False`` by default, and we turned it on
|
||||
with ``debug=True`` or ``+debug``. If a variant is ``True`` by default
|
||||
you can turn it off by either adding ``-name`` or ``~name`` to the spec.
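
Putting this together, the following commands (using ``libelf`` purely as
an example) show the different ways of controlling a boolean variant:

.. code-block:: console

   $ spack install libelf debug=True   # parameter syntax
   $ spack install libelf +debug       # enable the variant
   $ spack install libelf ~debug       # disable the variant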
|
||||
|
||||
@@ -1341,88 +1234,6 @@ add a version specifier to the spec:
|
||||
Notice that the package versions that provide insufficient MPI
|
||||
versions are now filtered out.
|
||||
|
||||
|
||||
-----------------------------
|
||||
Deprecating insecure packages
|
||||
-----------------------------
|
||||
|
||||
``spack deprecate`` allows for the removal of insecure packages with
|
||||
minimal impact to their dependents.
|
||||
|
||||
.. warning::
|
||||
|
||||
The ``spack deprecate`` command is designed for use only in
|
||||
extraordinary circumstances. This is a VERY big hammer to be used
|
||||
with care.
|
||||
|
||||
The ``spack deprecate`` command will remove one package and replace it
|
||||
with another by replacing the deprecated package's prefix with a link
|
||||
to the deprecator package's prefix.
|
||||
|
||||
.. warning::
|
||||
|
||||
The ``spack deprecate`` command makes no promises about binary
|
||||
compatibility. It is up to the user to ensure the deprecator is
|
||||
suitable for the deprecated package.
|
||||
|
||||
Spack tracks concrete deprecated specs and ensures that no future packages
|
||||
concretize to a deprecated spec.
|
||||
|
||||
The first spec given to the ``spack deprecate`` command is the package
|
||||
to deprecate. It is an abstract spec that must describe a single
|
||||
installed package. The second spec argument is the deprecator
|
||||
spec. By default it must be an abstract spec that describes a single
|
||||
installed package, but with the ``-i/--install-deprecator`` it can be
|
||||
any abstract spec that Spack will install and then use as the
|
||||
deprecator. The ``-I/--no-install-deprecator`` option will ensure
|
||||
the default behavior.
|
||||
|
||||
By default, ``spack deprecate`` will deprecate all dependencies of the
|
||||
deprecated spec, replacing each by the dependency of the same name in
|
||||
the deprecator spec. The ``-d/--dependencies`` option will ensure the
|
||||
default, while the ``-D/--no-dependencies`` option will deprecate only
|
||||
the root of the deprecate spec in favor of the root of the deprecator
|
||||
spec.
|
||||
|
||||
``spack deprecate`` can use symbolic links or hard links. The default
|
||||
behavior is symbolic links, but the ``-l/--link-type`` flag can take
|
||||
options ``hard`` or ``soft``.
|
||||
|
||||
-----------------------
|
||||
Verifying installations
|
||||
-----------------------
|
||||
|
||||
The ``spack verify`` command can be used to verify the validity of
|
||||
Spack-installed packages any time after installation.
|
||||
|
||||
At installation time, Spack creates a manifest of every file in the
|
||||
installation prefix. For links, Spack tracks the mode, ownership, and
|
||||
destination. For directories, Spack tracks the mode, and
|
||||
ownership. For files, Spack tracks the mode, ownership, modification
|
||||
time, hash, and size. The Spack verify command will check, for every
|
||||
file in each package, whether any of those attributes have changed. It
|
||||
will also check for newly added files or deleted files from the
|
||||
installation prefix. Spack can either check all installed packages
|
||||
using the `-a,--all` or accept specs listed on the command line to
|
||||
verify.
|
||||
|
||||
The ``spack verify`` command can also verify for individual files that
|
||||
they haven't been altered since installation time. If the given file
|
||||
is not in a Spack installation prefix, Spack will report that it is
|
||||
not owned by any package. To check individual files instead of specs,
|
||||
use the ``-f,--files`` option.
|
||||
|
||||
Spack installation manifests are part of the tarball signed by Spack
|
||||
for binary package distribution. When installed from a binary package,
|
||||
Spack uses the packaged installation manifest instead of creating one
|
||||
at install time.
|
||||
|
||||
The ``spack verify`` command also accepts the ``-l,--local`` option to
|
||||
check only local packages (as opposed to those used transparently from
|
||||
``upstream`` spack instances) and the ``-j,--json`` option to output
|
||||
machine-readable json data for any errors.
|
||||
|
||||
|
||||
.. _extensions:
|
||||
|
||||
---------------------------
|
||||
@@ -1730,39 +1541,6 @@ This issue typically manifests with the error below:
|
||||
|
||||
A nicer error message is TBD in future versions of Spack.
|
||||
|
||||
---------------
|
||||
Troubleshooting
|
||||
---------------
|
||||
|
||||
The ``spack audit`` command:
|
||||
|
||||
.. command-output:: spack audit -h
|
||||
|
||||
can be used to detect a number of configuration issues. This command detects
|
||||
configuration settings which might not be strictly wrong but are not likely
|
||||
to be useful outside of special cases.
|
||||
|
||||
It can also be used to detect dependency issues with packages - for example
|
||||
cases where a package constrains a dependency with a variant that doesn't
|
||||
exist (in this case Spack could report the problem ahead of time but
|
||||
automatically performing the check would slow down most runs of Spack).
|
||||
|
||||
A detailed list of the checks currently implemented for each subcommand can be
|
||||
printed with:
|
||||
|
||||
.. command-output:: spack -v audit list
|
||||
|
||||
Depending on the use case, users might run the appropriate subcommands to obtain
|
||||
diagnostics. Issues, if found, are reported to stdout:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
% spack audit packages lammps
|
||||
PKG-DIRECTIVES: 1 issue found
|
||||
1. lammps: wrong variant in "conflicts" directive
|
||||
the variant 'adios' does not exist
|
||||
in /home/spack/spack/var/spack/repos/builtin/packages/lammps/package.py
|
||||
|
||||
|
||||
------------
|
||||
Getting Help
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -31,25 +31,9 @@ Build caches are created via:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack buildcache create <spec>
|
||||
$ spack buildcache create spec
|
||||
|
||||
|
||||
If you wanted to create a build cache in a local directory, you would provide
|
||||
the ``-d`` argument to target that directory, along with the spec to cache.
|
||||
Here is an example of creating a local directory, "spack-cache", and creating
|
||||
build cache files for the "ninja" spec:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ mkdir -p ./spack-cache
|
||||
$ spack buildcache create -d ./spack-cache ninja
|
||||
==> Buildcache files will be output to file:///home/spackuser/spack/spack-cache/build_cache
|
||||
gpgconf: socketdir is '/run/user/1000/gnupg'
|
||||
gpg: using "E6DF6A8BD43208E4D6F392F23777740B7DBD643D" as default secret key for signing
|
||||
|
||||
Note that the targeted spec must already be installed. Once you have a build cache,
|
||||
you can add it as a mirror, discussed next.
|
||||
|
||||
---------------------------------------
|
||||
Finding or installing build cache files
|
||||
---------------------------------------
|
||||
@@ -59,98 +43,19 @@ with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack mirror add <name> <url>
|
||||
|
||||
|
||||
Note that the url can be a web URL *or* a local filesystem location. In the previous
|
||||
example, you might add the directory "spack-cache" and call it ``mymirror``:
|
||||
$ spack mirror add <name> <url>
|
||||
|
||||
Build caches are found via:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack mirror add mymirror ./spack-cache
|
||||
$ spack buildcache list
|
||||
|
||||
|
||||
You can see that the mirror is added with ``spack mirror list`` as follows:
|
||||
Build caches are installed via:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
|
||||
$ spack mirror list
|
||||
mymirror file:///home/spackuser/spack/spack-cache
|
||||
spack-public https://spack-llnl-mirror.s3-us-west-2.amazonaws.com/
|
||||
|
||||
|
||||
At this point, you've created a buildcache, but Spack hasn't indexed it, so if
|
||||
you run ``spack buildcache list`` you won't see any results. You need to index
|
||||
this new build cache as follows:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack buildcache update-index -d spack-cache/
|
||||
|
||||
Now you can use list:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack buildcache list
|
||||
==> 1 cached build.
|
||||
-- linux-ubuntu20.04-skylake / gcc@9.3.0 ------------------------
|
||||
ninja@1.10.2
|
||||
|
||||
|
||||
Great! So now let's say you have a different spack installation, or perhaps just
|
||||
a different environment for the same one, and you want to install a package from
|
||||
that build cache. Let's first uninstall the actual library "ninja" to see if we can
|
||||
re-install it from the cache.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack uninstall ninja
|
||||
|
||||
|
||||
And now reinstall from the buildcache:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack buildcache install ninja
|
||||
==> buildcache spec(s) matching ninja
|
||||
==> Fetching file:///home/spackuser/spack/spack-cache/build_cache/linux-ubuntu20.04-skylake/gcc-9.3.0/ninja-1.10.2/linux-ubuntu20.04-skylake-gcc-9.3.0-ninja-1.10.2-i4e5luour7jxdpc3bkiykd4imke3mkym.spack
|
||||
####################################################################################################################################### 100.0%
|
||||
==> Installing buildcache for spec ninja@1.10.2%gcc@9.3.0 arch=linux-ubuntu20.04-skylake
|
||||
gpgconf: socketdir is '/run/user/1000/gnupg'
|
||||
gpg: Signature made Tue 23 Mar 2021 10:16:29 PM MDT
|
||||
gpg: using RSA key E6DF6A8BD43208E4D6F392F23777740B7DBD643D
|
||||
gpg: Good signature from "spackuser (GPG created for Spack) <spackuser@noreply.users.github.com>" [ultimate]
|
||||
|
||||
|
||||
It worked! You've just completed a full example of creating a build cache with
|
||||
a spec of interest, adding it as a mirror, updating its index, listing the contents,
|
||||
and finally, installing from it.
|
||||
|
||||
|
||||
Note that the above command is intended to install a particular package to a
|
||||
build cache you have created, and not to install a package from a build cache.
|
||||
For the latter, once a mirror is added, by default when you do ``spack install`` the ``--use-cache``
|
||||
flag is set, and you will install a package from a build cache if it is available.
|
||||
If you want to always use the cache, you can do:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install --cache-only <package>
|
||||
|
||||
For example, to combine all of the commands above to add the E4S build cache
|
||||
and then install from it exclusively, you would do:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack mirror add E4S https://cache.e4s.io
|
||||
$ spack buildcache keys --install --trust
|
||||
$ spack install --cache-only <package>
|
||||
|
||||
We use ``--install`` and ``--trust`` to say that we are installing keys to our
|
||||
keyring, and trusting all downloaded keys.
|
||||
|
||||
$ spack buildcache install
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
List of popular build caches
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -57,13 +57,10 @@ directory. Here's an example of an external configuration:
|
||||
|
||||
packages:
|
||||
openmpi:
|
||||
externals:
|
||||
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
|
||||
prefix: /opt/openmpi-1.4.3
|
||||
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
|
||||
prefix: /opt/openmpi-1.4.3-debug
|
||||
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
|
||||
prefix: /opt/openmpi-1.6.5-intel
|
||||
paths:
|
||||
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
|
||||
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
|
||||
openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
|
||||
|
||||
This example lists three installations of OpenMPI, one built with GCC,
|
||||
one built with GCC and debug information, and another built with Intel.
|
||||
@@ -79,15 +76,13 @@ of the installation prefixes. The following example says that module
|
||||
.. code-block:: yaml
|
||||
|
||||
cmake:
|
||||
externals:
|
||||
- spec: cmake@3.7.2
|
||||
modules:
|
||||
- CMake/3.7.2
|
||||
modules:
|
||||
cmake@3.7.2: CMake/3.7.2
|
||||
|
||||
Each ``packages.yaml`` begins with a ``packages:`` attribute, followed
|
||||
by a list of package names. To specify externals, add an ``externals:``
|
||||
attribute under the package name, which lists externals.
|
||||
Each external should specify a ``spec:`` string that should be as
|
||||
Each ``packages.yaml`` begins with a ``packages:`` token, followed
|
||||
by a list of package names. To specify externals, add a ``paths`` or ``modules``
|
||||
token under the package name, which lists externals in a
|
||||
``spec: /path`` or ``spec: module-name`` format. Each spec should be as
|
||||
well-defined as reasonably possible. If a
|
||||
package lacks a spec component, such as missing a compiler or
|
||||
package version, then Spack will guess the missing component based
|
||||
@@ -111,13 +106,10 @@ be:
|
||||
|
||||
packages:
|
||||
openmpi:
|
||||
externals:
|
||||
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
|
||||
prefix: /opt/openmpi-1.4.3
|
||||
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
|
||||
prefix: /opt/openmpi-1.4.3-debug
|
||||
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
|
||||
prefix: /opt/openmpi-1.6.5-intel
|
||||
paths:
|
||||
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
|
||||
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
|
||||
openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
|
||||
buildable: False
|
||||
|
||||
The addition of the ``buildable`` flag tells Spack that it should never build
|
||||
@@ -145,13 +137,10 @@ but more conveniently:
|
||||
mpi:
|
||||
buildable: False
|
||||
openmpi:
|
||||
externals:
|
||||
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
|
||||
prefix: /opt/openmpi-1.4.3
|
||||
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
|
||||
prefix: /opt/openmpi-1.4.3-debug
|
||||
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
|
||||
prefix: /opt/openmpi-1.6.5-intel
|
||||
paths:
|
||||
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
|
||||
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
|
||||
openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
|
||||
|
||||
Implementations can also be listed immediately under the virtual they provide:
|
||||
|
||||
@@ -183,9 +172,8 @@ After running this command your ``packages.yaml`` may include new entries:
|
||||
|
||||
packages:
|
||||
cmake:
|
||||
externals:
|
||||
- spec: cmake@3.17.2
|
||||
prefix: /usr
|
||||
paths:
|
||||
cmake@3.17.2: /usr
|
||||
|
||||
Generally this is useful for detecting a small set of commonly-used packages;
|
||||
for now it is mostly limited to finding build-only dependencies.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -29,7 +29,6 @@ on these ideas for each distinct build system that Spack supports:
|
||||
:maxdepth: 1
|
||||
:caption: Make-incompatible
|
||||
|
||||
build_systems/mavenpackage
|
||||
build_systems/sconspackage
|
||||
build_systems/wafpackage
|
||||
|
||||
@@ -59,9 +58,7 @@ on these ideas for each distinct build system that Spack supports:
|
||||
|
||||
build_systems/bundlepackage
|
||||
build_systems/cudapackage
|
||||
build_systems/inteloneapipackage
|
||||
build_systems/intelpackage
|
||||
build_systems/rocmpackage
|
||||
build_systems/custompackage
|
||||
|
||||
For reference, the :py:mod:`Build System API docs <spack.build_systems>`
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -155,7 +155,7 @@ version, this can be done like so:
|
||||
|
||||
@property
|
||||
def force_autoreconf(self):
|
||||
return self.version == Version('1.2.3')
|
||||
return self.version == Version('1.2.3'):
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Finding configure flags
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -9,120 +9,35 @@
|
||||
CudaPackage
|
||||
-----------
|
||||
|
||||
Different from other packages, ``CudaPackage`` does not represent a build system.
|
||||
Instead its goal is to simplify and unify usage of ``CUDA`` in other packages by providing a `mixin-class <https://en.wikipedia.org/wiki/Mixin>`_.
|
||||
Different from other packages, ``CudaPackage`` does not represent a build
|
||||
system. Instead its goal is to simplify and unify usage of ``CUDA`` in other
|
||||
packages.
|
||||
|
||||
You can find source for the package at
|
||||
`<https://github.com/spack/spack/blob/develop/lib/spack/spack/build_systems/cuda.py>`__.
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Provided variants and dependencies
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
^^^^^^^^
|
||||
Variants
|
||||
^^^^^^^^
|
||||
|
||||
This package provides the following variants:
|
||||
|
||||
* **cuda**
|
||||
|
||||
This variant is used to enable/disable building with ``CUDA``. The default
|
||||
is disabled (or ``False``).
|
||||
|
||||
* **cuda_arch**
|
||||
|
||||
This variant supports the optional specification of the architecture.
|
||||
Valid values are maintained in the ``cuda_arch_values`` property and
|
||||
are the numeric character equivalent of the compute capability version
|
||||
(e.g., '10' for version 1.0). Each provided value affects associated
|
||||
``CUDA`` dependencies and compiler conflicts.
|
||||
|
||||
GPUs and their compute capability versions are listed at
|
||||
https://developer.nvidia.com/cuda-gpus .
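
In practice this means a user can request CUDA support and a specific
compute capability directly on the command line, for example (the
package name here is a placeholder):

.. code-block:: console

   $ spack install my-package +cuda cuda_arch=70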
|
||||
|
||||
^^^^^^^^^
|
||||
Conflicts
|
||||
^^^^^^^^^
|
||||
|
||||
Conflicts are used to prevent builds with known bugs or issues. While
|
||||
base ``CUDA`` conflicts have been included with this package, you may
|
||||
want to add more for your software.
|
||||
|
||||
For example, if your package requires ``cuda_arch`` to be specified when
|
||||
``cuda`` is enabled, you can add the following conflict to your package
|
||||
to terminate such build attempts with a suitable message:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
conflicts('cuda_arch=none', when='+cuda',
|
||||
msg='CUDA architecture is required')
|
||||
|
||||
Similarly, if your software does not support all versions of the property,
|
||||
you could add ``conflicts`` to your package for those versions. For example,
|
||||
suppose your software does not work with CUDA compute capability versions
|
||||
prior to SM 5.0 (``50``). You can add the following code to display a
|
||||
custom message should a user attempt such a build:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
unsupported_cuda_archs = [
|
||||
'10', '11', '12', '13',
|
||||
'20', '21',
|
||||
'30', '32', '35', '37'
|
||||
]
|
||||
for value in unsupported_cuda_archs:
|
||||
conflicts('cuda_arch={0}'.format(value), when='+cuda',
|
||||
msg='CUDA architecture {0} is not supported'.format(value))
|
||||
|
||||
^^^^^^^
|
||||
Methods
|
||||
^^^^^^^
|
||||
|
||||
This package provides one custom helper method, which is used to build
|
||||
standard CUDA compiler flags.
|
||||
|
||||
**cuda_flags**
|
||||
|
||||
This built-in static method returns a list of command line flags
|
||||
for the chosen ``cuda_arch`` value(s). The flags are intended to
|
||||
be passed to the CUDA compiler driver (i.e., ``nvcc``).
|
||||
|
||||
This method must be explicitly called when you are creating the
|
||||
arguments for your build in order to use the values.
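
A minimal sketch of calling ``cuda_flags`` from a CMake-based package
follows; the class name and the ``CMAKE_CUDA_FLAGS`` variable are
assumptions chosen for illustration, not requirements of ``CudaPackage``:

.. code-block:: python

   class MyCudaPackage(CMakePackage, CudaPackage):
       def cmake_args(self):
           args = []
           if '+cuda' in self.spec:
               cuda_arch = self.spec.variants['cuda_arch'].value
               # cuda_flags() converts the requested compute capabilities
               # into flags understood by nvcc
               args.append('-DCMAKE_CUDA_FLAGS={0}'.format(
                   ' '.join(self.cuda_flags(cuda_arch))))
           return args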
|
||||
``CudaPackage`` provides ``cuda`` variant (default to ``off``) to enable/disable
|
||||
``CUDA``, and ``cuda_arch`` variant to optionally specify the architecture.
|
||||
It also declares dependencies on the ``CUDA`` package ``depends_on('cuda@...')``
|
||||
based on the architecture as well as specifies conflicts for certain compiler versions.
|
||||
|
||||
^^^^^
|
||||
Usage
|
||||
^^^^^
|
||||
|
||||
This helper package can be added to your package by adding it as a base
|
||||
class of your package. For example, you can add it to your
|
||||
:ref:`CMakePackage <cmakepackage>`-based package as follows:
|
||||
In order to use it, just add another base class to your package, for example:
|
||||
|
||||
.. code-block:: python
|
||||
:emphasize-lines: 1,7-16
|
||||
|
||||
class MyCudaPackage(CMakePackage, CudaPackage):
|
||||
class MyPackage(CMakePackage, CudaPackage):
|
||||
...
|
||||
def cmake_args(self):
|
||||
spec = self.spec
|
||||
args = []
|
||||
...
|
||||
if '+cuda' in spec:
|
||||
# Set up the cuda macros needed by the build
|
||||
args.append('-DWITH_CUDA=ON')
|
||||
cuda_arch_list = spec.variants['cuda_arch'].value
|
||||
cuda_arch = cuda_arch_list[0]
|
||||
options.append('-DWITH_CUDA=ON')
|
||||
cuda_arch = spec.variants['cuda_arch'].value
|
||||
if cuda_arch != 'none':
|
||||
args.append('-DCUDA_FLAGS=-arch=sm_{0}'.format(cuda_arch))
|
||||
options.append('-DCUDA_FLAGS=-arch=sm_{0}'.format(cuda_arch[0]))
|
||||
else:
|
||||
# Ensure build with cuda is disabled
|
||||
args.append('-DWITH_CUDA=OFF')
|
||||
...
|
||||
return args
|
||||
|
||||
assuming only the ``WITH_CUDA`` and ``CUDA_FLAGS`` flags are required.
|
||||
You will need to customize options as needed for your build.
|
||||
|
||||
This example also illustrates how to check for the ``cuda`` variant using
|
||||
``self.spec`` and how to retrieve the ``cuda_arch`` variant's value, which
|
||||
is a list, using ``self.spec.variants['cuda_arch'].value``.
|
||||
|
||||
With over 70 packages using ``CudaPackage`` as of January 2021 there are
|
||||
lots of examples to choose from to get more ideas for using this package.
|
||||
options.append('-DWITH_CUDA=OFF')
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -9,7 +9,7 @@
|
||||
Custom Build Systems
|
||||
--------------------
|
||||
|
||||
While the built-in build systems should meet your needs for the
|
||||
While the build systems listed above should meet your needs for the
|
||||
vast majority of packages, some packages provide custom build scripts.
|
||||
This guide is intended for the following use cases:
|
||||
|
||||
@@ -31,7 +31,7 @@ installation. Both of these packages require custom build systems.
|
||||
Base class
|
||||
^^^^^^^^^^
|
||||
|
||||
If your package does not belong to any of the built-in build
|
||||
If your package does not belong to any of the aforementioned build
|
||||
systems that Spack already supports, you should inherit from the
|
||||
``Package`` base class. ``Package`` is a simple base class with a
|
||||
single phase: ``install``. If your package is simple, you may be able
|
||||
@@ -168,8 +168,7 @@ if and only if this flag is set, we would use the following line:
|
||||
Testing
|
||||
^^^^^^^
|
||||
|
||||
Let's put everything together and add unit tests to be optionally run
|
||||
during the installation of our package.
|
||||
Let's put everything together and add unit tests to our package.
|
||||
In the ``perl`` package, we can see:
|
||||
|
||||
.. code-block:: python
|
||||
@@ -183,6 +182,12 @@ As you can guess, this runs ``make test`` *after* building the package,
|
||||
if and only if testing is requested. Again, this is not specific to
|
||||
custom build systems, it can be added to existing build systems as well.
|
||||
|
||||
Ideally, every package in Spack will have some sort of test to ensure
|
||||
that it was built correctly. It is up to the package authors to make
|
||||
sure this happens. If you are adding a package for some software and
|
||||
the developers list commands to test the installation, please add these
|
||||
tests to your ``package.py``.
|
||||
|
||||
.. warning::
|
||||
|
||||
The order of decorators matters. The following ordering:
|
||||
@@ -202,12 +207,3 @@ custom build systems, it can be added to existing build systems as well.
|
||||
the tests will always be run regardless of whether or not
|
||||
``--test=root`` is requested. See https://github.com/spack/spack/issues/3833
|
||||
for more information.
|
||||
|
||||
Ideally, every package in Spack will have some sort of test to ensure
|
||||
that it was built correctly. It is up to the package authors to make
|
||||
sure this happens. If you are adding a package for some software and
|
||||
the developers list commands to test the installation, please add these
|
||||
tests to your ``package.py``.
|
||||
|
||||
For more information on other forms of package testing, refer to
|
||||
:ref:`Checking an installation <checking_an_installation>`.
|
||||
|
||||
@@ -1,155 +0,0 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
.. _inteloneapipackage:
|
||||
|
||||
|
||||
====================
|
||||
IntelOneapiPackage
|
||||
====================
|
||||
|
||||
|
||||
.. contents::
|
||||
|
||||
|
||||
oneAPI packages in Spack
|
||||
========================
|
||||
|
||||
Spack can install and use the Intel oneAPI products. You may either
|
||||
use spack to install the oneAPI tools or use the `Intel
|
||||
installers`_. After installation, you may use the tools directly, or
|
||||
use Spack to build packages with the tools.
|
||||
|
||||
The Spack Python class ``IntelOneapiPackage`` is a base class that is
|
||||
used by ``IntelOneapiCompilers``, ``IntelOneapiMkl``,
|
||||
``IntelOneapiTbb`` and other classes to implement the oneAPI
|
||||
packages. See the :ref:`package-list` for the full list of available
|
||||
oneAPI packages or use::
|
||||
|
||||
spack list -d oneAPI
|
||||
|
||||
For more information on a specific package, do::
|
||||
|
||||
spack info <package-name>
|
||||
|
||||
Intel no longer releases new versions of Parallel Studio, which can be
|
||||
used in Spack via the :ref:`intelpackage`. All of its components can
|
||||
now be found in oneAPI.
|
||||
|
||||
Examples
|
||||
========
|
||||
|
||||
Building a Package With icx
|
||||
---------------------------
|
||||
|
||||
In this example, we build patchelf with ``icc`` and ``icx``. The
|
||||
compilers are installed with spack.
|
||||
|
||||
Install the oneAPI compilers::
|
||||
|
||||
spack install intel-oneapi-compilers
|
||||
|
||||
Add the compilers to your ``compilers.yaml`` so spack can use them::
|
||||
|
||||
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin/intel64
|
||||
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin
|
||||
|
||||
Verify that the compilers are available::
|
||||
|
||||
spack compiler list
|
||||
|
||||
The ``intel-oneapi-compilers`` package includes two families of
|
||||
compilers:
|
||||
|
||||
* ``intel``: ``icc``, ``icpc``, ``ifort``. Intel's *classic*
|
||||
compilers.
|
||||
* ``oneapi``: ``icx``, ``icpx``, ``ifx``. Intel's new generation of
|
||||
compilers based on LLVM.
|
||||
|
||||
To build the ``patchelf`` Spack package with ``icc``, do::
|
||||
|
||||
spack install patchelf%intel
|
||||
|
||||
To build with ``icx``, do::
|
||||
|
||||
spack install patchelf%oneapi
|
||||
|
||||
Using oneAPI MPI to Satisfy a Virtual Dependence
|
||||
------------------------------------------------------
|
||||
|
||||
The ``hdf5`` package works with any compatible MPI implementation. To
|
||||
build ``hdf5`` with Intel oneAPI MPI do::
|
||||
|
||||
spack install hdf5 +mpi ^intel-oneapi-mpi
|
||||
|
||||
Using an Externally Installed oneAPI
|
||||
====================================
|
||||
|
||||
Spack can also use oneAPI tools that are manually installed with
|
||||
`Intel Installers`_. The procedures for configuring Spack to use
|
||||
external compilers and libraries are different.
|
||||
|
||||
Compilers
|
||||
---------
|
||||
|
||||
To use the compilers, add some information about the installation to
|
||||
``compilers.yaml``. For most users, it is sufficient to do::
|
||||
|
||||
spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin/intel64
|
||||
spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin
|
||||
|
||||
Adapt the paths above if you did not install the tools in the default
|
||||
location. After adding the compilers, using them is the same
|
||||
as if you had installed the ``intel-oneapi-compilers`` package.
|
||||
Another option is to manually add the configuration to
|
||||
``compilers.yaml`` as described in :ref:`Compiler configuration
|
||||
<compiler-config>`.
|
||||
|
||||
Libraries
|
||||
---------
|
||||
|
||||
If you want Spack to use MKL that you have installed without Spack in
|
||||
the default location, then add the following to
|
||||
``~/.spack/packages.yaml``, adjusting the version as appropriate::
|
||||
|
||||
intel-oneapi-mkl:
|
||||
externals:
|
||||
- spec: intel-oneapi-mkl@2021.1.1
|
||||
prefix: /opt/intel/oneapi/
|
||||
|
||||
|
||||
Using oneAPI Tools Installed by Spack
|
||||
=====================================
|
||||
|
||||
Spack can be a convenient way to install and configure compilers and
|
||||
libraries, even if you do not intend to build a Spack package. If you
|
||||
want to build a Makefile project using Spack-installed oneAPI compilers,
|
||||
then use spack to configure your environment::
|
||||
|
||||
spack load intel-oneapi-compilers
|
||||
|
||||
And then you can build with::
|
||||
|
||||
CXX=icpx make
|
||||
|
||||
You can also use Spack-installed libraries. For example::
|
||||
|
||||
spack load intel-oneapi-mkl
|
||||
|
||||
This will update your ``CPATH``, ``LIBRARY_PATH``, and other
|
||||
environment variables for building an application with MKL.
|
||||
|
||||
More information
|
||||
================
|
||||
|
||||
This section describes basic use of oneAPI, especially if it has
|
||||
changed compared to Parallel Studio. See :ref:`intelpackage` for more
|
||||
information on :ref:`intel-virtual-packages`,
|
||||
:ref:`intel-unrelated-packages`,
|
||||
:ref:`intel-integrating-external-libraries`, and
|
||||
:ref:`using-mkl-tips`.
|
||||
|
||||
|
||||
.. _`Intel installers`: https://software.intel.com/content/www/us/en/develop/documentation/installation-guide-for-intel-oneapi-toolkits-linux/top.html
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -137,7 +137,6 @@ If you need to save disk space or installation time, you could install the
|
||||
``intel`` compilers-only subset (0.6 GB) and just the library packages you
|
||||
need, for example ``intel-mpi`` (0.5 GB) and ``intel-mkl`` (2.5 GB).
|
||||
|
||||
.. _intel-unrelated-packages:
|
||||
|
||||
""""""""""""""""""""
|
||||
Unrelated packages
|
||||
@@ -359,8 +358,6 @@ affected by an advanced third method:
|
||||
Next, visit section `Selecting Intel Compilers`_ to learn how to tell
|
||||
Spack to use the newly configured compilers.
|
||||
|
||||
.. _intel-integrating-external-libraries:
|
||||
|
||||
""""""""""""""""""""""""""""""""""
|
||||
Integrating external libraries
|
||||
""""""""""""""""""""""""""""""""""
|
||||
@@ -421,13 +418,9 @@ Adapt the following example. Be sure to maintain the indentation:
|
||||
# other content ...
|
||||
|
||||
intel-mkl:
|
||||
externals:
|
||||
- spec: "intel-mkl@2018.2.199 arch=linux-centos6-x86_64"
|
||||
modules:
|
||||
- intel-mkl/18/18.0.2
|
||||
- spec: "intel-mkl@2018.3.222 arch=linux-centos6-x86_64"
|
||||
modules:
|
||||
- intel-mkl/18/18.0.3
|
||||
modules:
|
||||
intel-mkl@2018.2.199 arch=linux-centos6-x86_64: intel-mkl/18/18.0.2
|
||||
intel-mkl@2018.3.222 arch=linux-centos6-x86_64: intel-mkl/18/18.0.3
|
||||
|
||||
The version numbers for the ``intel-mkl`` specs defined here correspond to file
|
||||
and directory names that Intel uses for its products because they were adopted
|
||||
@@ -458,16 +451,12 @@ mechanism.
|
||||
|
||||
packages:
|
||||
intel-parallel-studio:
|
||||
externals:
|
||||
- spec: "intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64"
|
||||
modules:
|
||||
- intel/18/18.0.2
|
||||
- spec: "intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64"
|
||||
modules:
|
||||
- intel/18/18.0.3
|
||||
modules:
|
||||
intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64: intel/18/18.0.2
|
||||
intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64: intel/18/18.0.3
|
||||
buildable: False
|
||||
|
||||
One additional example illustrates the use of ``prefix:`` instead of
|
||||
One additional example illustrates the use of ``paths:`` instead of
|
||||
``modules:``, useful when external modulefiles are not available or not
|
||||
suitable:
|
||||
|
||||
@@ -475,15 +464,13 @@ suitable:
|
||||
|
||||
packages:
|
||||
intel-parallel-studio:
|
||||
externals:
|
||||
- spec: "intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal"
|
||||
prefix: /opt/intel
|
||||
- spec: "intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal"
|
||||
prefix: /opt/intel
|
||||
paths:
|
||||
intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal: /opt/intel
|
||||
intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal: /opt/intel
|
||||
buildable: False
|
||||
|
||||
Note that for the Intel packages discussed here, the directory values in the
|
||||
``prefix:`` entries must be the high-level and typically version-less
|
||||
``paths:`` entries must be the high-level and typically version-less
|
||||
"installation directory" that has been used by Intel's product installer.
|
||||
Such a directory will typically accumulate various product versions. Amongst
|
||||
them, Spack will select the correct version-specific product directory based on
|
||||
@@ -561,29 +548,43 @@ follow `the next section <intel-install-libs_>`_ instead.
|
||||
modules: []
|
||||
spec: intel@18.0.3
|
||||
paths:
|
||||
cc: /usr/bin/true
|
||||
cxx: /usr/bin/true
|
||||
f77: /usr/bin/true
|
||||
fc: /usr/bin/true
|
||||
cc: stub
|
||||
cxx: stub
|
||||
f77: stub
|
||||
fc: stub
|
||||
|
||||
Replace ``18.0.3`` with the version that you determined in the preceding
|
||||
step. The exact contents under ``paths:`` do not matter yet, but the paths must exist.
|
||||
step. The contents under ``paths:`` do not matter yet.
|
||||
|
||||
This temporary stub is required such that the ``intel-parallel-studio`` package
|
||||
can be installed for the ``intel`` compiler (which the package itself is going
|
||||
to provide after the installation) rather than an arbitrary system compiler.
|
||||
The paths given in ``cc``, ``cxx``, ``f77``, ``fc`` must exist, but will
|
||||
never be used to build anything during the installation of ``intel-parallel-studio``.
|
||||
You are right to ask: "Why on earth is that necessary?" [fn8]_.
|
||||
The answer lies in Spack striving for strict compiler consistency.
|
||||
Consider what happens without such a pre-declared compiler stub:
|
||||
Say, you ask Spack to install a particular version
|
||||
``intel-parallel-studio@edition.V``. Spack will apply an unrelated compiler
|
||||
spec to concretize and install your request, resulting in
|
||||
``intel-parallel-studio@edition.V %X``. That compiler ``%X`` is not going to
|
||||
be the version that this new package itself provides. Rather, it would
|
||||
typically be ``%gcc@...`` in a default Spack installation or possibly indeed
|
||||
``%intel@...``, but at a version that precedes ``V``.
|
||||
|
||||
The reason for this stub is that ``intel-parallel-studio`` also provides the
|
||||
``mpi`` and ``mkl`` packages and when concretizing a spec, Spack ensures
|
||||
strong consistency of the used compiler across all dependencies: [fn8]_.
|
||||
Installing a package ``foo +mkl %intel`` will make Spack look for a package
|
||||
``mkl %intel``, which can be provided by ``intel-parallel-studio+mkl %intel``,
|
||||
but not by ``intel-parallel-studio+mkl %gcc``.
|
||||
The problem comes to the fore as soon as you try to use any virtual ``mkl``
|
||||
or ``mpi`` packages that you would expect to now be provided by
|
||||
``intel-parallel-studio@edition.V``. Spack will indeed see those virtual
|
||||
packages, but only as being tied to the compiler that the package
|
||||
``intel-parallel-studio@edition.V`` was concretized with *at installation*.
|
||||
If you were to install a client package with the new compilers now available
|
||||
to you, you would naturally run ``spack install foo +mkl %intel@V``, yet
|
||||
Spack will either complain about ``mkl%intel@V`` being missing (because it
|
||||
only knows about ``mkl%X``) or it will go and attempt to install *another
|
||||
instance* of ``intel-parallel-studio@edition.V %intel@V`` so as to match the
|
||||
compiler spec ``%intel@V`` that you gave for your client package ``foo``.
|
||||
This will be unexpected and will quickly get annoying because each
|
||||
reinstallation takes up time and extra disk space.
|
||||
|
||||
Failure to do so may result in additional installations of ``mkl``, ``intel-mpi`` or
|
||||
even ``intel-parallel-studio`` as dependencies for other packages.
|
||||
To escape this trap, put the compiler stub declaration shown here in place,
|
||||
then use that pre-declared compiler spec to install the actual package, as
|
||||
shown next. This approach works because during installation only the
|
||||
package's own self-sufficient installer will be used, not any compiler.
|
||||
|
||||
.. _`verify-compiler-anticipated`:
|
||||
|
||||
@@ -634,25 +635,11 @@ follow `the next section <intel-install-libs_>`_ instead.
|
||||
want to use the ``intel64`` variant. The ``icpc`` and ``ifort`` compilers
|
||||
will be located in the same directory as ``icc``.
|
||||
|
||||
* Make sure to specify ``modules: ['intel-parallel-studio-cluster2018.3-intel-18.0.3-HASH']``
|
||||
(with ``HASH`` being the short hash as displayed when running
|
||||
``spack find -l intel-parallel-studio@cluster.2018.3`` and the versions adapted accordingly)
|
||||
to ensure that the correct and complete environment for the Intel compilers gets
|
||||
loaded when running them. With modern versions of the Intel compiler you may otherwise see
|
||||
issues about missing libraries. Please also note that module name must exactly match
|
||||
the name as returned by ``module avail`` (and shown in the example above).
|
||||
|
||||
* Use the ``modules:`` and/or ``cflags:`` tokens to further specify a suitable accompanying
|
||||
* Use the ``modules:`` and/or ``cflags:`` tokens to specify a suitable accompanying
|
||||
``gcc`` version to help pacify picky client packages that ask for C++
|
||||
standards more recent than supported by your system-provided ``gcc`` and its
|
||||
``libstdc++.so``.
|
||||
|
||||
* If you specified a custom variant (for example ``+vtune``) you may want to add this as your
|
||||
preferred variant in the packages configuration for the ``intel-parallel-studio`` package
|
||||
as described in :ref:`concretization-preferences`. Otherwise you will have to specify
|
||||
the variant every time ``intel-parallel-studio`` is being used as ``mkl``, ``fftw`` or ``mpi``
|
||||
implementation to avoid pulling in a different variant.
|
||||
|
||||
* To set the Intel compilers for default use in Spack, instead of the usual ``%gcc``,
|
||||
follow section `Selecting Intel compilers`_.
|
||||
|
||||
@@ -837,7 +824,6 @@ for example:
|
||||
compiler: [ intel@18, intel@17, gcc@4.4.7, gcc@4.9.3, gcc@7.3.0, ]
|
||||
|
||||
|
||||
.. _intel-virtual-packages:
|
||||
|
||||
""""""""""""""""""""""""""""""""""""""""""""""""
|
||||
Selecting libraries to satisfy virtual packages
|
||||
@@ -911,7 +897,6 @@ With the proper installation as detailed above, no special steps should be
|
||||
required when a client package specifically (and thus deliberately) requests an
|
||||
Intel package as dependency, this being one of the target use cases for Spack.
|
||||
|
||||
.. _using-mkl-tips:
|
||||
|
||||
"""""""""""""""""""""""""""""""""""""""""""""""
|
||||
Tips for configuring client packages to use MKL
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -147,10 +147,8 @@ and a ``filter_file`` method to help with this. For example:
|
||||
def edit(self, spec, prefix):
|
||||
makefile = FileFilter('Makefile')
|
||||
|
||||
makefile.filter(r'^\s*CC\s*=.*', 'CC = ' + spack_cc)
|
||||
makefile.filter(r'^\s*CXX\s*=.*', 'CXX = ' + spack_cxx)
|
||||
makefile.filter(r'^\s*F77\s*=.*', 'F77 = ' + spack_f77)
|
||||
makefile.filter(r'^\s*FC\s*=.*', 'FC = ' + spack_fc)
|
||||
makefile.filter('CC = gcc', 'CC = cc')
|
||||
makefile.filter('CXX = g++', 'CC = c++')
|
||||
|
||||
|
||||
`stream <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/stream/package.py>`_
|
||||
|
||||
@@ -1,102 +0,0 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
.. _mavenpackage:
|
||||
|
||||
------------
|
||||
MavenPackage
|
||||
------------
|
||||
|
||||
Apache Maven is a general-purpose build system that does not rely
|
||||
on Makefiles to build software. It is designed for building and
|
||||
managing Java-based projects.
|
||||
|
||||
^^^^^^
|
||||
Phases
|
||||
^^^^^^
|
||||
|
||||
The ``MavenPackage`` base class comes with the following phases:
|
||||
|
||||
#. ``build`` - compile code and package into a JAR file
|
||||
#. ``install`` - copy to installation prefix
|
||||
|
||||
By default, these phases run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ mvn package
|
||||
$ install . <prefix>
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^
|
||||
Important files
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Maven packages can be identified by the presence of a ``pom.xml`` file.
|
||||
This file lists dependencies and other metadata about the project.
|
||||
There may also be configuration files in the ``.mvn`` directory.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Build system dependencies
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Maven requires the ``mvn`` executable to build the project. It also
|
||||
requires Java at both build- and run-time. Because of this, the base
|
||||
class automatically adds the following dependencies:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
depends_on('java', type=('build', 'run'))
|
||||
depends_on('maven', type='build')
|
||||
|
||||
|
||||
In the ``pom.xml`` file, you may see sections like:
|
||||
|
||||
.. code-block:: xml
|
||||
|
||||
<requireJavaVersion>
|
||||
<version>[1.7,)</version>
|
||||
</requireJavaVersion>
|
||||
<requireMavenVersion>
|
||||
<version>[3.5.4,)</version>
|
||||
</requireMavenVersion>
|
||||
|
||||
|
||||
This specifies the versions of Java and Maven that are required to
|
||||
build the package. See
|
||||
https://docs.oracle.com/middleware/1212/core/MAVEN/maven_version.htm#MAVEN402
|
||||
for a description of this version range syntax. In this case, you
|
||||
should add:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
depends_on('java@7:', type='build')
|
||||
depends_on('maven@3.5.4:', type='build')
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Passing arguments to the build phase
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The default build and install phases should be sufficient to install
|
||||
most packages. However, you may want to pass additional flags to
|
||||
the build phase. For example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def build_args(self):
|
||||
return [
|
||||
'-Pdist,native',
|
||||
'-Dtar',
|
||||
'-Dmaven.javadoc.skip=true'
|
||||
]
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
External documentation
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
For more information on the Maven build system, see:
|
||||
https://maven.apache.org/index.html
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -121,15 +121,11 @@ override the ``meson_args`` method like so:
|
||||
.. code-block:: python
|
||||
|
||||
def meson_args(self):
|
||||
return ['--warnlevel=3']
|
||||
return ['--default-library=both']
|
||||
|
||||
|
||||
This method can be used to pass flags as well as variables.
|
||||
|
||||
Note that the ``MesonPackage`` base class already defines variants for
|
||||
``buildtype``, ``default_library`` and ``strip``, which are mapped to default
|
||||
Meson arguments, meaning that you don't have to specify these.
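
For example, a user of a ``MesonPackage``-based package could pick one of
the build types Meson understands directly on the command line (the
package name here is a placeholder):

.. code-block:: console

   $ spack install my-meson-package buildtype=debug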
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
External documentation
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -120,6 +120,8 @@ so ``PerlPackage`` contains:
|
||||
|
||||
extends('perl')
|
||||
|
||||
depends_on('perl', type=('build', 'run'))
|
||||
|
||||
|
||||
If your package requires a specific version of Perl, you should
|
||||
specify this.
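
For example, a hypothetical package that needs a newer Perl could tighten
the dependency like so (the version shown is purely illustrative):

.. code-block:: python

   depends_on('perl@5.24.1:', type=('build', 'run'))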
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -23,11 +23,20 @@ can be overridden:
|
||||
* ``build_ext``
|
||||
* ``build_clib``
|
||||
* ``build_scripts``
|
||||
* ``clean``
|
||||
* ``install``
|
||||
* ``install_lib``
|
||||
* ``install_headers``
|
||||
* ``install_scripts``
|
||||
* ``install_data``
|
||||
* ``sdist``
|
||||
* ``register``
|
||||
* ``bdist``
|
||||
* ``bdist_dumb``
|
||||
* ``bdist_rpm``
|
||||
* ``bdist_wininst``
|
||||
* ``upload``
|
||||
* ``check``
|
||||
|
||||
These are all standard ``setup.py`` commands and can be found by running:
|
||||
|
||||
@@ -46,7 +55,7 @@ If for whatever reason you need to run more phases, simply modify your
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
phases = ['build_ext', 'install']
|
||||
phases = ['build_ext', 'install', 'bdist']
|
||||
|
||||
|
||||
Each phase provides a function ``<phase>`` that runs:
|
||||
@@ -72,24 +81,6 @@ you'll need to define a function for it like so:
|
||||
self.setup_py('configure')
|
||||
|
||||
|
||||
^^^^^^
|
||||
Wheels
|
||||
^^^^^^
|
||||
|
||||
Some Python packages are closed-source and distributed as wheels.
|
||||
Instead of using the ``PythonPackage`` base class, you should extend
|
||||
the ``Package`` base class and implement the following custom installation
|
||||
procedure:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def install(self, spec, prefix):
|
||||
pip = which('pip')
|
||||
pip('install', self.stage.archive_file, '--prefix={0}'.format(prefix))
|
||||
|
||||
|
||||
This will require a dependency on pip, as mentioned below.
|
||||
|
||||
^^^^^^^^^^^^^^^
Important files
^^^^^^^^^^^^^^^
@@ -104,30 +95,9 @@ file should be considered to be the truth. As dependencies are added or
removed, the documentation is much more likely to become outdated than
the ``setup.py``.

The Python ecosystem has evolved significantly over the years. Before
setuptools became popular, most packages listed their dependencies in a
``requirements.txt`` file. Once setuptools took over, these dependencies
were listed directly in the ``setup.py``. Newer PEPs introduced additional
files, like ``setup.cfg`` and ``pyproject.toml``. You should look out for
all of these files, as they may all contain important information about
package dependencies.

Some Python packages are closed-source and are distributed as Python
wheels. For example, ``py-azureml-sdk`` downloads a ``.whl`` file. This
file is simply a zip file, and can be extracted using:

.. code-block:: console

   $ unzip *.whl

The zip file will not contain a ``setup.py``, but it will contain a
``METADATA`` file which contains all the information you need to
write a ``package.py`` build recipe.
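Pieced together from the fragments quoted elsewhere in this diff, a wheel-only
recipe might look roughly like the following sketch (the wheel URL and checksum
are the ``py-azureml-sdk`` values shown later in this diff; treat all names
here as illustrative rather than authoritative):

.. code-block:: python

   class PyAzuremlSdk(Package):
       """Sketch of a package that installs a closed-source wheel."""

       homepage = 'https://pypi.org/project/azureml-sdk/'
       url = 'https://pypi.io/packages/py3/a/azureml_sdk/azureml_sdk-1.11.0-py3-none-any.whl'

       # expand=False keeps Spack from trying to unpack the .whl archive.
       version('1.11.0', sha256='d8c9d24ea90457214d798b0d922489863dad518adde3638e08ef62de28fb183a',
               expand=False)

       extends('python')
       depends_on('py-pip', type='build')

       def install(self, spec, prefix):
           # Let pip unpack and install the wheel into the Spack prefix.
           pip = which('pip')
           pip('install', self.stage.archive_file, '--prefix={0}'.format(prefix))
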
^^^^
PyPI
^^^^
^^^^^^^^^^^^^^^^^^^^^^^
Finding Python packages
^^^^^^^^^^^^^^^^^^^^^^^

The vast majority of Python packages are hosted on PyPI - The Python
Package Index. ``pip`` only supports packages hosted on PyPI, making
@@ -135,29 +105,8 @@ it the only option for developers who want a simple installation.
Search for "PyPI <package-name>" to find the download page. Note that
some pages are versioned, and the first result may not be the newest
version. Click on the "Latest Version" button to the top right to see
if a newer version is available. The download page is usually at::

   https://pypi.org/project/<package-name>

Since PyPI is so common, the ``PythonPackage`` base class has a
``pypi`` attribute that can be set. Once set, ``pypi`` will be used
to define the ``homepage``, ``url``, and ``list_url``. For example,
the following:

.. code-block:: python

   homepage = 'https://pypi.org/project/setuptools/'
   url = 'https://pypi.org/packages/source/s/setuptools/setuptools-49.2.0.zip'
   list_url = 'https://pypi.org/simple/setuptools/'

is equivalent to:

.. code-block:: python

   pypi = 'setuptools/setuptools-49.2.0.zip'

if a newer version is available. The download page is usually at:
https://pypi.org/project/<package-name>

^^^^^^^^^^^
Description
^^^^^^^^^^^
@@ -195,38 +144,50 @@ also get the homepage on the command-line by running:
|
||||
URL
|
||||
^^^
|
||||
|
||||
If ``pypi`` is set as mentioned above, ``url`` and ``list_url`` will
|
||||
be automatically set for you. If both ``.tar.gz`` and ``.zip`` versions
|
||||
are available, ``.tar.gz`` is preferred. If some releases offer both
|
||||
``.tar.gz`` and ``.zip`` versions, but some only offer ``.zip`` versions,
|
||||
use ``.zip``.
|
||||
You may have noticed that Spack allows you to add multiple versions of
|
||||
the same package without adding multiple versions of the download URL.
|
||||
It does this by guessing what the version string in the URL is and
|
||||
replacing this with the requested version. Obviously, if Spack cannot
|
||||
guess the version correctly, or if non-version-related things change
|
||||
in the URL, Spack cannot substitute the version properly.
|
||||
|
||||
Some Python packages are closed-source and do not ship ``.tar.gz`` or ``.zip``
|
||||
files on either PyPI or GitHub. If this is the case, you can still download
|
||||
and install a Python wheel. For example, ``py-azureml-sdk`` is closed source
|
||||
and can be downloaded from::
|
||||
Once upon a time, PyPI offered nice, simple download URLs like:
|
||||
https://pypi.python.org/packages/source/n/numpy/numpy-1.13.1.zip
|
||||
|
||||
https://pypi.io/packages/py3/a/azureml_sdk/azureml_sdk-1.11.0-py3-none-any.whl
|
||||
As you can see, the version is 1.13.1. It probably isn't hard to guess
|
||||
what URL to use to download version 1.12.0, and Spack was perfectly
|
||||
capable of performing this calculation.
|
||||
|
||||
However, PyPI switched to a new download URL format:
|
||||
https://pypi.python.org/packages/c0/3a/40967d9f5675fbb097ffec170f59c2ba19fc96373e73ad47c2cae9a30aed/numpy-1.13.1.zip#md5=2c3c0f4edf720c3a7b525dacc825b9ae
|
||||
|
||||
You may see Python-specific or OS-specific URLs. Note that when you add a
|
||||
``.whl`` URL, you should add ``expand=False`` to ensure that Spack doesn't
|
||||
try to extract the wheel:
|
||||
and more recently:
|
||||
https://files.pythonhosted.org/packages/b0/2b/497c2bb7c660b2606d4a96e2035e92554429e139c6c71cdff67af66b58d2/numpy-1.14.3.zip
|
||||
|
||||
.. code-block:: python
|
||||
As you can imagine, it is impossible for Spack to guess what URL to
|
||||
use to download version 1.12.0 given this URL. There is a solution,
|
||||
however. PyPI offers a new hidden interface for downloading
|
||||
Python packages that does not include a hash in the URL:
|
||||
https://pypi.io/packages/source/n/numpy/numpy-1.13.1.zip
|
||||
|
||||
version('1.11.0', sha256='d8c9d24ea90457214d798b0d922489863dad518adde3638e08ef62de28fb183a', expand=False)
|
||||
This URL redirects to the files.pythonhosted.org URL. The general syntax for
|
||||
this pypi.io URL is:
|
||||
https://pypi.io/packages/source/<first-letter-of-name>/<name>/<name>-<version>.<extension>
|
||||
|
||||
Please use the pypi.io URL instead of the pypi.python.org URL. If both
|
||||
``.tar.gz`` and ``.zip`` versions are available, ``.tar.gz`` is preferred.
|
||||
If some releases offer both ``.tar.gz`` and ``.zip`` versions, but some
|
||||
only offer ``.zip`` versions, use ``.zip``.
|
||||
|
||||
"""""""""""""""
|
||||
PyPI vs. GitHub
|
||||
"""""""""""""""
|
||||
|
||||
Many packages are hosted on PyPI, but are developed on GitHub or another
|
||||
Many packages are hosted on PyPI, but are developed on GitHub and other
|
||||
version control systems. The tarball can be downloaded from either
|
||||
location, but PyPI is preferred for the following reasons:
|
||||
|
||||
#. PyPI contains the bare minimum number of files needed to install the package.
|
||||
#. PyPI contains the bare minimum of files to install the package.
|
||||
|
||||
You may notice that the tarball you download from PyPI does not
|
||||
have the same checksum as the tarball you download from GitHub.
|
||||
@@ -263,6 +224,25 @@ location, but PyPI is preferred for the following reasons:
|
||||
PyPI is nice because it makes it physically impossible to
|
||||
re-release the same version of a package with a different checksum.
|
||||
|
||||
There are some reasons to prefer downloading from GitHub:
|
||||
|
||||
#. The GitHub tarball may contain unit tests
|
||||
|
||||
As previously mentioned, the PyPI tarball contains the bare minimum
|
||||
of files to install the package. Unless explicitly specified by the
|
||||
developers, it will not contain development files like unit tests.
|
||||
If you desire to run the unit tests during installation, you should
|
||||
use the GitHub tarball instead.
|
||||
|
||||
#. Spack does not yet support ``spack versions`` and ``spack checksum``
|
||||
with PyPI URLs
|
||||
|
||||
These commands work just fine with GitHub URLs. This is a minor
|
||||
annoyance, not a reason to prefer GitHub over PyPI.
|
||||
|
||||
If you really want to run these unit tests, no one will stop you from
|
||||
submitting a PR for a new package that downloads from GitHub.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Build system dependencies
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@@ -282,26 +262,26 @@ mentions that Python 3 is required, this can be specified as:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
depends_on('python@3:', type=('build', 'run'))
|
||||
depends_on('python@3:', type=('build', 'run')
|
||||
|
||||
|
||||
If Python 2 is required, this would look like:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
depends_on('python@:2', type=('build', 'run'))
|
||||
depends_on('python@:2', type=('build', 'run')
|
||||
|
||||
|
||||
If Python 2.7 is the only version that works, you can use:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
depends_on('python@2.7:2.8', type=('build', 'run'))
|
||||
depends_on('python@2.7:2.8', type=('build', 'run')
|
||||
|
||||
|
||||
The documentation may not always specify supported Python versions.
|
||||
Another place to check is in the ``setup.py`` or ``setup.cfg`` file.
|
||||
Look for a line containing ``python_requires``. An example from
|
||||
Another place to check is in the ``setup.py`` file. Look for a line
|
||||
containing ``python_requires``. An example from
|
||||
`py-numpy <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/py-numpy/package.py>`_
|
||||
looks like:
|
||||
|
||||
@@ -310,7 +290,7 @@ looks like:
|
||||
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*'
|
||||
|
||||
|
||||
You may also find a version check at the top of the ``setup.py``:
|
||||
More commonly, you will find a version check at the top of the file:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@@ -325,39 +305,6 @@ This can be converted to Spack's spec notation like so:
|
||||
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
|
||||
|
||||
|
||||
If you are writing a recipe for a package that only distributes
|
||||
wheels, look for a section in the ``METADATA`` file that looks like::
|
||||
|
||||
Requires-Python: >=3.5,<4
|
||||
|
||||
|
||||
This would be translated to:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
extends('python')
|
||||
depends_on('python@3.5:3.999', type=('build', 'run'))
|
||||
|
||||
|
||||
Many ``setup.py`` or ``setup.cfg`` files also contain information like::
|
||||
|
||||
Programming Language :: Python :: 2
|
||||
Programming Language :: Python :: 2.6
|
||||
Programming Language :: Python :: 2.7
|
||||
Programming Language :: Python :: 3
|
||||
Programming Language :: Python :: 3.3
|
||||
Programming Language :: Python :: 3.4
|
||||
Programming Language :: Python :: 3.5
|
||||
Programming Language :: Python :: 3.6
|
||||
|
||||
|
||||
This is a list of versions of Python that the developer likely tests.
|
||||
However, you should not use this to restrict the versions of Python
|
||||
the package uses unless one of the two former methods (``python_requires``
|
||||
or ``sys.version_info``) is used. There is no logic in setuptools
|
||||
that prevents the package from building for Python versions not in
|
||||
this list, and often new releases like Python 3.7 or 3.8 work just fine.
|
||||
|
||||
""""""""""
|
||||
setuptools
|
||||
""""""""""
|
||||
@@ -370,7 +317,7 @@ Most notably, there was no way to list a project's dependencies
|
||||
with distutils. Along came setuptools, a non-builtin build system
|
||||
designed to overcome the limitations of distutils. Both projects
|
||||
use a similar API, making the transition easy while adding much
|
||||
needed functionality. Today, setuptools is used in around 90% of
|
||||
needed functionality. Today, setuptools is used in around 75% of
|
||||
the Python packages in Spack.
|
||||
|
||||
Since setuptools isn't built-in to Python, you need to add it as a
|
||||
@@ -413,20 +360,6 @@ run-time. This can be specified as:
|
||||
depends_on('py-setuptools', type='build')
|
||||
|
||||
|
||||
"""
|
||||
pip
|
||||
"""
|
||||
|
||||
Packages distributed as Python wheels will require an extra dependency
|
||||
on pip:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
depends_on('py-pip', type='build')
|
||||
|
||||
|
||||
We will use pip to install the actual wheel.
|
||||
|
||||
""""""
|
||||
cython
|
||||
""""""
|
||||
@@ -450,12 +383,6 @@ where speed is crucial. There is no reason why someone would not
|
||||
want an optimized version of a library instead of the pure-Python
|
||||
version.
|
||||
|
||||
Note that some release tarballs come pre-cythonized, and cython is
|
||||
not needed as a dependency. However, this is becoming less common
|
||||
as Python continues to evolve and developers discover that cythonized
|
||||
sources are no longer compatible with newer versions of Python and
|
||||
need to be re-cythonized.
|
||||
|
||||
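When Cython is needed to regenerate the C sources, the dependency is normally
build-only; a minimal sketch:

.. code-block:: python

   # Cython generates C sources at build time and is not needed at run-time.
   depends_on('py-cython', type='build')
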
^^^^^^^^^^^^^^^^^^^
Python dependencies
^^^^^^^^^^^^^^^^^^^
@@ -502,33 +429,21 @@ Obviously, this means that ``py-numpy`` is a dependency.

If the package uses ``setuptools``, check for the following clues:

* ``python_requires``

  As mentioned above, this specifies which versions of Python are
  required.

* ``setup_requires``

  These packages are usually only needed at build-time, so you can
  add them with ``type='build'``.

* ``install_requires``

  These packages are required for building and installation. You can
  add them with ``type=('build', 'run')``.
  These packages are required for installation.

* ``extra_requires``

  These packages are optional dependencies that enable additional
  functionality. You should add a variant that optionally adds these
  dependencies. This variant should be False by default.
  dependencies.

* ``test_requires``

  These are packages that are required to run the unit tests for the
  package. These dependencies can be specified using the
  ``type='test'`` dependency type. However, the PyPI tarballs rarely
  contain unit tests, so there is usually no reason to add these.
  ``type='test'`` dependency type.
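As an illustration only (the dependency names below are hypothetical stand-ins,
not taken from any particular package), these clues usually translate into a
``package.py`` roughly as follows:

.. code-block:: python

   # setup_requires -> build-only dependency
   depends_on('py-setuptools-scm', type='build')

   # install_requires -> needed at build- and run-time
   depends_on('py-numpy', type=('build', 'run'))

   # extra_requires -> optional dependency guarded by a variant (off by default)
   variant('plots', default=False, description='Enable plotting support')
   depends_on('py-matplotlib', when='+plots', type=('build', 'run'))

   # test_requires -> only needed when running the unit tests
   depends_on('py-pytest', type='test')
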
In the root directory of the package, you may notice a
``requirements.txt`` file. It may look like this file contains a list
@@ -546,37 +461,13 @@ sphinx. If you can't find any information about the package's
dependencies, you can take a look in ``requirements.txt``, but be sure
not to add test or documentation dependencies.

Newer PEPs have added alternative ways to specify a package's dependencies.
If you don't see any dependencies listed in the ``setup.py``, look for a
``setup.cfg`` or ``pyproject.toml``. These files can be used to store the
same ``install_requires`` information that ``setup.py`` used to use.

If you are writing a recipe for a package that only distributes wheels,
check the ``METADATA`` file for lines like::

   Requires-Dist: azureml-core (~=1.11.0)
   Requires-Dist: azureml-dataset-runtime[fuse] (~=1.11.0)
   Requires-Dist: azureml-train (~=1.11.0)
   Requires-Dist: azureml-train-automl-client (~=1.11.0)
   Requires-Dist: azureml-pipeline (~=1.11.0)
   Provides-Extra: accel-models
   Requires-Dist: azureml-accel-models (~=1.11.0); extra == 'accel-models'
   Provides-Extra: automl
   Requires-Dist: azureml-train-automl (~=1.11.0); extra == 'automl'

Lines that use ``Requires-Dist`` are similar to ``install_requires``.
Lines that use ``Provides-Extra`` are similar to ``extra_requires``,
and you can add a variant for those dependencies. The ``~=1.11.0``
syntax is equivalent to ``1.11.0:1.11.999``.
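For example, the first ``Requires-Dist`` line above would translate into
something like the following sketch (version range per the ``~=`` rule just
described):

.. code-block:: python

   # Requires-Dist: azureml-core (~=1.11.0)
   depends_on('py-azureml-core@1.11.0:1.11.999', type=('build', 'run'))
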
""""""""""
|
||||
setuptools
|
||||
""""""""""
|
||||
|
||||
Setuptools is a bit of a special case. If a package requires setuptools
|
||||
at run-time, how do they express this? They could add it to
|
||||
``install_requires``, but setuptools is imported long before this and is
|
||||
``install_requires``, but setuptools is imported long before this and
|
||||
needed to read this line. And since you can't install the package
|
||||
without setuptools, the developers assume that setuptools will already
|
||||
be there, so they never mention when it is required. We don't want to
|
||||
@@ -584,8 +475,7 @@ add run-time dependencies if they aren't needed, so you need to
|
||||
determine whether or not setuptools is needed. Grep the installation
|
||||
directory for any files containing a reference to ``setuptools`` or
|
||||
``pkg_resources``. Both modules come from ``py-setuptools``.
|
||||
``pkg_resources`` is particularly common in scripts found in
|
||||
``prefix/bin``.
|
||||
``pkg_resources`` is particularly common in scripts in ``prefix/bin``.
|
||||
|
||||
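If the grep does turn something up, the dependency type is simply widened to
include run-time; a sketch:

.. code-block:: python

   # setuptools/pkg_resources are imported at run-time as well as build-time
   depends_on('py-setuptools', type=('build', 'run'))
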
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Passing arguments to setup.py
|
||||
@@ -627,8 +517,7 @@ adds:
|
||||
Testing
|
||||
^^^^^^^
|
||||
|
||||
``PythonPackage`` provides a couple of options for testing packages
|
||||
both during and after the installation process.
|
||||
``PythonPackage`` provides a couple of options for testing packages.
|
||||
|
||||
""""""""""""
|
||||
Import tests
|
||||
@@ -660,75 +549,47 @@ a "package" is a directory containing files like:
|
||||
foo/baz.py
|
||||
|
||||
|
||||
whereas a "module" is a single Python file.
|
||||
|
||||
The ``PythonPackage`` base class automatically detects these module
|
||||
names for you. If, for whatever reason, the module names detected
|
||||
are wrong, you can provide the names yourself by overriding
|
||||
``import_modules`` like so:
|
||||
whereas a "module" is a single Python file. Since ``find_packages``
|
||||
only returns packages, you'll have to determine the correct module
|
||||
names yourself. You can now add these packages and modules to the
|
||||
package like so:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import_modules = ['six']
|
||||
|
||||
|
||||
Sometimes the list of module names to import depends on how the
|
||||
package was built. For example, the ``py-pyyaml`` package has a
|
||||
``+libyaml`` variant that enables the build of a faster optimized
|
||||
version of the library. If the user chooses ``~libyaml``, only the
|
||||
``yaml`` library will be importable. If the user chooses ``+libyaml``,
|
||||
both the ``yaml`` and ``yaml.cyaml`` libraries will be available.
|
||||
This can be expressed like so:
|
||||
When you run ``spack install --test=root py-six``, Spack will attempt
|
||||
to import the ``six`` module after installation.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@property
|
||||
def import_modules(self):
|
||||
modules = ['yaml']
|
||||
|
||||
if '+libyaml' in self.spec:
|
||||
modules.append('yaml.cyaml')
|
||||
|
||||
return modules
|
||||
|
||||
|
||||
These tests often catch missing dependencies and non-RPATHed
|
||||
These tests most often catch missing dependencies and non-RPATHed
|
||||
libraries. Make sure not to add modules/packages containing the word
|
||||
"test", as these likely won't end up in the installation directory,
|
||||
or may require test dependencies like pytest to be installed.
|
||||
|
||||
Import tests can be run during the installation using ``spack install
|
||||
--test=root`` or at any time after the installation using
|
||||
``spack test run``.
|
||||
"test", as these likely won't end up in installation directory.
|
||||
|
||||
""""""""""
|
||||
Unit tests
|
||||
""""""""""
|
||||
|
||||
The package may have its own unit or regression tests. Spack can
|
||||
run these tests during the installation by adding phase-appropriate
|
||||
test methods.
|
||||
The package you want to install may come with additional unit tests.
|
||||
By default, Spack runs:
|
||||
|
||||
For example, ``py-numpy`` adds the following as a check to run
|
||||
after the ``install`` phase:
|
||||
.. code-block:: console
|
||||
|
||||
$ python setup.py test
|
||||
|
||||
|
||||
if it detects that the ``setup.py`` file supports a ``test`` phase.
|
||||
You can add additional build-time or install-time tests by overriding
|
||||
``test`` and ``installtest``, respectively. For example, ``py-numpy``
|
||||
adds:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@run_after('install')
|
||||
@on_package_attributes(run_tests=True)
|
||||
def install_test(self):
|
||||
with working_dir('spack-test', create=True):
|
||||
python('-c', 'import numpy; numpy.test("full", verbose=2)')
|
||||
with working_dir('..'):
|
||||
python('-c', 'import numpy; numpy.test("full", verbose=2)')
|
||||
|
||||
|
||||
when testing is enabled during the installation (i.e., ``spack install
|
||||
--test=root``).
|
||||
|
||||
.. note::
|
||||
|
||||
Additional information is available on :ref:`install phase tests
|
||||
<install_phase-tests>`.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Setup file in a sub-directory
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@@ -768,7 +629,7 @@ PythonPackage vs. packages that use Python
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
There are many packages that make use of Python, but packages that depend
|
||||
on Python are not necessarily ``PythonPackage``'s.
|
||||
on Python are not necessarily ``PythonPackages``.
|
||||
|
||||
"""""""""""""""""""""""
|
||||
Choosing a build system
|
||||
@@ -790,8 +651,6 @@ that the package uses the ``PythonPackage`` build system. However, there
|
||||
are occasionally packages that use ``PythonPackage`` that shouldn't
|
||||
start with ``py-``. For example:
|
||||
|
||||
* awscli
|
||||
* aws-parallelcluster
|
||||
* busco
|
||||
* easybuild
|
||||
* httpie
|
||||
@@ -865,8 +724,8 @@ and ``pip`` may be a perfectly valid alternative to using Spack. The
|
||||
main advantage of Spack over ``pip`` is its ability to compile
|
||||
non-Python dependencies. It can also build cythonized versions of a
|
||||
package or link to an optimized BLAS/LAPACK library like MKL,
|
||||
resulting in calculations that run orders of magnitudes faster.
|
||||
Spack does not offer a significant advantage over other python-management
|
||||
resulting in calculations that run orders of magnitude faster.
|
||||
Spack does not offer a significant advantage to other python-management
|
||||
systems for installing and using tools like flake8 and sphinx.
|
||||
But if you need packages with non-Python dependencies like
|
||||
numpy and scipy, Spack will be very valuable to you.
|
||||
@@ -877,9 +736,8 @@ non-Python dependencies. Anaconda contains many Python packages that
|
||||
are not yet in Spack, and Spack contains many Python packages that are
|
||||
not yet in Anaconda. The main advantage of Spack over Anaconda is its
|
||||
ability to choose a specific compiler and BLAS/LAPACK or MPI library.
|
||||
Spack also has better platform support for supercomputers, and can build
|
||||
optimized binaries for your specific microarchitecture. On the other hand,
|
||||
Anaconda offers Windows support.
|
||||
Spack also has better platform support for supercomputers. On the
|
||||
other hand, Anaconda offers Windows support.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
External documentation
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -108,19 +108,6 @@ override the ``qmake_args`` method like so:
|
||||
|
||||
This method can be used to pass flags as well as variables.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
``*.pro`` file in a sub-directory
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If the ``*.pro`` file used to tell QMake how to build the package is
|
||||
found in a sub-directory, you can tell Spack to run all phases in this
|
||||
sub-directory by adding the following to the package:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
build_directory = 'src'
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
External documentation
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
@@ -1,122 +0,0 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
.. _rocmpackage:
|
||||
|
||||
-----------
|
||||
ROCmPackage
|
||||
-----------
|
||||
|
||||
The ``ROCmPackage`` is not a build system but a helper package. Like ``CudaPackage``,
|
||||
it provides standard variants, dependencies, and conflicts to facilitate building
|
||||
packages using GPUs though for AMD in this case.
|
||||
|
||||
You can find the source for this package (and suggestions for setting up your
|
||||
``compilers.yaml`` and ``packages.yaml`` files) at
|
||||
`<https://github.com/spack/spack/blob/develop/lib/spack/spack/build_systems/rocm.py>`__.
|
||||
|
||||
^^^^^^^^
|
||||
Variants
|
||||
^^^^^^^^
|
||||
|
||||
This package provides the following variants:
|
||||
|
||||
* **rocm**
|
||||
|
||||
This variant is used to enable/disable building with ``rocm``.
|
||||
The default is disabled (or ``False``).
|
||||
|
||||
* **amdgpu_target**
|
||||
|
||||
This variant supports the optional specification of the AMD GPU architecture.
|
||||
Valid values are the names of the GPUs (e.g., ``gfx701``), which are maintained
|
||||
in the ``amdgpu_targets`` property.
|
||||
|
||||
^^^^^^^^^^^^
|
||||
Dependencies
|
||||
^^^^^^^^^^^^
|
||||
|
||||
This package defines basic ``rocm`` dependencies, including ``llvm`` and ``hip``.
|
||||
|
||||
^^^^^^^^^
|
||||
Conflicts
|
||||
^^^^^^^^^
|
||||
|
||||
Conflicts are used to prevent builds with known bugs or issues. This package
|
||||
already requires that the ``amdgpu_target`` always be specified for ``rocm``
|
||||
builds. It also defines a conflict that prevents builds with an ``amdgpu_target``
|
||||
when ``rocm`` is disabled.
|
||||
|
||||
Refer to `Conflicts <https://spack.readthedocs.io/en/latest/packaging_guide.html?highlight=conflicts#conflicts>`__
|
||||
for more information on package conflicts.
|
||||
|
||||
^^^^^^^
|
||||
Methods
|
||||
^^^^^^^
|
||||
|
||||
This package provides one custom helper method, which is used to build
|
||||
standard AMD hip compiler flags.
|
||||
|
||||
**hip_flags**
|
||||
|
||||
This built-in static method returns the appropriately formatted
|
||||
``--amdgpu-target`` build option for ``hipcc``.
|
||||
|
||||
This method must be explicitly called when you are creating the
|
||||
arguments for your build in order to use the values.
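As a rough sketch of how that call might look (the exact signature should be
checked against ``rocm.py``; the assumption here is that it takes the set of
requested GPU targets):

.. code-block:: python

   # Assumed usage -- verify against lib/spack/spack/build_systems/rocm.py
   amdgpu_targets = self.spec.variants['amdgpu_target'].value
   hipcc_flags = ROCmPackage.hip_flags(amdgpu_targets)
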
|
||||
|
||||
^^^^^
|
||||
Usage
|
||||
^^^^^
|
||||
|
||||
This helper package can be added to your package by adding it as a base
|
||||
class of your package. For example, you can add it to your
|
||||
:ref:`CMakePackage <cmakepackage>`-based package as follows:
|
||||
|
||||
.. code-block:: python
|
||||
:emphasize-lines: 1,3-7,14-25
|
||||
|
||||
class MyRocmPackage(CMakePackage, ROCmPackage):
|
||||
...
|
||||
# Ensure +rocm and amdgpu_targets are passed to dependencies
|
||||
depends_on('mydeppackage', when='+rocm')
|
||||
for val in ROCmPackage.amdgpu_targets:
|
||||
depends_on('mydeppackage amdgpu_target={0}'.format(val),
|
||||
when='amdgpu_target={0}'.format(val))
|
||||
...
|
||||
|
||||
def cmake_args(self):
|
||||
spec = self.spec
|
||||
args = []
|
||||
...
|
||||
if '+rocm' in spec:
|
||||
# Set up the hip macros needed by the build
|
||||
args.extend([
|
||||
'-DENABLE_HIP=ON',
|
||||
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix)])
|
||||
rocm_archs = spec.variants['amdgpu_target'].value
|
||||
if 'none' not in rocm_archs:
|
||||
args.append('-DHIP_HIPCC_FLAGS=--amdgpu-target={0}'
|
||||
.format(",".join(rocm_archs)))
|
||||
else:
|
||||
# Ensure build with hip is disabled
|
||||
args.append('-DENABLE_HIP=OFF')
|
||||
...
|
||||
return args
|
||||
...
|
||||
|
||||
assuming only the ``ENABLE_HIP``, ``HIP_ROOT_DIR``, and ``HIP_HIPCC_FLAGS``
|
||||
macros are required to be set and the only dependency needing rocm options
|
||||
is ``mydeppackage``. You will need to customize the flags as needed for your
|
||||
build.
|
||||
|
||||
This example also illustrates how to check for the ``rocm`` variant using
|
||||
``self.spec`` and how to retrieve the ``amdgpu_target`` variant's value
|
||||
using ``self.spec.variants['amdgpu_target'].value``.
|
||||
|
||||
All five packages using ``ROCmPackage`` as of January 2021 also use the
|
||||
:ref:`CudaPackage <cudapackage>`. So it is worth looking at those packages
|
||||
to get ideas for creating a package that can support both ``cuda`` and
|
||||
``rocm``.
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -79,14 +79,12 @@ Description
|
||||
The first thing you'll need to add to your new package is a description.
|
||||
The top of the homepage for ``caret`` lists the following description:
|
||||
|
||||
Classification and Regression Training
|
||||
caret: Classification and Regression Training
|
||||
|
||||
Misc functions for training and plotting classification and regression models.
|
||||
|
||||
The first line is a short description (title) and the second line is a long
|
||||
description. In this case the description is only one line but often the
|
||||
description is several lines. Spack makes use of both short and long
|
||||
descriptions and convention is to use both when creating an R package.
|
||||
You can either use the short description (first line), long description
|
||||
(second line), or both depending on what you feel is most appropriate.
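Put together, a docstring built from the ``caret`` text quoted above might look
roughly like this sketch:

.. code-block:: python

   class RCaret(RPackage):
       """caret: Classification and Regression Training

       Misc functions for training and plotting classification and
       regression models."""
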
|
||||
|
||||
^^^^^^^^
|
||||
Homepage
|
||||
@@ -126,67 +124,6 @@ If you only specify the URL for the latest release, your package will
|
||||
no longer be able to fetch that version as soon as a new release comes
|
||||
out. To get around this, add the archive directory as a ``list_url``.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
Bioconductor packages
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Bioconductor packages are set up in a similar way to CRAN packages, but there
|
||||
are some very important distinctions. Bioconductor packages can be found at:
|
||||
https://bioconductor.org/. Bioconductor packages are R packages and so follow
|
||||
the same packaging scheme as CRAN packages. What is different is that
|
||||
Bioconductor itself is versioned and released. This scheme, using the
|
||||
Bioconductor package installer, allows further specification of the minimum
|
||||
version of R as well as further restrictions on the dependencies between
|
||||
packages than what is possible with the native R packaging system. Spack can
|
||||
not replicate these extra features and thus Bioconductor packages in Spack need
|
||||
to be managed as a group during updates in order to maintain package
|
||||
consistency with Bioconductor itself.
|
||||
|
||||
Another key difference is that, while previous versions of packages are
|
||||
available, they are not available from a site that can be programmatically set,
|
||||
thus a ``list_url`` attribute can not be used. However, each package is also
|
||||
available in a git repository, with branches corresponding to each Bioconductor
|
||||
release. Thus, it is always possible to retrieve the version of any package
|
||||
corresponding to a Bioconductor release simply by fetching the branch that
|
||||
corresponds to the Bioconductor release of the package repository. For this
|
||||
reason, spack Bioconductor R packages use the git repository, with the commit
|
||||
of the respective branch used in the ``version()`` attribute of the package.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
cran and bioc attributes
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Much like the ``pypi`` attribute for python packages, due to the fact that R
|
||||
packages are obtained from specific repositories, it is possible to set up shortcut
|
||||
attributes that can be used to set ``homepage``, ``url``, ``list_url``, and
|
||||
``git``. For example, the following ``cran`` attribute:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
cran = 'caret'
|
||||
|
||||
is equivalent to:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
homepage = 'https://cloud.r-project.org/package=caret'
|
||||
url = 'https://cloud.r-project.org/src/contrib/caret_6.0-86.tar.gz'
|
||||
list_url = 'https://cloud.r-project.org/src/contrib/Archive/caret'
|
||||
|
||||
Likewise, the following ``bioc`` attribute:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
bioc = 'BiocVersion'
|
||||
|
||||
is equivalent to:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
homepage = 'https://bioconductor.org/packages/BiocVersion/'
|
||||
git = 'https://git.bioconductor.org/packages/BiocVersion'
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Build system dependencies
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@@ -201,6 +138,7 @@ every R package needs this, the ``RPackage`` base class contains:
|
||||
.. code-block:: python
|
||||
|
||||
extends('r')
|
||||
depends_on('r', type=('build', 'run'))
|
||||
|
||||
|
||||
Take a close look at the homepage for ``caret``. If you look at the
|
||||
@@ -219,7 +157,7 @@ R dependencies
|
||||
R packages are often small and follow the classic Unix philosophy
|
||||
of doing one thing well. They are modular and usually depend on
|
||||
several other packages. You may find a single package with over a
|
||||
hundred dependencies. Luckily, R packages are well-documented
|
||||
hundred dependencies. Luckily, CRAN packages are well-documented
|
||||
and list all of their dependencies in the following sections:
|
||||
|
||||
* Depends
|
||||
@@ -360,8 +298,8 @@ like so:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def configure_args(self):
|
||||
mpi_name = self.spec['mpi'].name
|
||||
def configure_args(self, spec, prefix):
|
||||
mpi_name = spec['mpi'].name
|
||||
|
||||
# The type of MPI. Supported values are:
|
||||
# OPENMPI, LAM, MPICH, MPICH2, or CRAY
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -12,172 +12,5 @@ RubyPackage
|
||||
Like Perl, Python, and R, Ruby has its own build system for
|
||||
installing Ruby gems.
|
||||
|
||||
^^^^^^
|
||||
Phases
|
||||
^^^^^^
|
||||
|
||||
The ``RubyPackage`` base class provides the following phases that
|
||||
can be overridden:
|
||||
|
||||
#. ``build`` - build everything needed to install
|
||||
#. ``install`` - install everything from build directory
|
||||
|
||||
For packages that come with a ``*.gemspec`` file, these phases run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ gem build *.gemspec
|
||||
$ gem install *.gem
|
||||
|
||||
|
||||
For packages that come with a ``Rakefile`` file, these phases run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ rake package
|
||||
$ gem install *.gem
|
||||
|
||||
|
||||
For packages that come pre-packaged as a ``*.gem`` file, the build
|
||||
phase is skipped and the install phase runs:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ gem install *.gem
|
||||
|
||||
|
||||
These are all standard ``gem`` commands and can be found by running:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ gem help commands
|
||||
|
||||
|
||||
For packages that only distribute ``*.gem`` files, these files can be
|
||||
downloaded with the ``expand=False`` option in the ``version`` directive.
|
||||
The build phase will be automatically skipped.
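For instance (hypothetical gem name and a placeholder checksum; substitute the
real values for your package):

.. code-block:: python

   version('1.0.0', sha256='<sha256-of-the-gem-file>', expand=False,
           url='https://rubygems.org/downloads/foo-1.0.0.gem')
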
|
||||
|
||||
^^^^^^^^^^^^^^^
|
||||
Important files
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
When building from source, Ruby packages can be identified by the
|
||||
presence of any of the following files:
|
||||
|
||||
* ``*.gemspec``
|
||||
* ``Rakefile``
|
||||
* ``setup.rb`` (not yet supported)
|
||||
|
||||
However, not all Ruby packages are released as source code. Some are only
|
||||
released as ``*.gem`` files. These files can be extracted using:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ gem unpack *.gem
|
||||
|
||||
|
||||
^^^^^^^^^^^
|
||||
Description
|
||||
^^^^^^^^^^^
|
||||
|
||||
The ``*.gemspec`` file may contain something like:
|
||||
|
||||
.. code-block:: ruby
|
||||
|
||||
summary = 'An implementation of the AsciiDoc text processor and publishing toolchain'
|
||||
description = 'A fast, open source text processor and publishing toolchain for converting AsciiDoc content to HTML 5, DocBook 5, and other formats.'
|
||||
|
||||
|
||||
Either of these can be used for the description of the Spack package.
|
||||
|
||||
^^^^^^^^
|
||||
Homepage
|
||||
^^^^^^^^
|
||||
|
||||
The ``*.gemspec`` file may contain something like:
|
||||
|
||||
.. code-block:: ruby
|
||||
|
||||
homepage = 'https://asciidoctor.org'
|
||||
|
||||
|
||||
This should be used as the official homepage of the Spack package.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Build system dependencies
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
All Ruby packages require Ruby at build and run-time. For this reason,
|
||||
the base class contains:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
extends('ruby')
|
||||
|
||||
|
||||
The ``*.gemspec`` file may contain something like:
|
||||
|
||||
.. code-block:: ruby
|
||||
|
||||
required_ruby_version = '>= 2.3.0'
|
||||
|
||||
|
||||
This can be added to the Spack package using:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
depends_on('ruby@2.3.0:', type=('build', 'run'))
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^
|
||||
Ruby dependencies
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
When you install a package with ``gem``, it reads the ``*.gemspec``
|
||||
file in order to determine the dependencies of the package.
|
||||
If the dependencies are not yet installed, ``gem`` downloads them
|
||||
and installs them for you. This may sound convenient, but Spack
|
||||
cannot rely on this behavior for two reasons:
|
||||
|
||||
#. Spack needs to be able to install packages on air-gapped networks.
|
||||
|
||||
If there is no internet connection, ``gem`` can't download the
|
||||
package dependencies. By explicitly listing every dependency in
|
||||
the ``package.py``, Spack knows what to download ahead of time.
|
||||
|
||||
#. Duplicate installations of the same dependency may occur.
|
||||
|
||||
Spack supports *activation* of Ruby extensions, which involves
|
||||
symlinking the package installation prefix to the Ruby installation
|
||||
prefix. If your package is missing a dependency, that dependency
|
||||
will be installed to the installation directory of the same package.
|
||||
If you try to activate the package + dependency, it may cause a
|
||||
problem if that package has already been activated.
|
||||
|
||||
For these reasons, you must always explicitly list all dependencies.
|
||||
Although the documentation may list the package's dependencies,
|
||||
often the developers assume people will use ``gem`` and won't have to
|
||||
worry about it. Always check the ``*.gemspec`` file to find the true
|
||||
dependencies.
|
||||
|
||||
Check for the following clues in the ``*.gemspec`` file:
|
||||
|
||||
* ``add_runtime_dependency``
|
||||
|
||||
These packages are required for installation.
|
||||
|
||||
* ``add_dependency``
|
||||
|
||||
This is an alias for ``add_runtime_dependency``
|
||||
|
||||
* ``add_development_dependency``
|
||||
|
||||
These packages are optional dependencies used for development.
|
||||
They should not be added as dependencies of the package.
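As a sketch of how the run-time clue translates (both the gem and the Spack
package name below are purely hypothetical):

.. code-block:: python

   # add_runtime_dependency 'foo'  ->  hypothetical Spack package name
   depends_on('rb-foo', type=('build', 'run'))
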
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
External documentation
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
For more information on Ruby packaging, see:
|
||||
https://guides.rubygems.org/
|
||||
This build system is a work-in-progress. See
|
||||
https://github.com/spack/spack/pull/3127 for more information.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -93,17 +93,10 @@ in the site-packages directory:
|
||||
$ python
|
||||
>>> import setuptools
|
||||
>>> setuptools.find_packages()
|
||||
[
|
||||
'PyQt5', 'PyQt5.QtCore', 'PyQt5.QtGui', 'PyQt5.QtHelp',
|
||||
'PyQt5.QtMultimedia', 'PyQt5.QtMultimediaWidgets', 'PyQt5.QtNetwork',
|
||||
'PyQt5.QtOpenGL', 'PyQt5.QtPrintSupport', 'PyQt5.QtQml',
|
||||
'PyQt5.QtQuick', 'PyQt5.QtSvg', 'PyQt5.QtTest', 'PyQt5.QtWebChannel',
|
||||
'PyQt5.QtWebSockets', 'PyQt5.QtWidgets', 'PyQt5.QtXml',
|
||||
'PyQt5.QtXmlPatterns'
|
||||
]
|
||||
['QtPy5']
|
||||
|
||||
|
||||
Large, complex packages like ``py-pyqt5`` will return a long list of
|
||||
Large, complex packages like ``QtPy5`` will return a long list of
|
||||
packages, while other packages may return an empty list. These packages
|
||||
only install a single ``foo.py`` file. In Python packaging lingo,
|
||||
a "package" is a directory containing files like:
|
||||
@@ -115,25 +108,21 @@ a "package" is a directory containing files like:
|
||||
foo/baz.py
|
||||
|
||||
|
||||
whereas a "module" is a single Python file.
|
||||
|
||||
The ``SIPPackage`` base class automatically detects these module
|
||||
names for you. If, for whatever reason, the module names detected
|
||||
are wrong, you can provide the names yourself by overriding
|
||||
``import_modules`` like so:
|
||||
whereas a "module" is a single Python file. Since ``find_packages``
|
||||
only returns packages, you'll have to determine the correct module
|
||||
names yourself. You can now add these packages and modules to the
|
||||
package like so:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import_modules = ['PyQt5']
|
||||
|
||||
|
||||
These tests often catch missing dependencies and non-RPATHed
|
||||
libraries. Make sure not to add modules/packages containing the word
|
||||
"test", as these likely won't end up in the installation directory,
|
||||
or may require test dependencies like pytest to be installed.
|
||||
When you run ``spack install --test=root py-pyqt5``, Spack will attempt
|
||||
to import the ``PyQt5`` module after installation.
|
||||
|
||||
These tests can be triggered by running ``spack install --test=root``
|
||||
or by running ``spack test run`` after the installation has finished.
|
||||
These tests most often catch missing dependencies and non-RPATHed
|
||||
libraries.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
External documentation
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -17,10 +17,10 @@
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from glob import glob
|
||||
|
||||
from sphinx.ext.apidoc import main as sphinx_apidoc
|
||||
@@ -82,8 +82,6 @@
|
||||
# Disable duplicate cross-reference warnings.
|
||||
#
|
||||
from sphinx.domains.python import PythonDomain
|
||||
|
||||
|
||||
class PatchedPythonDomain(PythonDomain):
|
||||
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
|
||||
if 'refspecific' in node:
|
||||
@@ -130,7 +128,7 @@ def setup(sphinx):
|
||||
|
||||
# General information about the project.
|
||||
project = u'Spack'
|
||||
copyright = u'2013-2021, Lawrence Livermore National Laboratory.'
|
||||
copyright = u'2013-2019, Lawrence Livermore National Laboratory.'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
@@ -138,7 +136,6 @@ def setup(sphinx):
|
||||
#
|
||||
# The short X.Y version.
|
||||
import spack
|
||||
|
||||
version = '.'.join(str(s) for s in spack.spack_version_info[:2])
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = spack.spack_version
|
||||
@@ -182,8 +179,7 @@ def setup(sphinx):
|
||||
# We use our own extension of the default style with a few modifications
|
||||
from pygments.style import Style
|
||||
from pygments.styles.default import DefaultStyle
|
||||
from pygments.token import Comment, Generic, Text
|
||||
|
||||
from pygments.token import Generic, Comment, Text
|
||||
|
||||
class SpackStyle(DefaultStyle):
|
||||
styles = DefaultStyle.styles.copy()
|
||||
@@ -192,7 +188,6 @@ class SpackStyle(DefaultStyle):
|
||||
styles[Generic.Prompt] = "bold #346ec9"
|
||||
|
||||
import pkg_resources
|
||||
|
||||
dist = pkg_resources.Distribution(__file__)
|
||||
sys.path.append('.') # make 'conf' module findable
|
||||
ep = pkg_resources.EntryPoint.parse('spack = conf:SpackStyle', dist=dist)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -99,7 +99,7 @@ username is not already in the path, Spack will append the value of ``$user`` to
|
||||
the selected ``build_stage`` path.
|
||||
|
||||
.. warning:: We highly recommend specifying ``build_stage`` paths that
|
||||
distinguish between staging and other activities to ensure
|
||||
distinguish between staging and other activities to ensure
|
||||
``spack clean`` does not inadvertently remove unrelated files.
|
||||
Spack prepends ``spack-stage-`` to temporary staging directory names to
|
||||
reduce this risk. Using a combination of ``spack`` and or ``stage`` in
|
||||
@@ -202,30 +202,28 @@ of builds.
|
||||
|
||||
Unless overridden in a package or on the command line, Spack builds all
|
||||
packages in parallel. The default parallelism is equal to the number of
|
||||
cores available to the process, up to 16 (the default of ``build_jobs``).
|
||||
For a build system that uses Makefiles, this ``spack install`` runs:
|
||||
cores on your machine, up to 16. Parallelism cannot exceed the number of
|
||||
cores available on the host. For a build system that uses Makefiles, this
|
||||
means running:
|
||||
|
||||
- ``make -j<build_jobs>``, when ``build_jobs`` is less than the number of
|
||||
cores available
|
||||
cores on the machine
|
||||
- ``make -j<ncores>``, when ``build_jobs`` is greater or equal to the
|
||||
number of cores available
|
||||
number of cores on the machine
|
||||
|
||||
If you work on a shared login node or have a strict ulimit, it may be
|
||||
necessary to set the default to a lower value. By setting ``build_jobs``
|
||||
to 4, for example, commands like ``spack install`` will run ``make -j4``
|
||||
instead of hogging every core. To build all software in serial,
|
||||
set ``build_jobs`` to 1.
|
||||
instead of hogging every core.
|
||||
|
||||
Note that specifying the number of jobs on the command line always takes
|
||||
priority, so that ``spack install -j<n>`` always runs `make -j<n>`, even
|
||||
when that exceeds the number of cores available.
|
||||
To build all software in serial, set ``build_jobs`` to 1.
|
||||
|
||||
--------------------
|
||||
``ccache``
|
||||
--------------------
|
||||
|
||||
When set to ``true`` Spack will use ccache to cache compiles. This is
|
||||
useful specifically in two cases: (1) when using ``spack dev-build``, and (2)
|
||||
useful specifically in two cases: (1) when using ``spack setup``, and (2)
|
||||
when building the same package with many different variants. The default is
|
||||
``false``.
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -78,13 +78,6 @@ are six configuration scopes. From lowest to highest:
|
||||
If multiple scopes are listed on the command line, they are ordered
|
||||
from lowest to highest precedence.
|
||||
|
||||
#. **environment**: When using Spack :ref:`environments`, Spack reads
|
||||
additional configuration from the environment file. See
|
||||
:ref:`environment-configuration` for further details on these
|
||||
scopes. Environment scopes can be referenced from the command line
|
||||
as ``env:name`` (to reference environment ``foo``, use
|
||||
``env:foo``).
|
||||
|
||||
#. **command line**: Build settings specified on the command line take
|
||||
precedence over all other scopes.
|
||||
|
||||
@@ -199,11 +192,10 @@ with MPICH. You can create different configuration scopes for use with
|
||||
Platform-specific Scopes
|
||||
------------------------
|
||||
|
||||
For each scope above (excluding environment scopes), there can also be
|
||||
platform-specific settings. For example, on most platforms, GCC is
|
||||
the preferred compiler. However, on macOS (darwin), Clang often works
|
||||
for more packages, and is set as the default compiler. This
|
||||
configuration is set in
|
||||
For each scope above, there can also be platform-specific settings.
|
||||
For example, on most platforms, GCC is the preferred compiler.
|
||||
However, on macOS (darwin), Clang often works for more packages,
|
||||
and is set as the default compiler. This configuration is set in
|
||||
``$(prefix)/etc/spack/defaults/darwin/packages.yaml``. It will take
|
||||
precedence over settings in the ``defaults`` scope, but can still be
|
||||
overridden by settings in ``system``, ``system/darwin``, ``site``,
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -9,48 +9,28 @@
|
||||
Container Images
|
||||
================
|
||||
|
||||
Spack :ref:`environments` are a great tool to create container images, but
|
||||
preparing one that is suitable for production requires some more boilerplate
|
||||
than just:
|
||||
Spack can be an ideal tool to setup images for containers since all the
|
||||
features discussed in :ref:`environments` can greatly help to manage
|
||||
the installation of software during the image build process. Nonetheless,
|
||||
building a production image from scratch still requires a lot of
|
||||
boilerplate to:
|
||||
|
||||
.. code-block:: docker
|
||||
- Get Spack working within the image, possibly running as root
|
||||
- Minimize the physical size of the software installed
|
||||
- Properly update the system software in the base image
|
||||
|
||||
COPY spack.yaml /environment
|
||||
RUN spack -e /environment install
|
||||
|
||||
Additional actions may be needed to minimize the size of the
|
||||
container, or to update the system software that is installed in the base
|
||||
image, or to set up a proper entrypoint to run the image. These tasks are
|
||||
usually both necessary and repetitive, so Spack comes with a command
|
||||
to generate recipes for container images starting from a ``spack.yaml``.
|
||||
|
||||
--------------------
|
||||
A Quick Introduction
|
||||
--------------------
|
||||
|
||||
Consider having a Spack environment like the following:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
spack:
|
||||
specs:
|
||||
- gromacs+mpi
|
||||
- mpich
|
||||
|
||||
Producing a ``Dockerfile`` from it is as simple as moving to the directory
|
||||
where the ``spack.yaml`` file is stored and giving the following command:
|
||||
To facilitate users with these tedious tasks, Spack provides a command
|
||||
to automatically generate recipes for container images based on
|
||||
Environments:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack containerize > Dockerfile
|
||||
|
||||
The ``Dockerfile`` that gets created uses multi-stage builds and
|
||||
other techniques to minimize the size of the final image:
|
||||
|
||||
.. code-block:: docker
|
||||
$ ls
|
||||
spack.yaml
|
||||
|
||||
$ spack containerize
|
||||
# Build stage with Spack pre-installed and ready to be used
|
||||
FROM spack/ubuntu-bionic:latest as builder
|
||||
FROM spack/centos7:latest as builder
|
||||
|
||||
# What we want to install and how we want to install it
|
||||
# is specified in a manifest file (spack.yaml)
|
||||
@@ -65,7 +45,7 @@ other techniques to minimize the size of the final image:
|
||||
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
|
||||
|
||||
# Install the software, remove unnecessary deps
|
||||
RUN cd /opt/spack-environment && spack env activate . && spack install --fail-fast && spack gc -y
|
||||
RUN cd /opt/spack-environment && spack env activate . && spack install && spack gc -y
|
||||
|
||||
# Strip all the binaries
|
||||
RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
|
||||
@@ -78,34 +58,38 @@ other techniques to minimize the size of the final image:
|
||||
RUN cd /opt/spack-environment && \
|
||||
spack env activate --sh -d . >> /etc/profile.d/z10_spack_environment.sh
|
||||
|
||||
|
||||
# Bare OS image to run the installed executables
|
||||
FROM ubuntu:18.04
|
||||
FROM centos:7
|
||||
|
||||
COPY --from=builder /opt/spack-environment /opt/spack-environment
|
||||
COPY --from=builder /opt/software /opt/software
|
||||
COPY --from=builder /opt/view /opt/view
|
||||
COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh
|
||||
|
||||
RUN yum update -y && yum install -y epel-release && yum update -y \
|
||||
&& yum install -y libgomp \
|
||||
&& rm -rf /var/cache/yum && yum clean all
|
||||
|
||||
RUN echo 'export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][gromacs]\[$(tput setaf 2)\]\u\[$(tput sgr0)\]:\w $ \[$(tput sgr0)\]"' >> ~/.bashrc
|
||||
|
||||
|
||||
LABEL "app"="gromacs"
|
||||
LABEL "mpi"="mpich"
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"]
|
||||
|
||||
The image itself can then be built and run in the usual way, with any of the
|
||||
tools suitable for the task. For instance, if we decided to use ``docker``:
|
||||
|
||||
.. code-block:: bash
|
||||
The bits that make this automation possible are discussed in details
|
||||
below. All the images generated in this way will be based on
|
||||
multi-stage builds with:
|
||||
|
||||
$ spack containerize > Dockerfile
|
||||
$ docker build -t myimage .
|
||||
[ ... ]
|
||||
$ docker run -it myimage
|
||||
- A fat ``build`` stage containing common build tools and Spack itself
|
||||
- A minimal ``final`` stage containing only the software requested by the user
|
||||
|
||||
The various components involved in the generation of the recipe and their
|
||||
configuration are discussed in details in the sections below.
|
||||
|
||||
.. _container_spack_images:
|
||||
|
||||
--------------------------
|
||||
Spack Images on Docker Hub
|
||||
--------------------------
|
||||
-----------------
|
||||
Spack Base Images
|
||||
-----------------
|
||||
|
||||
Docker images with Spack preinstalled and ready to be used are
|
||||
built on `Docker Hub <https://hub.docker.com/u/spack>`_
|
||||
@@ -140,20 +124,19 @@ All the images are tagged with the corresponding release of Spack:
|
||||
with the exception of the ``latest`` tag that points to the HEAD
|
||||
of the ``develop`` branch. These images are available for anyone
|
||||
to use and take care of all the repetitive tasks that are necessary
|
||||
to setup Spack within a container. The container recipes generated
|
||||
by Spack use them as default base images for their ``build`` stage,
|
||||
even though handles to use custom base images provided by users are
|
||||
available to accommodate complex use cases.
|
||||
to setup Spack within a container. All the container recipes generated
|
||||
automatically by Spack use them as base images for their ``build`` stage.
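For a quick interactive look at one of these images, it can be pulled and run
directly with Docker (the tag below is just an example):

.. code-block:: console

   $ docker pull spack/centos7:latest
   $ docker run -it spack/centos7:latest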
|
||||
|
||||
---------------------------------
|
||||
Creating Images From Environments
|
||||
---------------------------------
|
||||
|
||||
-------------------------
|
||||
Environment Configuration
|
||||
-------------------------
|
||||
|
||||
Any Spack Environment can be used for the automatic generation of container
|
||||
recipes. Sensible defaults are provided for things like the base image or the
|
||||
version of Spack used in the image.
|
||||
If a finer tuning is needed it can be obtained by adding the relevant metadata
|
||||
under the ``container`` attribute of environments:
|
||||
version of Spack used in the image. If a finer tuning is needed it can be
|
||||
obtained by adding the relevant metadata under the ``container`` attribute
|
||||
of environments:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
@@ -167,10 +150,9 @@ under the ``container`` attribute of environments:
|
||||
# singularity or anything else that is currently supported
|
||||
format: docker
|
||||
|
||||
# Sets the base images for the stages where Spack builds the
|
||||
# software or where the software gets installed after being built.
|
||||
images:
|
||||
os: "centos:7"
|
||||
# Select from a valid list of images
|
||||
base:
|
||||
image: "centos:7"
|
||||
spack: develop
|
||||
|
||||
# Whether or not to strip binaries
|
||||
@@ -178,223 +160,19 @@ under the ``container`` attribute of environments:
|
||||
|
||||
# Additional system packages that are needed at runtime
|
||||
os_packages:
|
||||
final:
|
||||
- libgomp
|
||||
- libgomp
|
||||
|
||||
# Extra instructions
|
||||
extra_instructions:
|
||||
final: |
|
||||
RUN echo 'export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][gromacs]\[$(tput setaf 2)\]\u\[$(tput sgr0)\]:\w $ "' >> ~/.bashrc
|
||||
RUN echo 'export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][gromacs]\[$(tput setaf 2)\]\u\[$(tput sgr0)\]:\w $ \[$(tput sgr0)\]"' >> ~/.bashrc
|
||||
|
||||
# Labels for the image
|
||||
labels:
|
||||
app: "gromacs"
|
||||
mpi: "mpich"
|
||||
|
||||
A detailed description of the options available can be found in the
|
||||
:ref:`container_config_options` section.
|
||||
|
||||
-------------------
|
||||
Setting Base Images
|
||||
-------------------
|
||||
|
||||
The ``images`` subsection is used to select both the image where
|
||||
Spack builds the software and the image where the built software
|
||||
is installed. This attribute can be set in two different ways and
|
||||
which one to use depends on the use case at hand.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Use Official Spack Images From Dockerhub
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
To generate a recipe that uses an official Docker image from the
|
||||
Spack organization to build the software and the corresponding official OS image
|
||||
to install the built software, all the user has to do is specify:
|
||||
|
||||
1. An operating system under ``images:os``
|
||||
2. A Spack version under ``images:spack``
|
||||
|
||||
Any combination of these two values that can be mapped to one of the images
|
||||
discussed in :ref:`container_spack_images` is allowed. For instance, the
|
||||
following ``spack.yaml``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
spack:
|
||||
specs:
|
||||
- gromacs+mpi
|
||||
- mpich
|
||||
|
||||
container:
|
||||
images:
|
||||
os: centos:7
|
||||
spack: 0.15.4
|
||||
|
||||
uses ``spack/centos7:0.15.4`` and ``centos:7`` for the stages where the
|
||||
software is respectively built and installed:
|
||||
|
||||
.. code-block:: docker
|
||||
|
||||
# Build stage with Spack pre-installed and ready to be used
|
||||
FROM spack/centos7:0.15.4 as builder
|
||||
|
||||
# What we want to install and how we want to install it
|
||||
# is specified in a manifest file (spack.yaml)
|
||||
RUN mkdir /opt/spack-environment \
|
||||
&& (echo "spack:" \
|
||||
&& echo " specs:" \
|
||||
&& echo " - gromacs+mpi" \
|
||||
&& echo " - mpich" \
|
||||
&& echo " concretization: together" \
|
||||
&& echo " config:" \
|
||||
&& echo " install_tree: /opt/software" \
|
||||
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
|
||||
[ ... ]
|
||||
# Bare OS image to run the installed executables
|
||||
FROM centos:7
|
||||
|
||||
COPY --from=builder /opt/spack-environment /opt/spack-environment
|
||||
COPY --from=builder /opt/software /opt/software
|
||||
COPY --from=builder /opt/view /opt/view
|
||||
COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"]
|
||||
|
||||
This method of selecting base images is the simpler of the two, and we advise
using it whenever possible. There are cases, though, where using Spack's official
images is not enough to fit production needs. In these situations, users can manually
select which base image to start from in the recipe, as we'll see next.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Use Custom Images Provided by Users
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Consider, as an example, building a production grade image for a CUDA
|
||||
application. The best strategy would probably be to build on top of
|
||||
images provided by the vendor and regard CUDA as an external package.
|
||||
|
||||
Spack doesn't currently provide an official image with CUDA configured
|
||||
this way, but users can build it on their own and then configure the
|
||||
environment to explicitly pull it. This requires users to:
|
||||
|
||||
1. Specify the image used to build the software under ``images:build``
|
||||
2. Specify the image used to install the built software under ``images:final``
|
||||
|
||||
A ``spack.yaml`` like the following:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
spack:
|
||||
specs:
|
||||
- gromacs@2019.4+cuda build_type=Release
|
||||
- mpich
|
||||
- fftw precision=float
|
||||
packages:
|
||||
cuda:
|
||||
buildable: False
|
||||
externals:
|
||||
- spec: cuda%gcc
|
||||
prefix: /usr/local/cuda
|
||||
|
||||
container:
|
||||
images:
|
||||
build: custom/cuda-10.1-ubuntu18.04:latest
|
||||
final: nvidia/cuda:10.1-base-ubuntu18.04
|
||||
|
||||
produces, for instance, the following ``Dockerfile``:
|
||||
|
||||
.. code-block:: docker
|
||||
|
||||
# Build stage with Spack pre-installed and ready to be used
|
||||
FROM custom/cuda-10.1-ubuntu18.04:latest as builder
|
||||
|
||||
# What we want to install and how we want to install it
|
||||
# is specified in a manifest file (spack.yaml)
|
||||
RUN mkdir /opt/spack-environment \
|
||||
&& (echo "spack:" \
|
||||
&& echo " specs:" \
|
||||
&& echo " - gromacs@2019.4+cuda build_type=Release" \
|
||||
&& echo " - mpich" \
|
||||
&& echo " - fftw precision=float" \
|
||||
&& echo " packages:" \
|
||||
&& echo " cuda:" \
|
||||
&& echo " buildable: false" \
|
||||
&& echo " externals:" \
|
||||
&& echo " - spec: cuda%gcc" \
|
||||
&& echo " prefix: /usr/local/cuda" \
|
||||
&& echo " concretization: together" \
|
||||
&& echo " config:" \
|
||||
&& echo " install_tree: /opt/software" \
|
||||
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
|
||||
|
||||
# Install the software, remove unnecessary deps
|
||||
RUN cd /opt/spack-environment && spack env activate . && spack install --fail-fast && spack gc -y
|
||||
|
||||
# Strip all the binaries
|
||||
RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
|
||||
xargs file -i | \
|
||||
grep 'charset=binary' | \
|
||||
grep 'x-executable\|x-archive\|x-sharedlib' | \
|
||||
awk -F: '{print $1}' | xargs strip -s
|
||||
|
||||
# Modifications to the environment that are necessary to run
|
||||
RUN cd /opt/spack-environment && \
|
||||
spack env activate --sh -d . >> /etc/profile.d/z10_spack_environment.sh
|
||||
|
||||
# Bare OS image to run the installed executables
|
||||
FROM nvidia/cuda:10.1-base-ubuntu18.04
|
||||
|
||||
COPY --from=builder /opt/spack-environment /opt/spack-environment
|
||||
COPY --from=builder /opt/software /opt/software
|
||||
COPY --from=builder /opt/view /opt/view
|
||||
COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"]
|
||||
|
||||
where the base images for both stages are completely custom.
|
||||
|
||||
This second mode of selection for base images is more flexible than just
|
||||
choosing an operating system and a Spack version, but is also more demanding.
|
||||
Users may need to generate their base images themselves, and it's also their
|
||||
responsibility to ensure that:
|
||||
|
||||
1. Spack is available in the ``build`` stage and set up correctly to install the required software
|
||||
2. The artifacts produced in the ``build`` stage can be executed in the ``final`` stage
|
||||
|
||||
Therefore we don't recommend its use in cases that can be otherwise
|
||||
covered by the simplified mode shown first.
|
||||
|
||||
----------------------------
|
||||
Singularity Definition Files
|
||||
----------------------------
|
||||
|
||||
In addition to producing recipes in ``Dockerfile`` format, Spack can produce
Singularity Definition Files by simply changing the value of the ``format``
attribute:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ cat spack.yaml
|
||||
spack:
|
||||
specs:
|
||||
- hdf5~mpi
|
||||
container:
|
||||
format: singularity
|
||||
|
||||
$ spack containerize > hdf5.def
|
||||
$ sudo singularity build hdf5.sif hdf5.def
|
||||
|
||||
The minimum version of Singularity required to build a SIF (Singularity Image Format)
|
||||
image from the recipes generated by Spack is ``3.5.3``.
|
||||
|
||||
.. _container_config_options:
|
||||
|
||||
-----------------------
|
||||
Configuration Reference
|
||||
-----------------------
|
||||
|
||||
The tables below describe all the configuration options that are currently supported
|
||||
to customize the generation of container recipes:
|
||||
The tables below describe the configuration options that are currently supported:
|
||||
|
||||
.. list-table:: General configuration options for the ``container`` section of ``spack.yaml``
|
||||
:header-rows: 1
|
||||
@@ -407,41 +185,21 @@ to customize the generation of container recipes:
|
||||
- The format of the recipe
|
||||
- ``docker`` or ``singularity``
|
||||
- Yes
|
||||
* - ``images:os``
|
||||
- Operating system used as a base for the image
|
||||
* - ``base:image``
|
||||
- Base image for ``final`` stage
|
||||
- See :ref:`containers-supported-os`
|
||||
- Yes, if using constrained selection of base images
|
||||
* - ``images:spack``
|
||||
- Version of Spack used in the ``build`` stage
|
||||
- Yes
|
||||
* - ``base:spack``
|
||||
- Version of Spack
|
||||
- Valid tags for ``base:image``
|
||||
- Yes, if using constrained selection of base images
|
||||
* - ``images:build``
|
||||
- Image to be used in the ``build`` stage
|
||||
- Any valid container image
|
||||
- Yes, if using custom selection of base images
|
||||
* - ``images:final``
|
||||
- Image to be used in the ``final`` stage
|
||||
- Any valid container image
|
||||
- Yes, if using custom selection of base images
|
||||
- Yes
|
||||
* - ``strip``
|
||||
- Whether to strip binaries
|
||||
- ``true`` (default) or ``false``
|
||||
- No
|
||||
* - ``os_packages:command``
|
||||
- Tool used to manage system packages
|
||||
- ``apt``, ``yum``
|
||||
- Only with custom base images
|
||||
* - ``os_packages:update``
|
||||
- Whether or not to update the list of available packages
|
||||
- True or False (default: True)
|
||||
- No
|
||||
* - ``os_packages:build``
|
||||
- System packages needed at build-time
|
||||
- Valid packages for the current OS
|
||||
- No
|
||||
* - ``os_packages:final``
|
||||
- System packages needed at run-time
|
||||
- Valid packages for the current OS
|
||||
* - ``os_packages``
|
||||
- System packages to be installed
|
||||
- Valid packages for the ``final`` OS
|
||||
- No
|
||||
* - ``extra_instructions:build``
|
||||
- Extra instructions (e.g. `RUN`, `COPY`, etc.) at the end of the ``build`` stage
|
||||
@@ -480,56 +238,70 @@ to customize the generation of container recipes:
|
||||
- Description string
|
||||
- No
|
||||
|
||||
--------------
|
||||
Best Practices
|
||||
--------------
|
||||
Once the Environment is properly configured a recipe for a container
|
||||
image can be printed to standard output by issuing the following
|
||||
command from the directory where the ``spack.yaml`` resides:
|
||||
|
||||
^^^
|
||||
MPI
|
||||
^^^
|
||||
Because OpenMPI, the default MPI implementation in Spack, depends on Fortran,
consider adding ``gfortran`` to the ``apt-get install`` list.
|
||||
.. code-block:: console
|
||||
|
||||
Recent versions of OpenMPI will require you to pass ``--allow-run-as-root``
|
||||
to your ``mpirun`` calls if started as root user inside Docker.
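For example, inside a container started as root, an invocation might look like
the following sketch (the executable name is hypothetical):

.. code-block:: console

   $ mpirun --allow-run-as-root -np 2 ./my_mpi_app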
|
||||
$ spack containerize
|
||||
|
||||
For execution on HPC clusters, it can be helpful to import the docker
|
||||
image into Singularity in order to start a program with an *external*
|
||||
MPI. Otherwise, also add ``openssh-server`` to the ``apt-get install`` list.
|
||||
The example ``spack.yaml`` above would produce for instance the
|
||||
following ``Dockerfile``:
|
||||
|
||||
^^^^
|
||||
CUDA
|
||||
^^^^
|
||||
Starting from CUDA 9.0, Nvidia provides minimal CUDA images based on
|
||||
Ubuntu. Please see `their instructions <https://hub.docker.com/r/nvidia/cuda/>`_.
|
||||
Avoid double-installing CUDA by adding, e.g.
|
||||
.. code-block:: docker
|
||||
|
||||
.. code-block:: yaml
|
||||
# Build stage with Spack pre-installed and ready to be used
|
||||
FROM spack/centos7:latest as builder
|
||||
|
||||
packages:
|
||||
cuda:
|
||||
externals:
|
||||
- spec: "cuda@9.0.176%gcc@5.4.0 arch=linux-ubuntu16-x86_64"
|
||||
prefix: /usr/local/cuda
|
||||
buildable: False
|
||||
# What we want to install and how we want to install it
|
||||
# is specified in a manifest file (spack.yaml)
|
||||
RUN mkdir /opt/spack-environment \
|
||||
&& (echo "spack:" \
|
||||
&& echo " specs:" \
|
||||
&& echo " - gromacs+mpi" \
|
||||
&& echo " - mpich" \
|
||||
&& echo " concretization: together" \
|
||||
&& echo " config:" \
|
||||
&& echo " install_tree: /opt/software" \
|
||||
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
|
||||
|
||||
to your ``spack.yaml``.
|
||||
# Install the software, remove unnecessary deps
|
||||
RUN cd /opt/spack-environment && spack env activate . && spack install && spack gc -y
|
||||
|
||||
Users will either need ``nvidia-docker`` or e.g. Singularity to *execute*
|
||||
device kernels.
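For example, with the NVIDIA container runtime installed, a GPU-enabled image
could be started like the following sketch (the image name is hypothetical):

.. code-block:: console

   $ docker run --rm --gpus all my-cuda-image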
|
||||
# Strip all the binaries
|
||||
RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
|
||||
xargs file -i | \
|
||||
grep 'charset=binary' | \
|
||||
grep 'x-executable\|x-archive\|x-sharedlib' | \
|
||||
awk -F: '{print $1}' | xargs strip -s
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Docker on Windows and OSX
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
# Modifications to the environment that are necessary to run
|
||||
RUN cd /opt/spack-environment && \
|
||||
spack env activate --sh -d . >> /etc/profile.d/z10_spack_environment.sh
|
||||
|
||||
On macOS and Windows, Docker runs on a hypervisor that is not allocated much
memory by default, and some Spack packages may fail to build due to lack of
memory. To work around this issue, consider configuring your Docker installation
to use more of your host memory. In some cases, you can also ease the memory
pressure on parallel builds by limiting the parallelism in your ``config.yaml``.
|
||||
|
||||
.. code-block:: yaml
|
||||
# Bare OS image to run the installed executables
|
||||
FROM centos:7
|
||||
|
||||
config:
|
||||
build_jobs: 2
|
||||
COPY --from=builder /opt/spack-environment /opt/spack-environment
|
||||
COPY --from=builder /opt/software /opt/software
|
||||
COPY --from=builder /opt/view /opt/view
|
||||
COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh
|
||||
|
||||
RUN yum update -y && yum install -y epel-release && yum update -y \
|
||||
&& yum install -y libgomp \
|
||||
&& rm -rf /var/cache/yum && yum clean all
|
||||
|
||||
RUN echo 'export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][gromacs]\[$(tput setaf 2)\]\u\[$(tput sgr0)\]:\w $ \[$(tput sgr0)\]"' >> ~/.bashrc
|
||||
|
||||
|
||||
LABEL "app"="gromacs"
|
||||
LABEL "mpi"="mpich"
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"]
|
||||
|
||||
.. note::
|
||||
Spack can also produce Singularity definition files to build the image. The
|
||||
minimum version of Singularity required to build a SIF (Singularity Image Format)
|
||||
from them is ``3.5.3``.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -48,7 +48,7 @@ information.
|
||||
Continuous Integration
|
||||
----------------------
|
||||
|
||||
Spack uses `Github Actions <https://docs.github.com/en/actions>`_ for Continuous Integration
|
||||
Spack uses `Travis CI <https://travis-ci.org/spack/spack>`_ for Continuous Integration
|
||||
testing. This means that every time you submit a pull request, a series of tests will
|
||||
be run to make sure you didn't accidentally introduce any bugs into Spack. **Your PR
|
||||
will not be accepted until it passes all of these tests.** While you can certainly wait
|
||||
@@ -57,24 +57,25 @@ locally to speed up the review process.
|
||||
|
||||
.. note::
|
||||
|
||||
Oftentimes, CI will fail for reasons other than a problem with your PR.
|
||||
Oftentimes, Travis will fail for reasons other than a problem with your PR.
|
||||
For example, apt-get, pip, or homebrew will fail to download one of the
|
||||
dependencies for the test suite, or a transient bug will cause the unit tests
|
||||
to timeout. If any job fails, click the "Details" link and click on the test(s)
|
||||
to timeout. If Travis fails, click the "Details" link and click on the test(s)
|
||||
that is failing. If it doesn't look like it is failing for reasons related to
|
||||
your PR, you have two options. If you have write permissions for the Spack
|
||||
repository, you should see a "Restart workflow" button on the right-hand side. If
|
||||
repository, you should see a "Restart job" button on the right-hand side. If
|
||||
not, you can close and reopen your PR to rerun all of the tests. If the same
|
||||
test keeps failing, there may be a problem with your PR. If you notice that
|
||||
every recent PR is failing with the same error message, it may be that an issue
|
||||
occurred with the CI infrastructure or one of Spack's dependencies put out a
|
||||
new release that is causing problems. If this is the case, please file an issue.
|
||||
every recent PR is failing with the same error message, it may be that Travis
|
||||
is down or one of Spack's dependencies put out a new release that is causing
|
||||
problems. If this is the case, please file an issue.
|
||||
|
||||
|
||||
We currently test against Python 2.6, 2.7, and 3.5-3.7 on both macOS and Linux and
|
||||
If you take a look in ``$SPACK_ROOT/.travis.yml``, you'll notice that we test
|
||||
against Python 2.6, 2.7, and 3.4-3.7 on both macOS and Linux. We currently
|
||||
perform 3 types of tests:
|
||||
|
||||
.. _cmd-spack-unit-test:
|
||||
.. _cmd-spack-test:
|
||||
|
||||
^^^^^^^^^^
|
||||
Unit Tests
|
||||
@@ -96,7 +97,7 @@ To run *all* of the unit tests, use:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack unit-test
|
||||
$ spack test
|
||||
|
||||
These tests may take several minutes to complete. If you know you are
|
||||
only modifying a single Spack feature, you can run subsets of tests at a
|
||||
@@ -105,53 +106,51 @@ time. For example, this would run all the tests in
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack unit-test lib/spack/spack/test/architecture.py
|
||||
$ spack test architecture.py
|
||||
|
||||
And this would run the ``test_platform`` test from that file:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack unit-test lib/spack/spack/test/architecture.py::test_platform
|
||||
$ spack test architecture.py::test_platform
|
||||
|
||||
This allows you to develop iteratively: make a change, test that change,
|
||||
make another change, test that change, etc. We use `pytest
|
||||
<http://pytest.org/>`_ as our tests framework, and these types of
|
||||
<http://pytest.org/>`_ as our tests fromework, and these types of
|
||||
arguments are just passed to the ``pytest`` command underneath. See `the
|
||||
pytest docs
|
||||
<http://doc.pytest.org/en/latest/usage.html#specifying-tests-selecting-tests>`_
|
||||
for more details on test selection syntax.
|
||||
|
||||
``spack unit-test`` has a few special options that can help you
|
||||
understand what tests are available. To get a list of all available
|
||||
unit test files, run:
|
||||
``spack test`` has a few special options that can help you understand
|
||||
what tests are available. To get a list of all available unit test
|
||||
files, run:
|
||||
|
||||
.. command-output:: spack unit-test --list
|
||||
.. command-output:: spack test --list
|
||||
:ellipsis: 5
|
||||
|
||||
To see a more detailed list of available unit tests, use ``spack
|
||||
unit-test --list-long``:
|
||||
To see a more detailed list of available unit tests, use ``spack test
|
||||
--list-long``:
|
||||
|
||||
.. command-output:: spack unit-test --list-long
|
||||
.. command-output:: spack test --list-long
|
||||
:ellipsis: 10
|
||||
|
||||
And to see the fully qualified names of all tests, use ``--list-names``:
|
||||
|
||||
.. command-output:: spack unit-test --list-names
|
||||
.. command-output:: spack test --list-names
|
||||
:ellipsis: 5
|
||||
|
||||
You can combine these with ``pytest`` arguments to restrict which tests
|
||||
you want to know about. For example, to see just the tests in
|
||||
``architecture.py``:
|
||||
|
||||
.. command-output:: spack unit-test --list-long lib/spack/spack/test/architecture.py
|
||||
.. command-output:: spack test --list-long architecture.py
|
||||
|
||||
You can also combine any of these options with a ``pytest`` keyword
|
||||
search. See the `pytest usage docs
|
||||
<https://docs.pytest.org/en/stable/usage.html#specifying-tests-selecting-tests>`_:
|
||||
for more details on test selection syntax. For example, to see the names of all tests that have "spec"
|
||||
search. For example, to see the names of all tests that have "spec"
|
||||
or "concretize" somewhere in their names:
|
||||
|
||||
.. command-output:: spack unit-test --list-names -k "spec and concretize"
|
||||
.. command-output:: spack test --list-names -k "spec and concretize"
|
||||
|
||||
By default, ``pytest`` captures the output of all unit tests, and it will
|
||||
print any captured output for failed tests. Sometimes it's helpful to see
|
||||
@@ -161,7 +160,7 @@ argument to ``pytest``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack unit-test -s --list-long lib/spack/spack/test/architecture.py::test_platform
|
||||
$ spack test -s architecture.py::test_platform
|
||||
|
||||
Unit tests are crucial to making sure bugs aren't introduced into
|
||||
Spack. If you are modifying core Spack libraries or adding new
|
||||
@@ -174,31 +173,29 @@ how to write tests!
|
||||
.. note::
|
||||
|
||||
You may notice the ``share/spack/qa/run-unit-tests`` script in the
|
||||
repository. This script is designed for CI. It runs the unit
|
||||
repository. This script is designed for Travis CI. It runs the unit
|
||||
tests and reports coverage statistics back to Codecov. If you want to
|
||||
run the unit tests yourself, we suggest you use ``spack unit-test``.
|
||||
run the unit tests yourself, we suggest you use ``spack test``.
|
||||
|
||||
^^^^^^^^^^^^
|
||||
Style Tests
|
||||
Flake8 Tests
|
||||
^^^^^^^^^^^^
|
||||
|
||||
Spack uses `Flake8 <http://flake8.pycqa.org/en/latest/>`_ to test for
|
||||
`PEP 8 <https://www.python.org/dev/peps/pep-0008/>`_ conformance and
|
||||
`mypy <https://mypy.readthedocs.io/en/stable/>`_ for type checking. PEP 8 is
|
||||
`PEP 8 <https://www.python.org/dev/peps/pep-0008/>`_ conformance. PEP 8 is
|
||||
a series of style guides for Python that provide suggestions for everything
|
||||
from variable naming to indentation. In order to limit the number of PRs that
|
||||
were mostly style changes, we decided to enforce PEP 8 conformance. Your PR
|
||||
needs to comply with PEP 8 in order to be accepted, and if it modifies the
|
||||
Spack library, it needs to successfully type-check with mypy as well.
|
||||
needs to comply with PEP 8 in order to be accepted.
|
||||
|
||||
Testing for compliance with spack's style is easy. Simply run the ``spack style``
|
||||
Testing for PEP 8 compliance is easy. Simply run the ``spack flake8``
|
||||
command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack style
|
||||
$ spack flake8
|
||||
|
||||
``spack style`` has a couple advantages over running the tools by hand:
|
||||
``spack flake8`` has a couple advantages over running ``flake8`` by hand:
|
||||
|
||||
#. It only tests files that you have modified since branching off of
|
||||
``develop``.
|
||||
@@ -209,9 +206,7 @@ command:
|
||||
checks. For example, URLs are often longer than 80 characters, so we
|
||||
exempt them from line length checks. We also exempt lines that start
|
||||
with "homepage", "url", "version", "variant", "depends_on", and
|
||||
"extends" in ``package.py`` files. This is now also possible when directly
|
||||
running flake8 if you can use the ``spack`` formatter plugin included with
|
||||
spack.
|
||||
"extends" in ``package.py`` files.
|
||||
|
||||
More approved flake8 exemptions can be found
|
||||
`here <https://github.com/spack/spack/blob/develop/.flake8>`_.
|
||||
@@ -244,14 +239,14 @@ However, if you aren't compliant with PEP 8, flake8 will complain:
|
||||
|
||||
Most of the error messages are straightforward, but if you don't understand what
|
||||
they mean, just ask questions about them when you submit your PR. The line numbers
|
||||
will change if you add or delete lines, so simply run ``spack style`` again
|
||||
will change if you add or delete lines, so simply run ``spack flake8`` again
|
||||
to update them.
|
||||
|
||||
.. tip::
|
||||
|
||||
Try fixing flake8 errors in reverse order. This eliminates the need for
|
||||
multiple runs of ``spack style`` just to re-compute line numbers and
|
||||
makes it much easier to fix errors directly off of the CI output.
|
||||
multiple runs of ``spack flake8`` just to re-compute line numbers and
|
||||
makes it much easier to fix errors directly off of the Travis output.
|
||||
|
||||
.. warning::
|
||||
|
||||
@@ -331,7 +326,7 @@ Once all of the dependencies are installed, you can try building the documentati
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ cd path/to/spack/lib/spack/docs/
|
||||
$ cd "$SPACK_ROOT/lib/spack/docs"
|
||||
$ make clean
|
||||
$ make
|
||||
|
||||
@@ -343,7 +338,7 @@ your PR is accepted.
|
||||
There is also a ``run-doc-tests`` script in ``share/spack/qa``. The only
|
||||
difference between running this script and running ``make`` by hand is that
|
||||
the script will exit immediately if it encounters an error or warning. This
|
||||
is necessary for CI. If you made a lot of documentation changes, it is
|
||||
is necessary for Travis CI. If you made a lot of documentation changes, it is
|
||||
much quicker to run ``make`` by hand so that you can see all of the warnings
|
||||
at once.
|
||||
|
||||
@@ -407,7 +402,7 @@ and allow you to see coverage line-by-line when viewing the Spack repository.
|
||||
If you are new to Spack, a great way to get started is to write unit tests to
|
||||
increase coverage!
|
||||
|
||||
Unlike with CI on Github Actions Codecov tests are not required to pass in order for your
|
||||
Unlike with Travis, Codecov tests are not required to pass in order for your
|
||||
PR to be merged. If you modify core Spack libraries, we would greatly
|
||||
appreciate unit tests that cover these changed lines. Otherwise, we have no
|
||||
way of knowing whether or not your changes introduce a bug. If you make
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -106,21 +106,11 @@ with a high level view of Spack's directory structure:
|
||||
external/ <- external libs included in Spack distro
|
||||
llnl/ <- some general-use libraries
|
||||
|
||||
spack/ <- spack module; contains Python code
|
||||
analyzers/ <- modules to run analysis on installed packages
|
||||
build_systems/ <- modules for different build systems
|
||||
cmd/ <- each file in here is a spack subcommand
|
||||
compilers/ <- compiler description files
|
||||
container/ <- module for spack containerize
|
||||
hooks/ <- hook modules to run at different points
|
||||
modules/ <- modules for lmod, tcl, etc.
|
||||
operating_systems/ <- operating system modules
|
||||
platforms/ <- different spack platforms
|
||||
reporters/ <- reporters like cdash, junit
|
||||
schema/ <- schemas to validate data structures
|
||||
solver/ <- the spack solver
|
||||
test/ <- unit test modules
|
||||
util/ <- common code
|
||||
spack/ <- spack module; contains Python code
|
||||
cmd/ <- each file in here is a spack subcommand
|
||||
compilers/ <- compiler description files
|
||||
test/ <- unit test modules
|
||||
util/ <- common code
|
||||
|
||||
Spack is designed so that it could live within a `standard UNIX
|
||||
directory hierarchy <http://linux.die.net/man/7/hier>`_, so ``lib``,
|
||||
@@ -261,22 +251,6 @@ Unit tests
|
||||
This is a fake package hierarchy used to mock up packages for
|
||||
Spack's test suite.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Research and Monitoring Modules
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
:mod:`spack.monitor`
|
||||
Contains :class:`SpackMonitor <spack.monitor.SpackMonitor>`. This is accessed
|
||||
from the ``spack install`` and ``spack analyze`` commands to send build
|
||||
and package metadata up to a `Spack Monitor <https://github.com/spack/spack-monitor>`_ server.
|
||||
|
||||
|
||||
:mod:`spack.analyzers`
|
||||
A module folder with a :class:`AnalyzerBase <spack.analyzers.analyzer_base.AnalyzerBase>`
|
||||
that provides base functions to run, save, and (optionally) upload analysis
|
||||
results to a `Spack Monitor <https://github.com/spack/spack-monitor>`_ server.
|
||||
|
||||
|
||||
^^^^^^^^^^^^^
|
||||
Other Modules
|
||||
^^^^^^^^^^^^^
|
||||
@@ -325,235 +299,6 @@ Conceptually, packages are overloaded. They contain:
|
||||
Stage objects
|
||||
-------------
|
||||
|
||||
|
||||
.. _writing-analyzers:
|
||||
|
||||
-----------------
|
||||
Writing analyzers
|
||||
-----------------
|
||||
|
||||
To write an analyzer, you should add a new python file to the
|
||||
analyzers module directory at ``lib/spack/spack/analyzers`` .
|
||||
Your analyzer should be a subclass of the :class:`AnalyzerBase <spack.analyzers.analyzer_base.AnalyzerBase>`. For example, if you want
|
||||
to add an analyzer class ``Myanalyzer`` you would write it to
|
||||
``spack/analyzers/myanalyzer.py`` and import and
|
||||
use the base as follows:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from .analyzer_base import AnalyzerBase
|
||||
|
||||
class Myanalyzer(AnalyzerBase):
|
||||
|
||||
|
||||
Note that the class name is your module file name with only the first
letter capitalized. You can look at other analyzers in
|
||||
that analyzer directory for examples. The guide here will tell you about the basic functions needed.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Analyzer Output Directory
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
By default, when you run ``spack analyze run`` an analyzer output directory will
|
||||
be created in your Spack user directory under ``$HOME``. We write output here
because the install directory might not always be writable.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
~/.spack/
|
||||
analyzers
|
||||
|
||||
Result files will be written here, organized in subfolders in the same structure
|
||||
as the package, with each analyzer owning its own subfolder. For example:
|
||||
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ tree ~/.spack/analyzers/
|
||||
/home/spackuser/.spack/analyzers/
|
||||
└── linux-ubuntu20.04-skylake
|
||||
└── gcc-9.3.0
|
||||
└── zlib-1.2.11-sl7m27mzkbejtkrajigj3a3m37ygv4u2
|
||||
├── environment_variables
|
||||
│ └── spack-analyzer-environment-variables.json
|
||||
├── install_files
|
||||
│ └── spack-analyzer-install-files.json
|
||||
└── libabigail
|
||||
└── lib
|
||||
└── spack-analyzer-libabigail-libz.so.1.2.11.xml
|
||||
|
||||
|
||||
Notice that for the libabigail analyzer, since results are generated per object,
|
||||
we honor the object's folder in case there are equivalently named files in
|
||||
different folders. The result files are typically written as JSON so they can be
easily read and uploaded in a future interaction with a monitor.
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^
|
||||
Analyzer Metadata
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
Your analyzer is required to have the class attributes ``name``, ``outfile``,
|
||||
and ``description``. These are printed to the user when they use the subcommand
|
||||
``spack analyze list-analyzers``. Here is an example.
|
||||
As we mentioned above, note that this analyzer would live in a module named
|
||||
``libabigail.py`` in the analyzers folder so that the class can be discovered.
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class Libabigail(AnalyzerBase):
|
||||
|
||||
name = "libabigail"
|
||||
outfile = "spack-analyzer-libabigail.json"
|
||||
description = "Application Binary Interface (ABI) features for objects"
|
||||
|
||||
|
||||
This means that the name and output file should be unique for your analyzer.
|
||||
Note that "all" cannot be the name of an analyzer, as this key is used to indicate
|
||||
that the user wants to run all analyzers.
|
||||
|
||||
.. _analyzer_run_function:
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
An analyzer run Function
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The core of an analyzer is its ``run()`` function, which should accept no
|
||||
arguments. You can assume your analyzer has the package spec of interest at ``self.spec``
|
||||
and it's up to the run function to generate whatever analysis data you need,
|
||||
and then return the object with a key as the analyzer name. The result data
|
||||
should be a list of objects, each with a name, ``analyzer_name``, ``install_file``,
|
||||
and one of ``value`` or ``binary_value``. The install file should be a relative
path, not an absolute path. For example, let's say we extract a metric called
|
||||
``metric`` for ``bin/wget`` using our analyzer ``thebest-analyzer``.
|
||||
We might have data that looks like this:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
result = {"name": "metric", "analyzer_name": "thebest-analyzer", "value": "1", "install_file": "bin/wget"}
|
||||
|
||||
|
||||
We'd then return it as follows. Note that the key is the analyzer name at ``self.name``.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
return {self.name: result}
|
||||
|
||||
This will save the complete result to the analyzer metadata folder, as described
|
||||
previously. If you want support for adding a different kind of metadata (e.g.,
|
||||
not associated with an install file) then the monitor server would need to be updated
|
||||
to support this first.
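Putting these pieces together, a minimal analyzer with a complete ``run()``
method might look like the following sketch. The ``filesize`` metric and the
walk over the install prefix are purely illustrative assumptions, not an
existing analyzer:

.. code-block:: python

   import os

   from .analyzer_base import AnalyzerBase


   class Myanalyzer(AnalyzerBase):

       name = "myanalyzer"
       outfile = "spack-analyzer-myanalyzer.json"
       description = "hypothetical analyzer that records file sizes"

       def run(self):
           """Collect a made-up 'filesize' metric for every installed file."""
           results = []
           prefix = str(self.spec.prefix)
           for root, _, files in os.walk(prefix):
               for fname in files:
                   path = os.path.join(root, fname)
                   results.append({
                       "name": "filesize",
                       "analyzer_name": self.name,
                       "install_file": os.path.relpath(path, prefix),
                       "value": str(os.path.getsize(path)),
                   })
           # The key of the returned object must be the analyzer name
           return {self.name: results}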
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
An analyzer init Function
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you don't need any extra dependencies or checks, you can skip defining an analyzer
|
||||
init function, as the base class will handle it. Typically, it will accept
|
||||
a spec, and an optional output directory (if the user does not want the default
|
||||
metadata folder for analyzer results). The analyzer init function should call
|
||||
its parent init, and then do any extra checks or validation that are required to
|
||||
work. For example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def __init__(self, spec, dirname=None):
|
||||
super(Myanalyzer, self).__init__(spec, dirname)
|
||||
|
||||
# install extra dependencies, do extra preparation and checks here
|
||||
|
||||
|
||||
At the end of the init, you will have available to you:
|
||||
|
||||
- **self.spec**: the spec object
|
||||
- **self.dirname**: an optional directory name the user has provided at init for saving results
|
||||
- **self.output_dir**: the analyzer metadata directory, where we save by default
|
||||
- **self.meta_dir**: the path to the package metadata directory (.spack) if you need it
|
||||
|
||||
And can proceed to write your analyzer.
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Saving Analyzer Results
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The analyzer will have ``save_result`` called with the result object generated
in ``run()``, to save it to the filesystem and, if the user has added the
``--monitor`` flag, to upload it to a monitor server. If your result follows an
accepted result format and you don't need to parse it further, you don't need
to add this function to your class. However, if your result data is large or
otherwise needs additional parsing, you can define it. If you define the
function, it is useful to know about the ``output_dir`` property, which you can
join with the relative path of your output file:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
outfile = os.path.join(self.output_dir, "my-output-file.txt")
|
||||
|
||||
|
||||
The directory will be provided by the ``output_dir`` property but it won't exist,
|
||||
so you should create it:
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# Create the output directory
|
||||
if not os.path.exists(self._output_dir):
|
||||
os.makedirs(self._output_dir)
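If your data is small and already in an accepted format, a custom
``save_result`` method could be as simple as the following sketch, which just
dumps the result to a JSON file in the analyzer output directory. This is only
illustrative; the imports would live at the top of your analyzer module:

.. code-block:: python

   import json
   import os

   # method on your analyzer class
   def save_result(self, result, monitor=None, overwrite=False):
       """Write the result object to a JSON file in the analyzer output directory."""
       if not os.path.exists(self.output_dir):
           os.makedirs(self.output_dir)
       outfile = os.path.join(self.output_dir, self.outfile)
       if overwrite or not os.path.exists(outfile):
           with open(outfile, "w") as fd:
               json.dump(result, fd)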
|
||||
|
||||
|
||||
If you are generating results that map to specific files in the package
install directory, you should try to maintain those paths, in case there
are equivalently named files in different directories that would otherwise
overwrite one another. As an example of an analyzer with a custom save,
the Libabigail analyzer saves ``*.xml`` files to the analyzer metadata
folder in ``run()``, since they are either binaries or, as xml (text),
would usually be too big to send in one request. For this reason, the files
are saved during ``run()`` and the filenames added to the result object,
and then when the result object is passed back into ``save_result()``,
we skip saving to the filesystem and instead read each file and send
it (separately) to the monitor:
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def save_result(self, result, monitor=None, overwrite=False):
|
||||
"""ABI results are saved to individual files, so each one needs to be
|
||||
read and uploaded. Result here should be the lookup generated in run(),
|
||||
the key is the analyzer name, and each value is the result file.
|
||||
We currently upload the entire xml as text because libabigail can't
|
||||
easily read gzipped xml, but this will be updated when it can.
|
||||
"""
|
||||
if not monitor:
|
||||
return
|
||||
|
||||
name = self.spec.package.name
|
||||
|
||||
for obj, filename in result.get(self.name, {}).items():
|
||||
|
||||
# Don't include the prefix
|
||||
rel_path = obj.replace(self.spec.prefix + os.path.sep, "")
|
||||
|
||||
# We've already saved the results to file during run
|
||||
content = spack.monitor.read_file(filename)
|
||||
|
||||
# A result needs an analyzer, value or binary_value, and name
|
||||
data = {"value": content, "install_file": rel_path, "name": "abidw-xml"}
|
||||
tty.info("Sending result for %s %s to monitor." % (name, rel_path))
|
||||
monitor.send_analyze_metadata(self.spec.package, {"libabigail": [data]})
|
||||
|
||||
|
||||
|
||||
Notice that this function, if you define it, requires a result object (generated
by ``run()``), a monitor (if you want to send results), and a boolean
``overwrite`` used to check whether a result exists first, so that an existing
result is not overwritten when ``overwrite`` is False. Also notice that, since we
already saved these files to the analyzer metadata folder during ``run()``, we
return early if a monitor isn't defined: this function only serves to send
results to the monitor. If you haven't saved anything to the analyzer metadata
folder yet, you might want to do that here. You should also use ``tty.info`` to
give the user a message such as "Writing result to $DIRNAME."
|
||||
|
||||
|
||||
.. _writing-commands:
|
||||
|
||||
----------------
|
||||
@@ -600,183 +345,6 @@ Whenever you add/remove/rename a command or flags for an existing command,
|
||||
make sure to update Spack's `Bash tab completion script
|
||||
<https://github.com/adamjstewart/spack/blob/develop/share/spack/spack-completion.bash>`_.
|
||||
|
||||
|
||||
-------------
|
||||
Writing Hooks
|
||||
-------------
|
||||
|
||||
A hook is a callback that makes it easy to design functions that run
|
||||
for different events. We do this by way of defining hook types, and then
|
||||
inserting them at different places in the spack code base. Whenever a hook
|
||||
type triggers by way of a function call, we find all the hooks of that type,
|
||||
and run them.
|
||||
|
||||
Spack defines hooks by way of a module at ``lib/spack/spack/hooks`` where we can define
|
||||
types of hooks in the ``__init__.py``, and then python files in that folder
|
||||
can use hook functions. The files are automatically parsed, so if you write
|
||||
a new file for some integration (e.g., ``lib/spack/spack/hooks/myintegration.py``)
|
||||
you can then write hook functions in that file that will be automatically detected,
|
||||
and run whenever your hook is called. This section will cover the basic kinds
|
||||
of hooks, and how to write them.
|
||||
|
||||
^^^^^^^^^^^^^^
|
||||
Types of Hooks
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
The following hooks are currently implemented to make it easy for you,
|
||||
the developer, to add hooks at different stages of a spack install or similar.
|
||||
If there is a hook that you would like and is missing, you can propose to add a new one.
|
||||
|
||||
"""""""""""""""""""""
|
||||
``pre_install(spec)``
|
||||
"""""""""""""""""""""
|
||||
|
||||
A ``pre_install`` hook is run within an install subprocess, directly before
|
||||
the install starts. It expects a single argument of a spec, and is run in
|
||||
a multiprocessing subprocess. Note that if you see ``pre_install`` functions associated with packages, these are not hooks
|
||||
as we have defined them here, but rather callback functions associated with
|
||||
a package install.
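A minimal callback of this type, placed in a file under ``lib/spack/spack/hooks``,
might look like the following sketch (the body is only illustrative):

.. code-block:: python

   def pre_install(spec):
       """Run in the install subprocess right before ``spec`` is installed."""
       print("About to install {0}".format(spec.name))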
|
||||
|
||||
|
||||
""""""""""""""""""""""
|
||||
``post_install(spec)``
|
||||
""""""""""""""""""""""
|
||||
|
||||
A ``post_install`` hook is run within an install subprocess, directly after
|
||||
the install finishes, but before the build stage is removed. If you
|
||||
write one of these hooks, you should expect it to accept a spec as the only
|
||||
argument. This is run in a multiprocessing subprocess. This ``post_install`` is
|
||||
also seen in packages, but in this context not related to the hooks described
|
||||
here.
|
||||
|
||||
|
||||
""""""""""""""""""""""""""
|
||||
``on_install_start(spec)``
|
||||
""""""""""""""""""""""""""
|
||||
|
||||
This hook is run at the beginning of ``lib/spack/spack/installer.py``,
|
||||
in the install function of a ``PackageInstaller``,
|
||||
and importantly runs before, not within, a build process. This is when
we have just grabbed the task and are preparing to install. If you
write a hook of this type, it should accept the spec as its argument.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def on_install_start(spec):
|
||||
"""On start of an install, we want to...
|
||||
"""
|
||||
print('on_install_start')
|
||||
|
||||
|
||||
""""""""""""""""""""""""""""
|
||||
``on_install_success(spec)``
|
||||
""""""""""""""""""""""""""""
|
||||
|
||||
This hook is run on a successful install, and is also run inside the build
|
||||
process, akin to ``post_install``. The main difference is that this hook
|
||||
is run outside of the context of the stage directory, meaning after the
|
||||
build stage has been removed and the user is alerted that the install was
|
||||
successful. If you need to write a hook that is run on success of a particular
|
||||
phase, you should use ``on_phase_success``.
|
||||
|
||||
""""""""""""""""""""""""""""
|
||||
``on_install_failure(spec)``
|
||||
""""""""""""""""""""""""""""
|
||||
|
||||
This hook is run given an install failure that happens outside of the build
|
||||
subprocess, but somewhere in ``installer.py`` when something else goes wrong.
|
||||
If you need to write a hook that is relevant to a failure within a build
|
||||
process, you would want to instead use ``on_phase_error``.
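As with the other install hooks, a callback for this hook accepts the spec; a
minimal sketch (with an illustrative body) could be:

.. code-block:: python

   def on_install_failure(spec):
       """React to an install of ``spec`` failing outside the build process."""
       print("Install of {0} failed".format(spec.name))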
|
||||
|
||||
|
||||
"""""""""""""""""""""""""""""""""""""""""""""""
|
||||
``on_phase_success(pkg, phase_name, log_file)``
|
||||
"""""""""""""""""""""""""""""""""""""""""""""""
|
||||
|
||||
This hook is run within the install subprocess, and specifically when a phase
|
||||
successfully finishes. Since we are interested in the package, the name of
|
||||
the phase, and any output from it, we require:
|
||||
|
||||
- **pkg**: the package variable, which also has the attached spec at ``pkg.spec``
|
||||
- **phase_name**: the name of the phase that was successful (e.g., configure)
|
||||
- **log_file**: the path to the file with output, in case you need to inspect or otherwise interact with it.
|
||||
|
||||
"""""""""""""""""""""""""""""""""""""""""""""
|
||||
``on_phase_error(pkg, phase_name, log_file)``
|
||||
"""""""""""""""""""""""""""""""""""""""""""""
|
||||
|
||||
In the case of an error during a phase, we might want to trigger some event
|
||||
with a hook, and this is the purpose of this particular hook. Akin to
|
||||
``on_phase_success``, we require the same variables: the package that failed,
|
||||
the name of the phase, and the log file where we might find errors.
|
||||
|
||||
"""""""""""""""""""""""""""""""""
|
||||
``on_analyzer_save(pkg, result)``
|
||||
"""""""""""""""""""""""""""""""""
|
||||
|
||||
After an analyzer has saved some result for a package, this hook is called,
|
||||
and it provides the package that we just ran the analysis for, along with
|
||||
the loaded result. Typically, a result is structured to have the name
|
||||
of the analyzer as key, and the result object that is defined in detail in
|
||||
:ref:`analyzer_run_function`.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def on_analyzer_save(pkg, result):
|
||||
"""given a package and a result...
|
||||
"""
|
||||
print('Do something extra with a package analysis result here')
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
Adding a New Hook Type
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Adding a new hook type is very simple! In ``lib/spack/spack/hooks/__init__.py``
|
||||
you can simply create a new ``HookRunner`` that is named to match your new hook.
|
||||
For example, let's say you want to add a new hook called ``post_log_write``
|
||||
to trigger after anything is written to a logger. You would add it as follows:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# pre/post install and run by the install subprocess
|
||||
pre_install = HookRunner('pre_install')
|
||||
post_install = HookRunner('post_install')
|
||||
|
||||
# hooks related to logging
|
||||
post_log_write = HookRunner('post_log_write') # <- here is my new hook!
|
||||
|
||||
|
||||
You then need to decide what arguments your hook should expect. Since this is
related to logging, let's say that you want a message and a level. You can then
add a python file to the ``lib/spack/spack/hooks`` folder with one or more
callbacks intended to be triggered by this hook. You might use your new hook as
follows:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def post_log_write(message, level):
|
||||
"""Do something custom with the messsage and level every time we write
|
||||
to the log
|
||||
"""
|
||||
print('running post_log_write!')
|
||||
|
||||
|
||||
To use the hook, we would call it as follows somewhere in the logic to do logging.
|
||||
In this example, we use it outside of a logger that is already defined:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import spack.hooks
|
||||
|
||||
# We do something here to generate a logger and message
|
||||
spack.hooks.post_log_write(message, logger.level)
|
||||
|
||||
|
||||
This is not to say that this would be the best way to implement an integration
|
||||
with the logger (you'd probably want to write a custom logger, or you could
|
||||
have the hook defined within the logger) but serves as an example of writing a hook.
|
||||
|
||||
----------
|
||||
Unit tests
|
||||
----------
|
||||
@@ -795,12 +363,11 @@ Developer commands
|
||||
``spack doc``
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
``spack unit-test``
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
^^^^^^^^^^^^^^
|
||||
``spack test``
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
See the :ref:`contributor guide section <cmd-spack-unit-test>` on
|
||||
``spack unit-test``.
|
||||
See the :ref:`contributor guide section <cmd-spack-test>` on ``spack test``.
|
||||
|
||||
.. _cmd-spack-python:
|
||||
|
||||
@@ -828,89 +395,23 @@ other Spack modules:
|
||||
True
|
||||
>>>
|
||||
|
||||
If you prefer using an IPython interpreter and IPython is installed,
|
||||
you can specify the interpreter with ``-i``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack python -i ipython
|
||||
Python 3.8.3 (default, May 19 2020, 18:47:26)
|
||||
Type 'copyright', 'credits' or 'license' for more information
|
||||
IPython 7.17.0 -- An enhanced Interactive Python. Type '?' for help.
|
||||
|
||||
|
||||
Spack version 0.16.0
|
||||
Python 3.8.3, Linux x86_64
|
||||
|
||||
In [1]:
|
||||
|
||||
|
||||
With either interpreter you can run a single command:
|
||||
You can also run a single command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack python -c 'import distro; distro.linux_distribution()'
|
||||
('Ubuntu', '18.04', 'Bionic Beaver')
|
||||
|
||||
$ spack python -i ipython -c 'import distro; distro.linux_distribution()'
|
||||
Out[1]: ('Ubuntu', '18.04', 'Bionic Beaver')
|
||||
('Fedora', '25', 'Workstation Edition')
|
||||
|
||||
or a file:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack python ~/test_fetching.py
|
||||
$ spack python -i ipython ~/test_fetching.py
|
||||
|
||||
just like you would with the normal ``python`` command.
|
||||
|
||||
|
||||
.. _cmd-spack-url:
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^
|
||||
``spack blame``
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Spack blame is a way to quickly see contributors to packages or files
|
||||
in the spack repository. You should provide a target package name or
|
||||
file name to the command. Here is an example asking to see contributions
|
||||
for the package "python":
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack blame python
|
||||
LAST_COMMIT LINES % AUTHOR EMAIL
|
||||
2 weeks ago 3 0.3 Mickey Mouse <cheddar@gmouse.org>
|
||||
a month ago 927 99.7 Minnie Mouse <swiss@mouse.org>
|
||||
|
||||
2 weeks ago 930 100.0
|
||||
|
||||
|
||||
By default, you will get a table view (shown above) sorted by date of contribution,
|
||||
with the most recent contribution at the top. If you want to sort instead
|
||||
by percentage of code contribution, then add ``-p``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack blame -p python
|
||||
|
||||
|
||||
And to see the git blame view, add ``-g`` instead:
|
||||
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack blame -g python
|
||||
|
||||
|
||||
Finally, to get a json export of the data, add ``--json``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack blame --json python
|
||||
|
||||
|
||||
^^^^^^^^^^^^^
|
||||
``spack url``
|
||||
^^^^^^^^^^^^^
|
||||
@@ -1051,10 +552,8 @@ develop onto release branches. This is typically done by cherry-picking
|
||||
bugfix commits off of ``develop``.
|
||||
|
||||
To avoid version churn for users of a release series, minor releases
|
||||
**should not** make changes that would change the concretization of
|
||||
should **not** make changes that would change the concretization of
|
||||
packages. They should generally only contain fixes to the Spack core.
|
||||
However, sometimes priorities are such that new functionality needs to
|
||||
be added to a minor release.
|
||||
|
||||
Both major and minor releases are tagged. After each release, we merge
|
||||
the release branch back into ``develop`` so that the version bump and any
|
||||
@@ -1063,51 +562,50 @@ convenience, we also tag the latest release as ``releases/latest``,
|
||||
so that users can easily check it out to get the latest
|
||||
stable version. See :ref:`merging-releases` for more details.
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Scheduling work for releases
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
We schedule work for releases by creating `GitHub projects
|
||||
<https://github.com/spack/spack/projects>`_. At any time, there may be
|
||||
several open release projects. For example, below are two releases (from
|
||||
several open release projects. For example, here are two releases (from
|
||||
some past version of the page linked above):
|
||||
|
||||
.. image:: images/projects.png
|
||||
|
||||
This image shows one release in progress for ``0.15.1`` and another for
|
||||
Here, there's one release in progress for ``0.15.1`` and another for
|
||||
``0.16.0``. Each of these releases has a project board containing issues
|
||||
and pull requests. GitHub shows a status bar with completed work in
|
||||
green, work in progress in purple, and work not started yet in gray, so
|
||||
it's fairly easy to see progress.
|
||||
|
||||
Spack's project boards are not firm commitments so we move work between
|
||||
Spack's project boards are not firm commitments, and we move work between
|
||||
releases frequently. If we need to make a release and some tasks are not
|
||||
yet done, we will simply move them to the next minor or major release, rather
|
||||
yet done, we will simply move them to next minor or major release, rather
|
||||
than delaying the release to complete them.
|
||||
|
||||
For more on using GitHub project boards, see `GitHub's documentation
|
||||
<https://docs.github.com/en/github/managing-your-work-on-github/about-project-boards>`_.
|
||||
|
||||
|
||||
.. _major-releases:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
Making major releases
|
||||
Making Major Releases
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Assuming a project board has already been created and all required work
|
||||
completed, the steps to make the major release are:
|
||||
Assuming you've already created a project board and completed the work
|
||||
for a major release, the steps to make the release are as follows:
|
||||
|
||||
#. Create two new project boards:
|
||||
|
||||
* One for the next major release
|
||||
* One for the next point release
|
||||
|
||||
#. Move any optional tasks that are not done to one of the new project boards.
|
||||
|
||||
In general, small bugfixes should go to the next point release. Major
|
||||
features, refactors, and changes that could affect concretization should
|
||||
go in the next major release.
|
||||
#. Move any tasks that aren't done yet to one of the new project boards.
|
||||
Small bugfixes should go to the next point release. Major features,
|
||||
refactors, and changes that could affect concretization should go in
|
||||
the next major release.
|
||||
|
||||
#. Create a branch for the release, based on ``develop``:
|
||||
|
||||
@@ -1119,20 +617,23 @@ completed, the steps to make the major release are:
|
||||
``releases/vX.Y``. That is, you should create a ``releases/vX.Y``
|
||||
branch if you are preparing the ``X.Y.0`` release.
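   For example, a minimal sketch of this step (``vX.Y`` follows the naming
   convention described above):

   .. code-block:: console

      $ git checkout develop
      $ git checkout -b releases/vX.Y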
|
||||
|
||||
#. Bump the version in ``lib/spack/spack/__init__.py``.
|
||||
|
||||
See `this example from 0.13.0
|
||||
#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.13.0
|
||||
<https://github.com/spack/spack/commit/8eeb64096c98b8a43d1c587f13ece743c864fba9>`_
|
||||
|
||||
#. Update ``CHANGELOG.md`` with major highlights in bullet form.
|
||||
#. Update the release version lists in these files to include the new version:
|
||||
|
||||
Use proper markdown formatting, like `this example from 0.15.0
|
||||
* ``lib/spack/spack/schema/container.py``
|
||||
* ``lib/spack/spack/container/images.json``
|
||||
|
||||
**TODO**: We should get rid of this step in some future release.
|
||||
|
||||
#. Update ``CHANGELOG.md`` with major highlights in bullet form. Use
|
||||
proper markdown formatting, like `this example from 0.15.0
|
||||
<https://github.com/spack/spack/commit/d4bf70d9882fcfe88507e9cb444331d7dd7ba71c>`_.
|
||||
|
||||
#. Push the release branch to GitHub.
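   For example, a possible invocation, assuming your ``origin`` remote points
   at ``spack/spack``:

   .. code-block:: console

      $ git push --set-upstream origin releases/vX.Y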
|
||||
|
||||
#. Make sure CI passes on the release branch, including:
|
||||
|
||||
* Regular unit tests
|
||||
* Build tests
|
||||
* The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_
|
||||
@@ -1150,33 +651,33 @@ completed, the steps to make the major release are:
|
||||
.. _point-releases:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
Making point releases
|
||||
Making Point Releases
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Assuming a project board has already been created and all required work
|
||||
completed, the steps to make the point release are:
|
||||
This assumes you've already created a project board for a point release
|
||||
and completed the work to be done for the release. To make a point
|
||||
release:
|
||||
|
||||
#. Create a new project board for the next point release.
|
||||
#. Create one new project board for the next point release.
|
||||
|
||||
#. Move any optional tasks that are not done to the next project board.
|
||||
#. Move any cards that aren't done yet to the next project board.
|
||||
|
||||
#. Check out the release branch (it should already exist).
|
||||
|
||||
For the ``X.Y.Z`` release, the release branch is called ``releases/vX.Y``.
|
||||
For ``v0.15.1``, you would check out ``releases/v0.15``:
|
||||
#. Check out the release branch (it should already exist). For the
|
||||
``X.Y.Z`` release, the release branch is called ``releases/vX.Y``. For
|
||||
``v0.15.1``, you would check out ``releases/v0.15``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ git checkout releases/v0.15
|
||||
|
||||
#. Cherry-pick each pull request in the ``Done`` column of the release
|
||||
project board onto the release branch.
|
||||
project onto the release branch.
|
||||
|
||||
This is **usually** fairly simple since we squash the commits from the
|
||||
vast majority of pull requests. That means there is only one commit
|
||||
vast majority of pull requests, which means there is only one commit
|
||||
per pull request to cherry-pick. For example, `this pull request
|
||||
<https://github.com/spack/spack/pull/15777>`_ has three commits, but
|
||||
they were squashed into a single commit on merge. You can see the
|
||||
they were squashed into a single commit on merge. You can see the
|
||||
commit that was created here:
|
||||
|
||||
.. image:: images/pr-commit.png
|
||||
@@ -1188,8 +689,9 @@ completed, the steps to make the point release are:
|
||||
|
||||
$ git cherry-pick 7e46da7
|
||||
|
||||
For pull requests that were rebased (or not squashed), you'll need to
|
||||
cherry-pick each associated commit individually.
|
||||
For pull requests that were rebased, you'll need to cherry-pick each
|
||||
rebased commit individually. There have not been any rebased PRs like
|
||||
this in recent point releases.
|
||||
|
||||
.. warning::
|
||||
|
||||
@@ -1202,35 +704,37 @@ completed, the steps to make the point release are:
|
||||
cherry-picked all the commits in order. This generally means there
|
||||
is some other intervening pull request that the one you're trying
|
||||
to pick depends on. In these cases, you'll need to make a judgment
|
||||
call regarding those pull requests. Consider the number of affected
|
||||
files and/or the resulting differences.
|
||||
call:
|
||||
|
||||
1. If the dependency changes are small, you might just cherry-pick it,
|
||||
too. If you do this, add the task to the release board.
|
||||
1. If the dependency is small, you might just cherry-pick it, too.
|
||||
If you do this, add it to the release board.
|
||||
|
||||
2. If the changes are large, then you may decide that this fix is not
|
||||
worth including in a point release, in which case you should remove
|
||||
the task from the release project.
|
||||
2. If it is large, then you may decide that this fix is not worth
|
||||
including in a point release, in which case you should remove it
|
||||
from the release project.
|
||||
|
||||
3. You can always decide to manually back-port the fix to the release
|
||||
branch if neither of the above options makes sense, but this can
|
||||
require a lot of work. It's seldom the right choice.
|
||||
|
||||
#. Bump the version in ``lib/spack/spack/__init__.py``.
|
||||
|
||||
See `this example from 0.14.1
|
||||
#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.14.1
|
||||
<https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
|
||||
|
||||
#. Update ``CHANGELOG.md`` with a list of the changes.
|
||||
#. Update the release version lists in these files to include the new version:
|
||||
|
||||
This is typically a summary of the commits you cherry-picked onto the
|
||||
release branch. See `the changelog from 0.14.1
|
||||
* ``lib/spack/spack/schema/container.py``
|
||||
* ``lib/spack/spack/container/images.json``
|
||||
|
||||
**TODO**: We should get rid of this step in some future release.
|
||||
|
||||
#. Update ``CHANGELOG.md`` with a list of bugfixes. This is typically just a
|
||||
summary of the commits you cherry-picked onto the release branch. See
|
||||
`the changelog from 0.14.1
|
||||
<https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
|
||||
|
||||
#. Push the release branch to GitHub.
|
||||
|
||||
#. Make sure CI passes on the release branch, including:
|
||||
|
||||
* Regular unit tests
|
||||
* Build tests
|
||||
* The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_
|
||||
@@ -1253,26 +757,23 @@ completed, the steps to make the point release are:
|
||||
Publishing a release on GitHub
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
#. Create the release in GitHub.
|
||||
#. Go to `github.com/spack/spack/releases
|
||||
<https://github.com/spack/spack/releases>`_ and click ``Draft a new
|
||||
release``. Set the following:
|
||||
|
||||
* Go to
|
||||
`github.com/spack/spack/releases <https://github.com/spack/spack/releases>`_
|
||||
and click ``Draft a new release``.
|
||||
* ``Tag version`` should start with ``v`` and contain *all three*
|
||||
parts of the version, e.g., ``v0.15.1``. This is the name of the tag
|
||||
that will be created.
|
||||
|
||||
* Set ``Tag version`` to the name of the tag that will be created.
|
||||
* ``Target`` should be the ``releases/vX.Y`` branch (e.g., ``releases/v0.15``).
|
||||
|
||||
The name should start with ``v`` and contain *all three*
|
||||
parts of the version (e.g. ``v0.15.0`` or ``v0.15.1``).
|
||||
* ``Release title`` should be ``vX.Y.Z`` (To match the tag, e.g., ``v0.15.1``).
|
||||
|
||||
* Set ``Target`` to the ``releases/vX.Y`` branch (e.g., ``releases/v0.15``).
|
||||
* For the text, paste the latest release markdown from your ``CHANGELOG.md``.
|
||||
|
||||
* Set ``Release title`` to ``vX.Y.Z`` to match the tag (e.g., ``v0.15.1``).
|
||||
You can save the draft and keep coming back to this as you prepare the release.
|
||||
|
||||
* Paste the latest release markdown from your ``CHANGELOG.md`` file as the text.
|
||||
|
||||
* Save the draft so you can keep coming back to it as you prepare the release.
|
||||
|
||||
#. When you are ready to finalize the release, click ``Publish release``.
|
||||
#. When you are done, click ``Publish release``.
|
||||
|
||||
#. Immediately after publishing, go back to
|
||||
`github.com/spack/spack/releases
|
||||
@@ -1280,28 +781,20 @@ Publishing a release on GitHub
|
||||
auto-generated ``.tar.gz`` file for the release. It's the ``Source
|
||||
code (tar.gz)`` link.
|
||||
|
||||
#. Click ``Edit`` on the release you just made and attach the downloaded
|
||||
#. Click ``Edit`` on the release you just did and attach the downloaded
|
||||
release tarball as a binary. This does two things:
|
||||
|
||||
#. Makes sure that the hash of our releases does not change over time.
|
||||
|
||||
GitHub sometimes annoyingly changes the way they generate tarballs
|
||||
that can result in the hashes changing if you rely on the
|
||||
#. Makes sure that the hash of our releases doesn't change over time.
|
||||
GitHub sometimes annoyingly changes the way they generate
|
||||
tarballs, and then hashes can change if you rely on the
|
||||
auto-generated tarball links.
|
||||
|
||||
#. Gets download counts on releases visible through the GitHub API.
|
||||
|
||||
GitHub tracks downloads of artifacts, but *not* the source
|
||||
#. Gets us download counts on releases visible through the GitHub
|
||||
API. GitHub tracks downloads of artifacts, but *not* the source
|
||||
links. See the `releases
|
||||
page <https://api.github.com/repos/spack/spack/releases>`_ and search
|
||||
for ``download_count`` to see this.
|
||||
|
||||
#. Go to `readthedocs.org <https://readthedocs.org/projects/spack>`_ and
|
||||
activate the release tag.
|
||||
|
||||
This builds the documentation and makes the released version
|
||||
selectable in the versions menu.
|
||||
|
||||
|
||||
.. _merging-releases:
|
||||
|
||||
@@ -1313,23 +806,23 @@ If the new release is the **highest** Spack release yet, you should
|
||||
also tag it as ``releases/latest``. For example, suppose the highest
|
||||
release is currently ``0.15.3``:
|
||||
|
||||
* If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
|
||||
it with ``releases/latest``, as these are higher than ``0.15.3``.
|
||||
* If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
|
||||
it with ``releases/latest``, as these are higher than ``0.15.3``.
|
||||
|
||||
* If you are making a new release of an **older** major version of
|
||||
Spack, e.g. ``0.14.4``, then you should not tag it as
|
||||
``releases/latest`` (as there are newer major versions).
|
||||
* If you are making a new release of an **older** major version of
|
||||
Spack, e.g. ``0.14.4``, then you should not tag it as
|
||||
``releases/latest`` (as there are newer major versions).
|
||||
|
||||
To tag ``releases/latest``, do this:
|
||||
To tag ``releases/latest``, do this:
|
||||
|
||||
.. code-block:: console
|
||||
.. code-block:: console
|
||||
|
||||
$ git checkout releases/vX.Y # vX.Y is the new release's branch
|
||||
$ git tag --force releases/latest
|
||||
$ git push --force --tags
|
||||
$ git checkout releases/vX.Y # vX.Y is the new release's branch
|
||||
$ git tag --force releases/latest
|
||||
$ git push --tags
|
||||
|
||||
The ``--force`` argument to ``git tag`` makes ``git`` overwrite the existing
|
||||
``releases/latest`` tag with the new one.
|
||||
The ``--force`` argument makes ``git`` overwrite the existing
|
||||
``releases/latest`` tag with the new one.
|
||||
|
||||
We also merge each release that we tag as ``releases/latest`` into ``develop``.
|
||||
Make sure to do this with a merge commit:
|
||||
@@ -1337,17 +830,17 @@ Make sure to do this with a merge commit:
|
||||
.. code-block:: console
|
||||
|
||||
$ git checkout develop
|
||||
$ git merge --no-ff -s ours vX.Y.Z # vX.Y.Z is the new release's tag
|
||||
$ git merge --no-ff vX.Y.Z # vX.Y.Z is the new release's tag
|
||||
$ git push
|
||||
|
||||
We merge back to ``develop`` because it:
|
||||
|
||||
* updates the version and ``CHANGELOG.md`` on ``develop``; and
|
||||
* updates the version and ``CHANGELOG.md`` on ``develop``.
|
||||
* ensures that your release tag is reachable from the head of
|
||||
``develop``.
|
||||
``develop``
|
||||
|
||||
We *must* use a real merge commit (via the ``--no-ff`` option) to
|
||||
ensure that the release tag is reachable from the tip of ``develop``.
|
||||
We *must* use a real merge commit (via the ``--no-ff`` option) because it
|
||||
ensures that the release tag is reachable from the tip of ``develop``.
|
||||
This is necessary for ``spack -V`` to work properly -- it uses ``git
|
||||
describe --tags`` to find the last reachable tag in the repository and
|
||||
reports how far we are from it. For example:
|
||||
@@ -1365,7 +858,6 @@ the release is complete and tagged. If you do it before you've tagged the
|
||||
release and later decide you want to tag some later commit, you'll need
|
||||
to merge again.
|
||||
|
||||
|
||||
.. _announcing-releases:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
@@ -1376,40 +868,20 @@ We announce releases in all of the major Spack communication channels.
|
||||
Publishing the release takes care of GitHub. The remaining channels are
|
||||
Twitter, Slack, and the mailing list. Here are the steps:
|
||||
|
||||
#. Announce the release on Twitter.
|
||||
#. Make a tweet to announce the release. It should link to the release's
|
||||
page on GitHub. You can base it on `this example tweet
|
||||
<https://twitter.com/spackpm/status/1231761858182307840>`_.
|
||||
|
||||
* Compose the tweet on the ``@spackpm`` account per the
|
||||
``spack-twitter`` slack channel.
|
||||
#. Ping ``@channel`` in ``#general`` on Slack (`spackpm.slack.com
|
||||
<https://spackpm.slack.com>`_) with a link to the tweet. The tweet
|
||||
will be shown inline so that you do not have to retype your release
|
||||
announcement.
|
||||
|
||||
* Be sure to include a link to the release's page on GitHub.
|
||||
#. Email the Spack mailing list to let them know about the release. As
|
||||
with the tweet, you likely want to link to the release's page on
|
||||
GitHub. It's also helpful to include some information directly in the
|
||||
email. You can base yours on this `example email
|
||||
<https://groups.google.com/forum/#!topic/spack/WT4CT9i_X4s>`_.
|
||||
|
||||
You can base the tweet on `this
|
||||
example <https://twitter.com/spackpm/status/1231761858182307840>`_.
|
||||
|
||||
#. Announce the release on Slack.
|
||||
|
||||
* Compose a message in the ``#general`` Slack channel
|
||||
(`spackpm.slack.com <https://spackpm.slack.com>`_).
|
||||
|
||||
* Preface the message with ``@channel`` to notify even those
|
||||
people not currently logged in.
|
||||
|
||||
* Be sure to include a link to the tweet above.
|
||||
|
||||
The tweet will be shown inline so that you do not have to retype
|
||||
your release announcement.
|
||||
|
||||
#. Announce the release on the Spack mailing list.
|
||||
|
||||
* Compose an email to the Spack mailing list.
|
||||
|
||||
* Be sure to include a link to the release's page on GitHub.
|
||||
|
||||
* It is also helpful to include some information directly in the
|
||||
email.
|
||||
|
||||
You can base your announcement on this `example
|
||||
email <https://groups.google.com/forum/#!topic/spack/WT4CT9i_X4s>`_.
|
||||
|
||||
Once you've completed the above steps, congratulations, you're done!
|
||||
You've finished making the release!
|
||||
Once you've announced the release, congratulations, you're done! You've
|
||||
finished making the release!
|
||||
|
||||
41
lib/spack/docs/docker_for_developers.rst
Normal file
@@ -0,0 +1,41 @@
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
.. _docker_for_developers:
|
||||
|
||||
=====================
|
||||
Docker for Developers
|
||||
=====================
|
||||
|
||||
This guide is intended for people who want to use our prepared docker
|
||||
environments to work on developing Spack or on Spack packages. It is
|
||||
meant to serve as the companion documentation for the :ref:`packaging-guide`.
|
||||
|
||||
--------
|
||||
Overview
|
||||
--------
|
||||
|
||||
To get started, all you need is the latest version of ``docker``.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ cd share/spack/docker
|
||||
$ source config/ubuntu.bash
|
||||
$ ./run-image.sh
|
||||
|
||||
This command should drop you into an interactive shell where you can run spack
|
||||
within an isolated docker container running ubuntu. The copy of spack being
|
||||
used should be tied to the working copy of your cloned git repo, so any changes
|
||||
you make should be immediately reflected in the running docker container. Feel
|
||||
free to add or modify any packages, or to hack on Spack itself. Your contained
|
||||
copy of spack should immediately reflect all changes.
|
||||
|
||||
To work within a container running a different linux distro, source one of the
|
||||
other environment files under ``config``.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ source config/fedora.bash
|
||||
$ ./run-image.sh
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -191,24 +191,44 @@ Environment has been activated. Similarly, the ``install`` and
|
||||
==> 0 installed packages
|
||||
|
||||
$ spack install zlib@1.2.11
|
||||
==> Installing zlib-1.2.11-q6cqrdto4iktfg6qyqcc5u4vmfmwb7iv
|
||||
==> No binary for zlib-1.2.11-q6cqrdto4iktfg6qyqcc5u4vmfmwb7iv found: installing from source
|
||||
==> zlib: Executing phase: 'install'
|
||||
[+] ~/spack/opt/spack/linux-rhel7-broadwell/gcc-8.1.0/zlib-1.2.11-q6cqrdto4iktfg6qyqcc5u4vmfmwb7iv
|
||||
==> Installing zlib
|
||||
==> Searching for binary cache of zlib
|
||||
==> Warning: No Spack mirrors are currently configured
|
||||
==> No binary for zlib found: installing from source
|
||||
==> Fetching http://zlib.net/fossils/zlib-1.2.11.tar.gz
|
||||
######################################################################## 100.0%
|
||||
==> Staging archive: /spack/var/spack/stage/zlib-1.2.11-3r4cfkmx3wwfqeof4bc244yduu2mz4ur/zlib-1.2.11.tar.gz
|
||||
==> Created stage in /spack/var/spack/stage/zlib-1.2.11-3r4cfkmx3wwfqeof4bc244yduu2mz4ur
|
||||
==> No patches needed for zlib
|
||||
==> Building zlib [Package]
|
||||
==> Executing phase: 'install'
|
||||
==> Successfully installed zlib
|
||||
Fetch: 0.36s. Build: 11.58s. Total: 11.93s.
|
||||
[+] /spack/opt/spack/linux-rhel7-x86_64/gcc-4.9.3/zlib-1.2.11-3r4cfkmx3wwfqeof4bc244yduu2mz4ur
|
||||
|
||||
$ spack env activate myenv
|
||||
|
||||
$ spack find
|
||||
==> In environment myenv
|
||||
==> No root specs
|
||||
|
||||
==> 0 installed packages
|
||||
|
||||
$ spack install zlib@1.2.8
|
||||
==> Installing zlib-1.2.8-yfc7epf57nsfn2gn4notccaiyxha6z7x
|
||||
==> No binary for zlib-1.2.8-yfc7epf57nsfn2gn4notccaiyxha6z7x found: installing from source
|
||||
==> zlib: Executing phase: 'install'
|
||||
[+] ~/spack/opt/spack/linux-rhel7-broadwell/gcc-8.1.0/zlib-1.2.8-yfc7epf57nsfn2gn4notccaiyxha6z7x
|
||||
==> Updating view at ~/spack/var/spack/environments/myenv/.spack-env/view
|
||||
==> Installing zlib
|
||||
==> Searching for binary cache of zlib
|
||||
==> Warning: No Spack mirrors are currently configured
|
||||
==> No binary for zlib found: installing from source
|
||||
==> Fetching http://zlib.net/fossils/zlib-1.2.8.tar.gz
|
||||
######################################################################## 100.0%
|
||||
==> Staging archive: /spack/var/spack/stage/zlib-1.2.8-y2t6kq3s23l52yzhcyhbpovswajzi7f7/zlib-1.2.8.tar.gz
|
||||
==> Created stage in /spack/var/spack/stage/zlib-1.2.8-y2t6kq3s23l52yzhcyhbpovswajzi7f7
|
||||
==> No patches needed for zlib
|
||||
==> Building zlib [Package]
|
||||
==> Executing phase: 'install'
|
||||
==> Successfully installed zlib
|
||||
Fetch: 0.26s. Build: 2.08s. Total: 2.35s.
|
||||
[+] /spack/opt/spack/linux-rhel7-x86_64/gcc-4.9.3/zlib-1.2.8-y2t6kq3s23l52yzhcyhbpovswajzi7f7
|
||||
|
||||
$ spack find
|
||||
==> In environment myenv
|
||||
@@ -216,17 +236,15 @@ Environment has been activated. Similarly, the ``install`` and
|
||||
zlib@1.2.8
|
||||
|
||||
==> 1 installed package
|
||||
-- linux-rhel7-broadwell / gcc@8.1.0 ----------------------------
|
||||
-- linux-rhel7-x86_64 / gcc@4.9.3 -------------------------------
|
||||
zlib@1.2.8
|
||||
|
||||
$ despacktivate
|
||||
|
||||
$ spack find
|
||||
==> 2 installed packages
|
||||
-- linux-rhel7-broadwell / gcc@8.1.0 ----------------------------
|
||||
-- linux-rhel7-x86_64 / gcc@4.9.3 -------------------------------
|
||||
zlib@1.2.8 zlib@1.2.11
|
||||
|
||||
|
||||
Note that when we installed the abstract spec ``zlib@1.2.8``, it was
|
||||
presented as a root of the Environment. All explicitly installed
|
||||
packages will be listed as roots of the Environment.
|
||||
@@ -248,9 +266,9 @@ Users can add abstract specs to an Environment using the ``spack add``
|
||||
command. The most important component of an Environment is a list of
|
||||
abstract specs.
|
||||
|
||||
Adding a spec adds to the manifest (the ``spack.yaml`` file), which is
|
||||
used to define the roots of the Environment, but does not affect the
|
||||
concrete specs in the lockfile, nor does it install the spec.
|
||||
Adding a spec adds to the manifest (the ``spack.yaml`` file) and to
|
||||
the roots of the Environment, but does not affect the concrete specs
|
||||
in the lockfile, nor does it install the spec.
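For example, a minimal sketch using a spec that already appears in this
chapter:

.. code-block:: console

   # records zlib@1.2.8 in spack.yaml as a root; nothing is concretized
   # or installed yet
   $ spack add zlib@1.2.8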
|
||||
|
||||
The ``spack add`` command is environment aware. It adds to the
|
||||
currently active environment. All environment aware commands can also
|
||||
@@ -331,9 +349,6 @@ installed specs using the ``-c`` (``--concretized``) flag.
|
||||
|
||||
==> 0 installed packages
|
||||
|
||||
|
||||
.. _installing-environment:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Installing an Environment
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@@ -356,18 +371,6 @@ command also stores a Spack repo containing the ``package.py`` file
|
||||
used at install time for each package in the ``repos/`` directory in
|
||||
the Environment.
|
||||
|
||||
The ``--no-add`` option can be used in a concrete environment to tell
|
||||
spack to install specs already present in the environment but not to
|
||||
add any new root specs to the environment. For root specs provided
|
||||
to ``spack install`` on the command line, ``--no-add`` is the default,
|
||||
while for dependency specs, on the other hand, it is optional. In other
|
||||
words, if there is an unambiguous match in the active concrete environment
|
||||
for a root spec provided to ``spack install`` on the command line, spack
|
||||
does not require you to specify the ``--no-add`` option to prevent the spec
|
||||
from being added again. At the same time, a spec that already exists in the
|
||||
environment, but only as a dependency, will be added to the environment as a
|
||||
root spec without the ``--no-add`` option.
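A short sketch of both behaviors (the ``cmake`` spec is illustrative):

.. code-block:: console

   # install a matching spec from the environment without adding any new roots
   $ spack install --no-add cmake

   # without --no-add, a spec that is currently only a dependency in the
   # environment would also be promoted to a root spec
   $ spack install cmake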
|
||||
|
||||
^^^^^^^
|
||||
Loading
|
||||
^^^^^^^
|
||||
@@ -411,12 +414,6 @@ There are two ways to include configuration information in a Spack Environment:
|
||||
|
||||
#. Included in the ``spack.yaml`` file from another file.
|
||||
|
||||
Many Spack commands also affect configuration information in files
|
||||
automatically. Those commands take a ``--scope`` argument, and the
|
||||
environment can be specified by ``env:NAME`` (to affect environment
|
||||
``foo``, set ``--scope env:foo``). These commands will automatically
|
||||
manipulate configuration inline in the ``spack.yaml`` file.
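For example, a hypothetical invocation against an environment named ``foo``:

.. code-block:: console

   # write detected compiler configuration into foo's spack.yaml
   # rather than into a user or site scope
   $ spack compiler find --scope env:foo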
|
||||
|
||||
"""""""""""""""""""""
|
||||
Inline configurations
|
||||
"""""""""""""""""""""
|
||||
@@ -459,8 +456,8 @@ Environments can include files with either relative or absolute
|
||||
paths. Inline configurations take precedence over included
|
||||
configurations, so you don't have to change shared configuration files
|
||||
to make small changes to an individual Environment. Included configs
|
||||
listed earlier will have higher precedence, as the included configs are
|
||||
applied in reverse order.
|
||||
listed later will have higher precedence, as the included configs are
|
||||
applied in order.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Manually Editing the Specs List
|
||||
@@ -723,8 +720,6 @@ Spack Environment managed views are updated every time the environment
|
||||
is written out to the lock file ``spack.lock``, so the concrete
|
||||
environment and the view are always compatible.
|
||||
|
||||
.. _configuring_environment_views:
|
||||
|
||||
"""""""""""""""""""""""""""""
|
||||
Configuring environment views
|
||||
"""""""""""""""""""""""""""""
|
||||
@@ -835,10 +830,8 @@ environment for Spack commands. The arguments ``-v,--with-view`` and
|
||||
behavior is to activate with the environment view if there is one.
|
||||
|
||||
The environment variables affected by the ``spack env activate``
|
||||
command and the paths that are used to update them are determined by
|
||||
the :ref:`prefix inspections <customize-env-modifications>` defined in
|
||||
your modules configuration; the defaults are summarized in the following
|
||||
table.
|
||||
command and the paths that are used to update them are in the
|
||||
following table.
|
||||
|
||||
=================== =========
|
||||
Variable Paths
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -87,12 +87,11 @@ will be available from the command line:
|
||||
--implicit select specs that are not installed or were installed implicitly
|
||||
--output OUTPUT where to dump the result
|
||||
|
||||
The corresponding unit tests can be run by giving the appropriate options
|
||||
to ``spack unit-test``:
|
||||
The corresponding unit tests can be run by giving the appropriate options to ``spack test``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack unit-test --extension=scripting
|
||||
$ spack test --extension=scripting
|
||||
|
||||
============================================================== test session starts ===============================================================
|
||||
platform linux2 -- Python 2.7.15rc1, pytest-3.2.5, py-1.4.34, pluggy-0.4.0
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -48,8 +48,8 @@ platform, all on the command line.
|
||||
# Add compiler flags using the conventional names
|
||||
$ spack install mpileaks@1.1.2 %gcc@4.7.3 cppflags="-O3 -floop-block"
|
||||
|
||||
# Cross-compile for a different micro-architecture with target=
|
||||
$ spack install mpileaks@1.1.2 target=icelake
|
||||
# Cross-compile for a different architecture with arch=
|
||||
$ spack install mpileaks@1.1.2 arch=bgqos_0
|
||||
|
||||
Users can specify as many or few options as they care about. Spack
|
||||
will fill in the unspecified values with sensible defaults. The two listed
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -16,18 +16,15 @@ Prerequisites
|
||||
Spack has the following minimum requirements, which must be installed
|
||||
before Spack is run:
|
||||
|
||||
#. Python 2 (2.6 or 2.7) or 3 (3.5 - 3.9) to run Spack
|
||||
#. Python 2 (2.6 or 2.7) or 3 (3.5 - 3.8) to run Spack
|
||||
#. A C/C++ compiler for building
|
||||
#. The ``make`` executable for building
|
||||
#. The ``tar``, ``gzip``, ``unzip``, ``bzip2``, ``xz`` and optionally ``zstd``
|
||||
executables for extracting source code
|
||||
#. The ``patch`` command to apply patches
|
||||
#. The ``git`` and ``curl`` commands for fetching
|
||||
#. If using the ``gpg`` subcommand, ``gnupg2`` is required
|
||||
|
||||
These requirements can be easily installed on most modern Linux systems;
|
||||
on macOS, XCode is required. Spack is designed to run on HPC
|
||||
platforms like Cray. Not all packages should be expected
|
||||
on Macintosh, XCode is required. Spack is designed to run on HPC
|
||||
platforms like Cray and BlueGene/Q. Not all packages should be expected
|
||||
to work on all platforms. A build matrix showing which packages are
|
||||
working on which systems is planned but not yet available.
|
||||
|
||||
@@ -44,64 +41,35 @@ Getting Spack is easy. You can clone it from the `github repository
|
||||
|
||||
This will create a directory called ``spack``.
|
||||
|
||||
.. _shell-support:
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Add Spack to the Shell
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
^^^^^^^^^^^^^
|
||||
Shell support
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
Once you have cloned Spack, we recommend sourcing the appropriate script
|
||||
for your shell:
|
||||
We'll assume that the full path to your downloaded Spack directory is
|
||||
in the ``SPACK_ROOT`` environment variable. Add ``$SPACK_ROOT/bin``
|
||||
to your path and you're ready to go:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# For bash/zsh/sh
|
||||
$ . spack/share/spack/setup-env.sh
|
||||
$ export PATH=$SPACK_ROOT/bin:$PATH
|
||||
$ spack install libelf
|
||||
|
||||
# For tcsh/csh
|
||||
$ source spack/share/spack/setup-env.csh
|
||||
For a richer experience, use Spack's shell support:
|
||||
|
||||
# For fish
|
||||
$ . spack/share/spack/setup-env.fish
|
||||
.. code-block:: console
|
||||
|
||||
That's it! You're ready to use Spack.
|
||||
# For bash/zsh users
|
||||
$ export SPACK_ROOT=/path/to/spack
|
||||
$ . $SPACK_ROOT/share/spack/setup-env.sh
|
||||
|
||||
Sourcing these files will put the ``spack`` command in your ``PATH``, set
|
||||
up your ``MODULEPATH`` to use Spack's packages, and add other useful
|
||||
shell integration for :ref:`certain commands <packaging-shell-support>`,
|
||||
:ref:`environments <environments>`, and :ref:`modules <modules>`. For
|
||||
``bash`` and ``zsh``, it also sets up tab completion.
|
||||
|
||||
In order to know which directory to add to your ``MODULEPATH``, these scripts
|
||||
query the ``spack`` command. On shared filesystems, this can be a bit slow,
|
||||
especially if you log in frequently. If you don't use modules, or want to set
|
||||
``MODULEPATH`` manually instead, you can set the ``SPACK_SKIP_MODULES``
|
||||
environment variable to skip this step and speed up sourcing the file.
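For example, a sketch (setting the variable to ``1`` is an assumption; the
text above only requires that it be set):

.. code-block:: console

   # skip the MODULEPATH query to speed up sourcing
   $ export SPACK_SKIP_MODULES=1
   $ . spack/share/spack/setup-env.sh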
|
||||
|
||||
If you do not want to use Spack's shell support, you can always just run
|
||||
the ``spack`` command directly from ``spack/bin/spack``.
|
||||
|
||||
When the ``spack`` command is executed it searches for an appropriate
|
||||
Python interpreter to use, which can be explicitly overridden by setting
|
||||
the ``SPACK_PYTHON`` environment variable. When sourcing the appropriate shell
|
||||
setup script, ``SPACK_PYTHON`` will be set to the interpreter found at
|
||||
sourcing time, ensuring future invocations of the ``spack`` command will
|
||||
continue to use the same consistent python version regardless of changes in
|
||||
the environment.
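A minimal sketch (the interpreter path is illustrative):

.. code-block:: console

   # use this interpreter for spack, regardless of what is first in PATH
   $ export SPACK_PYTHON=/usr/bin/python3
   $ . spack/share/spack/setup-env.sh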
|
||||
# For tcsh or csh users (note you must set SPACK_ROOT)
|
||||
$ setenv SPACK_ROOT /path/to/spack
|
||||
$ source $SPACK_ROOT/share/spack/setup-env.csh
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
Check Installation
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
With Spack installed, you should be able to run some basic Spack
|
||||
commands. For example:
|
||||
|
||||
.. command-output:: spack spec netcdf-c
|
||||
|
||||
In theory, Spack doesn't need any additional installation; just
|
||||
download and run! But in real life, additional steps are usually
|
||||
required before Spack can work in a practical sense. Read on...
|
||||
This automatically adds Spack to your ``PATH`` and allows the ``spack``
|
||||
command to be used to execute spack :ref:`commands <shell-support>` and
|
||||
:ref:`useful packaging commands <packaging-shell-support>`.
|
||||
|
||||
^^^^^^^^^^^^^^^^^
|
||||
Clean Environment
|
||||
@@ -117,52 +85,16 @@ environment*, especially for ``PATH``. Only software that comes with
|
||||
the system, or that you know you wish to use with Spack, should be
|
||||
included. This procedure will avoid many strange build errors.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Optional: Bootstrapping clingo
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Spack supports using clingo as an external solver to compute which software
|
||||
needs to be installed. If you have a default compiler supporting C++14 Spack
|
||||
can automatically bootstrap this tool from sources the first time it is
|
||||
needed:
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
Check Installation
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code-block:: console
|
||||
With Spack installed, you should be able to run some basic Spack
|
||||
commands. For example:
|
||||
|
||||
$ spack solve zlib
|
||||
[+] /usr (external bison-3.0.4-wu5pgjchxzemk5ya2l3ddqug2d7jv6eb)
|
||||
[+] /usr (external cmake-3.19.4-a4kmcfzxxy45mzku4ipmj5kdiiz5a57b)
|
||||
[+] /usr (external python-3.6.9-x4fou4iqqlh5ydwddx3pvfcwznfrqztv)
|
||||
==> Installing re2c-1.2.1-e3x6nxtk3ahgd63ykgy44mpuva6jhtdt
|
||||
[ ... ]
|
||||
==> Optimization: [0, 0, 0, 0, 0, 1, 0, 0, 0]
|
||||
zlib@1.2.11%gcc@10.1.0+optimize+pic+shared arch=linux-ubuntu18.04-broadwell
|
||||
.. command-output:: spack spec netcdf-c
|
||||
|
||||
If you want to speed up bootstrapping, you may try to search for ``cmake`` and ``bison``
|
||||
on your system:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack external find cmake bison
|
||||
==> The following specs have been detected on this system and added to /home/spack/.spack/packages.yaml
|
||||
bison@3.0.4 cmake@3.19.4
|
||||
|
||||
All the tools Spack needs for its own functioning are installed in a separate store, which lives
|
||||
under the ``${HOME}/.spack`` directory. The software installed there can be queried with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack find --bootstrap
|
||||
==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
|
||||
==> 3 installed packages
|
||||
-- linux-ubuntu18.04-x86_64 / gcc@10.1.0 ------------------------
|
||||
clingo-bootstrap@spack python@3.6.9 re2c@1.2.1
|
||||
|
||||
In case it's needed, the bootstrap store can also be cleaned with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack clean -b
|
||||
==> Removing software in "/home/spack/.spack/bootstrap/store"
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Optional: Alternate Prefix
|
||||
@@ -182,6 +114,15 @@ copy of spack installs packages into its own ``$PREFIX/opt``
|
||||
directory.
|
||||
|
||||
|
||||
^^^^^^^^^^
|
||||
Next Steps
|
||||
^^^^^^^^^^
|
||||
|
||||
In theory, Spack doesn't need any additional installation; just
|
||||
download and run! But in real life, additional steps are usually
|
||||
required before Spack can work in a practical sense. Read on...
|
||||
|
||||
|
||||
.. _compiler-config:
|
||||
|
||||
----------------------
|
||||
@@ -771,9 +712,8 @@ an OpenMPI installed in /opt/local, one would use:
|
||||
|
||||
packages:
|
||||
openmpi:
|
||||
externals:
|
||||
- spec: openmpi@1.10.1
|
||||
prefix: /opt/local
|
||||
paths:
|
||||
openmpi@1.10.1: /opt/local
|
||||
buildable: False
|
||||
|
||||
In general, Spack is easier to use and more reliable if it builds all of
|
||||
@@ -835,9 +775,8 @@ Then add the following to ``~/.spack/packages.yaml``:
|
||||
|
||||
packages:
|
||||
openssl:
|
||||
externals:
|
||||
- spec: openssl@1.0.2g
|
||||
prefix: /usr
|
||||
paths:
|
||||
openssl@1.0.2g: /usr
|
||||
buildable: False
|
||||
|
||||
|
||||
@@ -852,9 +791,8 @@ to add the following to ``packages.yaml``:
|
||||
|
||||
packages:
|
||||
netlib-lapack:
|
||||
externals:
|
||||
- spec: netlib-lapack@3.6.1
|
||||
prefix: /usr
|
||||
paths:
|
||||
netlib-lapack@3.6.1: /usr
|
||||
buildable: False
|
||||
all:
|
||||
providers:
|
||||
@@ -873,6 +811,100 @@ to add the following to ``packages.yaml``:
|
||||
present in PATH; however, it will have lower precedence compared to paths
|
||||
from other dependencies. This ensures that binaries in Spack dependencies
|
||||
are preferred over system binaries.
|
||||
|
||||
^^^^^^
|
||||
OpenGL
|
||||
^^^^^^
|
||||
|
||||
To use hardware-accelerated rendering from a system-supplied OpenGL driver,
|
||||
add something like the following to your ``packages`` configuration:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
packages:
|
||||
opengl:
|
||||
paths:
|
||||
opengl+glx@4.5: /usr
|
||||
buildable: False
|
||||
all:
|
||||
providers:
|
||||
gl: [opengl]
|
||||
glx: [opengl]
|
||||
|
||||
For `EGL <https://www.khronos.org/egl>`_ support, or for certain modern drivers,
|
||||
OpenGL calls are dispatched dynamically at run time to the hardware graphics
|
||||
implementation. This dynamic dispatch is performed using `libglvnd
|
||||
<https://github.com/NVIDIA/libglvnd>`_. In this mode, the graphics library
|
||||
(e.g.: opengl) must be built to work with libglvnd. Applications then link
|
||||
against libglvnd instead of the underlying implementation. Environment
|
||||
variables set at run time govern the process by which libglvnd loads the
|
||||
underlying implementation and dispatches calls to it. See `this
|
||||
<https://github.com/NVIDIA/libglvnd/issues/177#issuecomment-496562769>`_ comment
|
||||
for details on loading a specific GLX implementation and `this
|
||||
<https://github.com/NVIDIA/libglvnd/blob/master/src/EGL/icd_enumeration.md>`_
|
||||
page for information about EGL ICD enumeration.
|
||||
|
||||
This codependency between libglvnd and the underlying implementation is modeled
|
||||
in Spack with two packages for libglvnd: libglvnd, which provides libglvnd
|
||||
proper; and libglvnd-fe, a bundle package that depends on libglvnd and an
|
||||
implementation. Implementations that work through libglvnd are no longer
|
||||
providers for graphics virtual dependencies, like "gl" or "glx", but instead
|
||||
provide libglvnd versions of these dependencies ("libglvnd-be-gl",
|
||||
"libglvnd-be-glx", etc.). The libglvnd-fe package depends on these
|
||||
"libglvnd-be-..." virtual packages, which provide the actual implementation.
|
||||
It also depends on libglvnd, itself, and exposes its libraries to downstream
|
||||
applications. For correct operation, the Spack package for the underlying
|
||||
implementation has to set the runtime environment to ensure that it is loaded
|
||||
when an application linked against libglvnd runs. This last detail is
|
||||
important for users who want to set up an external OpenGL implementation that
|
||||
requires libglvnd to work. This setup requires modifying the ``modules``
|
||||
configuration so that modules generated for the external OpenGL implementation
|
||||
set the necessary environment variables.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
packages:
|
||||
opengl:
|
||||
paths:
|
||||
opengl@4.5+glx+egl+glvnd: /does/not/exist
|
||||
buildable: False
|
||||
variants: +glx+egl+glvnd
|
||||
libglvnd-fe:
|
||||
variants: +gl+glx+egl
|
||||
all:
|
||||
providers:
|
||||
glvnd-be-gl: [opengl]
|
||||
glvnd-be-glx: [opengl]
|
||||
glvnd-be-egl: [opengl]
|
||||
gl: [libglvnd-fe]
|
||||
glx: [libglvnd-fe]
|
||||
egl: [libglvnd-fe]
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
modules:
|
||||
tcl:
|
||||
opengl@4.5+glx+glvnd:
|
||||
environment:
|
||||
set:
|
||||
__GLX_VENDOR_LIBRARY_NAME: nvidia
|
||||
opengl@4.5+egl+glvnd:
|
||||
environment:
|
||||
set:
|
||||
__EGL_VENDOR_LIBRARY_FILENAMES: /usr/share/glvnd/egl_vendor.d/10_nvidia.json
|
||||
|
||||
One final detail about the above example is that it avoids setting the true
|
||||
root of the external OpenGL implementation, instead opting to set it to a path
|
||||
that is not expected to exist on the system. This is done for two reasons.
|
||||
First, Spack would add directories under this root to environment variables
|
||||
that would affect the process of building and installing other packages, such
|
||||
as ``PATH`` and ``PKG_CONFIG_PATH``. These additions may potentially prevent
|
||||
those packages from installing successfully, and this risk is especially great
|
||||
for paths that house many libraries and applications, like ``/usr``. Second,
|
||||
providing the true root of the external implementation in the ``packages``
|
||||
configuration is not necessary because libglvnd needs only the environment
|
||||
variables set above in the ``modules`` configuration to determine what OpenGL
|
||||
implementation to dispatch calls to at run time.
|
||||
|
||||
^^^
|
||||
Git
|
||||
@@ -1125,33 +1157,6 @@ Secret keys may also be later exported using the
|
||||
<https://www.digitalocean.com/community/tutorials/how-to-setup-additional-entropy-for-cloud-servers-using-haveged>`_
|
||||
provides a good overview of sources of randomness.
|
||||
|
||||
Here is an example of creating a key. Note that we provide a name for the key first
|
||||
(which we can use to reference the key later) and an email address:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack gpg create dinosaur dinosaur@thedinosaurthings.com
|
||||
|
||||
|
||||
If you want to export the key as you create it:
|
||||
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack gpg create --export key.pub dinosaur dinosaur@thedinosaurthings.com
|
||||
|
||||
Or the private key:
|
||||
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack gpg create --export-secret key.priv dinosaur dinosaur@thedinosaurthings.com
|
||||
|
||||
|
||||
You can include both ``--export`` and ``--export-secret``, each with
|
||||
an output file of choice, to export both.
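For example, combining the two flags shown above:

.. code-block:: console

   # create the key and export both the public and the private key
   $ spack gpg create --export key.pub --export-secret key.priv dinosaur dinosaur@thedinosaurthings.com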
|
||||
|
||||
|
||||
^^^^^^^^^^^^
|
||||
Listing keys
|
||||
^^^^^^^^^^^^
|
||||
@@ -1160,22 +1165,7 @@ In order to list the keys available in the keyring, the
|
||||
``spack gpg list`` command will list trusted keys with the ``--trusted`` flag
|
||||
and keys available for signing using ``--signing``. If you would like to
|
||||
remove keys from your keyring, ``spack gpg untrust <keyid>``. Key IDs can be
|
||||
email addresses, names, or (best) fingerprints. Here is an example of listing
|
||||
the key that we just created:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
gpgconf: socketdir is '/run/user/1000/gnupg'
|
||||
/home/spackuser/spack/opt/spack/gpg/pubring.kbx
|
||||
----------------------------------------------------------
|
||||
pub rsa4096 2021-03-25 [SC]
|
||||
60D2685DAB647AD4DB54125961E09BB6F2A0ADCB
|
||||
uid [ultimate] dinosaur (GPG created for Spack) <dinosaur@thedinosaurthings.com>
|
||||
|
||||
|
||||
Note that the name "dinosaur" can be seen under the uid, which is the unique
|
||||
id. We might need this reference if we want to export or otherwise reference the key.
|
||||
|
||||
email addresses, names, or (best) fingerprints.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Signing and Verifying Packages
|
||||
@@ -1190,38 +1180,6 @@ may also be used to create a signed file which contains the contents, but it
|
||||
is not recommended. Signed packages may be verified by using
|
||||
``spack gpg verify <file>``.
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^
|
||||
Exporting Keys
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
You will likely want to export a public key at some point, and that looks like this. Let's
|
||||
use the previous example and ask spack to export the key with uid "dinosaur."
|
||||
We will provide an output location (typically a `*.pub` file) and the name of
|
||||
the key.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack gpg export dinosaur.pub dinosaur
|
||||
|
||||
You can then look at the created file, `dinosaur.pub`, to see the exported key.
|
||||
If you want to include the private key, then just add `--secret`:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack gpg export --secret dinosaur.priv dinosaur
|
||||
|
||||
This will write the private key to the file `dinosaur.priv`.
|
||||
|
||||
.. warning::
|
||||
|
||||
You should be very careful about exporting private keys. You likely would
|
||||
only want to do this in the context of moving your spack installation to
|
||||
a different server, and wanting to preserve keys for a buildcache. If you
|
||||
are unsure about exporting, you can ask your local system administrator
|
||||
or for help on an issue or the Spack slack.
|
||||
|
||||
|
||||
.. _cray-support:
|
||||
|
||||
-------------
|
||||
@@ -1317,13 +1275,9 @@ Here's an example of an external configuration for cray modules:
|
||||
|
||||
packages:
|
||||
mpich:
|
||||
externals:
|
||||
- spec: "mpich@7.3.1%gcc@5.2.0 arch=cray_xc-haswell-CNL10"
|
||||
modules:
|
||||
- cray-mpich
|
||||
- spec: "mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-haswell-CNL10"
|
||||
modules:
|
||||
- cray-mpich
|
||||
modules:
|
||||
mpich@7.3.1%gcc@5.2.0 arch=cray_xc-haswell-CNL10: cray-mpich
|
||||
mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-haswell-CNL10: cray-mpich
|
||||
all:
|
||||
providers:
|
||||
mpi: [mpich]
|
||||
@@ -1335,7 +1289,7 @@ via module load.
|
||||
|
||||
.. note::
|
||||
|
||||
For Cray-provided packages, it is best to use ``modules:`` instead of ``prefix:``
|
||||
For Cray-provided packages, it is best to use ``modules:`` instead of ``paths:``
|
||||
in ``packages.yaml``, because the Cray Programming Environment heavily relies on
|
||||
modules (e.g., loading the ``cray-mpich`` module adds MPI libraries to the
|
||||
compiler wrapper link line).
|
||||
@@ -1351,31 +1305,19 @@ Here is an example of a full packages.yaml used at NERSC
|
||||
|
||||
packages:
|
||||
mpich:
|
||||
externals:
|
||||
- spec: "mpich@7.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
|
||||
modules:
|
||||
- cray-mpich
|
||||
- spec: "mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-SuSE11-ivybridge"
|
||||
modules:
|
||||
- cray-mpich
|
||||
modules:
|
||||
mpich@7.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge: cray-mpich
|
||||
mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-SuSE11-ivybridge: cray-mpich
|
||||
buildable: False
|
||||
netcdf:
|
||||
externals:
|
||||
- spec: "netcdf@4.3.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
|
||||
modules:
|
||||
- cray-netcdf
|
||||
- spec: "netcdf@4.3.3.1%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge"
|
||||
modules:
|
||||
- cray-netcdf
|
||||
modules:
|
||||
netcdf@4.3.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge: cray-netcdf
|
||||
netcdf@4.3.3.1%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge: cray-netcdf
|
||||
buildable: False
|
||||
hdf5:
|
||||
externals:
|
||||
- spec: "hdf5@1.8.14%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
|
||||
modules:
|
||||
- cray-hdf5
|
||||
- spec: "hdf5@1.8.14%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge"
|
||||
modules:
|
||||
- cray-hdf5
|
||||
modules:
|
||||
hdf5@1.8.14%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge: cray-hdf5
|
||||
hdf5@1.8.14%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge: cray-hdf5
|
||||
buildable: False
|
||||
all:
|
||||
compiler: [gcc@5.2.0, intel@16.0.0.109]
|
||||
@@ -1399,6 +1341,6 @@ environment variables may be propagated into containers that are not
|
||||
using the Cray programming environment.
|
||||
|
||||
To ensure that Spack does not autodetect the Cray programming
|
||||
environment, unset the environment variable ``MODULEPATH``. This
|
||||
environment, unset the environment variable ``CRAYPE_VERSION``. This
|
||||
will cause Spack to treat a linux container on a Cray system as a base
|
||||
linux distro.
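A short sketch of what this looks like inside the container, before running
any Spack commands:

.. code-block:: console

   # keep Spack from autodetecting the Cray programming environment
   # (some Spack versions check CRAYPE_VERSION instead, as noted above)
   $ unset MODULEPATH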
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -67,7 +67,6 @@ or refer to the full manual below.
|
||||
build_settings
|
||||
environments
|
||||
containers
|
||||
monitoring
|
||||
mirrors
|
||||
module_file_support
|
||||
repositories
|
||||
@@ -78,12 +77,6 @@ or refer to the full manual below.
|
||||
extensions
|
||||
pipelines
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Research
|
||||
|
||||
analyze
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Contributing
|
||||
@@ -92,6 +85,7 @@ or refer to the full manual below.
|
||||
packaging_guide
|
||||
build_systems
|
||||
developer_guide
|
||||
docker_for_developers
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -14,7 +14,7 @@ problems if you encounter them.
|
||||
Variants are not properly forwarded to dependencies
|
||||
---------------------------------------------------
|
||||
|
||||
**Status:** Expected to be fixed by Spack's new concretizer
|
||||
**Status:** Expected to be fixed in the next release
|
||||
|
||||
Sometimes, a variant of a package can also affect how its dependencies are
|
||||
built. For example, in order to build MPI support for a package, it may
|
||||
@@ -49,29 +49,15 @@ A workaround is to explicitly activate the variants of dependencies as well:
|
||||
See https://github.com/spack/spack/issues/267 and
|
||||
https://github.com/spack/spack/issues/2546 for further details.
|
||||
|
||||
-----------------------------------------------
|
||||
depends_on cannot handle recursive dependencies
|
||||
-----------------------------------------------
|
||||
|
||||
**Status:** Not yet a work in progress
|
||||
----------------------------
|
||||
``spack setup`` doesn't work
|
||||
----------------------------
|
||||
|
||||
Although ``depends_on`` can handle any aspect of Spack's spec syntax,
|
||||
it currently cannot handle recursive dependencies. If the ``^`` sigil
|
||||
appears in a ``depends_on`` statement, the concretizer will hang.
|
||||
For example, something like:
|
||||
**Status:** Work in progress
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
depends_on('mfem+cuda ^hypre+cuda', when='+cuda')
|
||||
|
||||
|
||||
should be rewritten as:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
depends_on('mfem+cuda', when='+cuda')
|
||||
depends_on('hypre+cuda', when='+cuda')
|
||||
|
||||
|
||||
See https://github.com/spack/spack/issues/17660 and
|
||||
https://github.com/spack/spack/issues/11160 for more details.
|
||||
Spack provides a ``setup`` command that is useful for the development of
|
||||
software outside of Spack. Unfortunately, this command no longer works.
|
||||
See https://github.com/spack/spack/issues/2597 and
|
||||
https://github.com/spack/spack/issues/2662 for details. This is expected
|
||||
to be fixed by https://github.com/spack/spack/pull/2664.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -159,27 +159,6 @@ can supply a file with specs in it, one per line:
|
||||
This is useful if there is a specific suite of software managed by
|
||||
your site.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
Mirror environment
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
To create a mirror of all packages required by a concrete environment, activate the environment and call ``spack mirror create -a``.
|
||||
This is especially useful to create a mirror of an environment concretized on another machine.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
[remote] $ spack env create myenv
|
||||
[remote] $ spack env activate myenv
|
||||
[remote] $ spack add ...
|
||||
[remote] $ spack concretize
|
||||
|
||||
$ sftp remote:/spack/var/environment/myenv/spack.lock
|
||||
$ spack env create myenv spack.lock
|
||||
$ spack env activate myenv
|
||||
$ spack mirror create -a
|
||||
|
||||
|
||||
|
||||
.. _cmd-spack-mirror-add:
|
||||
|
||||
--------------------
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -10,16 +10,14 @@ Modules
|
||||
=======
|
||||
|
||||
The use of module systems to manage user environment in a controlled way
|
||||
is a common practice at HPC centers that is often embraced also by
|
||||
individual programmers on their development machines. To support this
|
||||
common practice Spack integrates with `Environment Modules
|
||||
<http://modules.sourceforge.net/>`_ and `LMod
|
||||
<http://lmod.readthedocs.io/en/latest/>`_ by providing post-install hooks
|
||||
that generate module files and commands to manipulate them.
|
||||
is a common practice at HPC centers that is often embraced also by individual
|
||||
programmers on their development machines. To support this common practice
|
||||
Spack integrates with `Environment Modules
|
||||
<http://modules.sourceforge.net/>`_ and `LMod
|
||||
<http://lmod.readthedocs.io/en/latest/>`_ by
|
||||
providing post-install hooks that generate module files and commands to manipulate them.
|
||||
|
||||
Modules are one of several ways you can use Spack packages. For other
|
||||
options that may fit your use case better, you should also look at
|
||||
:ref:`spack load <spack-load>` and :ref:`environments <environments>`.
|
||||
.. _shell-support:
|
||||
|
||||
----------------------------
|
||||
Using module files via Spack
|
||||
@@ -62,33 +60,215 @@ to load the ``cmake`` module:
|
||||
|
||||
$ module load cmake-3.7.2-gcc-6.3.0-fowuuby
|
||||
|
||||
Neither of these is particularly pretty, easy to remember, or easy to
|
||||
type. Luckily, Spack offers many facilities for customizing the module
|
||||
scheme used at your site.
|
||||
Neither of these is particularly pretty, easy to remember, or
|
||||
easy to type. Luckily, Spack has its own interface for using modules.
|
||||
|
||||
^^^^^^^^^^^^^
|
||||
Shell support
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
To enable additional Spack commands for loading and unloading module files,
|
||||
and to add the correct path to ``MODULEPATH``, you need to source the appropriate
|
||||
setup file in the ``$SPACK_ROOT/share/spack`` directory. This will activate shell
|
||||
support for the commands that need it. For ``bash``, ``ksh`` or ``zsh`` users:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ . ${SPACK_ROOT}/share/spack/setup-env.sh
|
||||
|
||||
For ``csh`` and ``tcsh`` instead:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ setenv SPACK_ROOT ...
|
||||
$ source $SPACK_ROOT/share/spack/setup-env.csh
|
||||
|
||||
Note that in the latter case it is necessary to explicitly set ``SPACK_ROOT``
|
||||
before sourcing the setup file (you will get a meaningful error message
|
||||
if you don't).
|
||||
|
||||
If you want to have Spack's shell support available on the command line at
|
||||
any login you can put this source line in one of the files that are sourced
|
||||
at startup (like ``.profile``, ``.bashrc`` or ``.cshrc``). Be aware though
|
||||
that the startup time may be slightly increased because of that.
|
||||
|
||||
|
||||
.. _cmd-spack-load:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
``spack load / unload``
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Once you have shell support enabled you can use the same spec syntax
|
||||
you're used to and you can use the same shortened names you use
|
||||
everywhere else in Spack.
|
||||
|
||||
For example this will add the ``mpich`` package built with ``gcc`` to your path:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install mpich %gcc@4.4.7
|
||||
|
||||
# ... wait for install ...
|
||||
|
||||
$ spack load mpich %gcc@4.4.7
|
||||
$ which mpicc
|
||||
~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4/bin/mpicc
|
||||
|
||||
These commands will add appropriate directories to your ``PATH``,
|
||||
``MANPATH``, ``CPATH``, and ``LD_LIBRARY_PATH``. When you no longer
|
||||
want to use a package, you can type unload or unuse similarly:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack unload mpich %gcc@4.4.7
|
||||
|
||||
.. note::
|
||||
|
||||
The ``load`` and ``unload`` subcommands are only available if you
|
||||
have enabled Spack's shell support. These commands DO NOT use the
|
||||
underlying Spack-generated module files.
|
||||
|
||||
^^^^^^^^^^^^^^^
|
||||
Ambiguous specs
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
If a spec used with load/unload is ambiguous (i.e. more than one
|
||||
installed package matches it), then Spack will warn you:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack load libelf
|
||||
==> Error: libelf matches multiple packages.
|
||||
Matching packages:
|
||||
libelf@0.8.13%gcc@4.4.7 arch=linux-debian7-x86_64
|
||||
libelf@0.8.13%intel@15.0.0 arch=linux-debian7-x86_64
|
||||
Use a more specific spec
|
||||
|
||||
You can either type the ``spack load`` command again with a fully
|
||||
qualified argument, or you can add just enough extra constraints to
|
||||
identify one package. For example, above, the key differentiator is
|
||||
that one ``libelf`` is built with the Intel compiler, while the other
|
||||
used ``gcc``. You could therefore just type:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack load libelf %intel
|
||||
|
||||
This loads just the one built with the Intel compiler.
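
If adding constraints is awkward, another option (sketched here; the hash
prefix shown is hypothetical) is to disambiguate using the installation hash
that ``spack find --long`` prints next to each installed package:

.. code-block:: console

   $ spack find --long libelf    # note the short hash in front of each install
   $ spack load /4ab6fgd         # load by hash prefix; yours will differ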
|
||||
|
||||
.. _cmd-spack-module-loads:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
``spack module tcl loads``
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
In some cases, it is desirable to use a Spack-generated module, rather
|
||||
than relying on Spack's built-in user-environment modification
|
||||
capabilities. To translate a spec into a module name, use ``spack
|
||||
module tcl loads`` or ``spack module lmod loads`` depending on the
|
||||
module system desired.
|
||||
|
||||
|
||||
To load not just a module, but also all the modules it depends on, use
|
||||
the ``--dependencies`` option. This is not required for most modules
|
||||
because Spack builds binaries with RPATH support. However, not all
|
||||
packages use RPATH to find their dependencies: this can be true in
|
||||
particular for Python extensions, which are currently *not* built with
|
||||
RPATH.
|
||||
|
||||
Scripts to load modules recursively may be made with the command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack module tcl loads --dependencies <spec>
|
||||
|
||||
An equivalent alternative using `process substitution <http://tldp.org/LDP/abs/html/process-sub.html>`_ is:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ source <( spack module tcl loads --dependencies <spec> )
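
As a small usage sketch of the first form (the filename is arbitrary), the
generated ``module load`` lines can also be written to a file once and then
sourced from batch or job scripts later:

.. code-block:: console

   $ spack module tcl loads --dependencies <spec> > loads.sh
   $ source loads.sh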
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Module Commands for Shell Scripts
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Although Spack is flexible, the ``module`` command is much faster.
|
||||
This could become an issue when emitting a series of ``spack load``
|
||||
commands inside a shell script. By adding the ``--dependencies`` flag,
|
||||
``spack module tcl loads`` may also be used to generate code that can be
|
||||
cut-and-pasted into a shell script. For example:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack module tcl loads --dependencies py-numpy git
|
||||
# bzip2@1.0.6%gcc@4.9.3=linux-x86_64
|
||||
module load bzip2-1.0.6-gcc-4.9.3-ktnrhkrmbbtlvnagfatrarzjojmkvzsx
|
||||
# ncurses@6.0%gcc@4.9.3=linux-x86_64
|
||||
module load ncurses-6.0-gcc-4.9.3-kaazyneh3bjkfnalunchyqtygoe2mncv
|
||||
# zlib@1.2.8%gcc@4.9.3=linux-x86_64
|
||||
module load zlib-1.2.8-gcc-4.9.3-v3ufwaahjnviyvgjcelo36nywx2ufj7z
|
||||
# sqlite@3.8.5%gcc@4.9.3=linux-x86_64
|
||||
module load sqlite-3.8.5-gcc-4.9.3-a3eediswgd5f3rmto7g3szoew5nhehbr
|
||||
# readline@6.3%gcc@4.9.3=linux-x86_64
|
||||
module load readline-6.3-gcc-4.9.3-se6r3lsycrwxyhreg4lqirp6xixxejh3
|
||||
# python@3.5.1%gcc@4.9.3=linux-x86_64
|
||||
module load python-3.5.1-gcc-4.9.3-5q5rsrtjld4u6jiicuvtnx52m7tfhegi
|
||||
# py-setuptools@20.5%gcc@4.9.3=linux-x86_64
|
||||
module load py-setuptools-20.5-gcc-4.9.3-4qr2suj6p6glepnedmwhl4f62x64wxw2
|
||||
# py-nose@1.3.7%gcc@4.9.3=linux-x86_64
|
||||
module load py-nose-1.3.7-gcc-4.9.3-pwhtjw2dvdvfzjwuuztkzr7b4l6zepli
|
||||
# openblas@0.2.17%gcc@4.9.3+shared=linux-x86_64
|
||||
module load openblas-0.2.17-gcc-4.9.3-pw6rmlom7apfsnjtzfttyayzc7nx5e7y
|
||||
# py-numpy@1.11.0%gcc@4.9.3+blas+lapack=linux-x86_64
|
||||
module load py-numpy-1.11.0-gcc-4.9.3-mulodttw5pcyjufva4htsktwty4qd52r
|
||||
# curl@7.47.1%gcc@4.9.3=linux-x86_64
|
||||
module load curl-7.47.1-gcc-4.9.3-ohz3fwsepm3b462p5lnaquv7op7naqbi
|
||||
# autoconf@2.69%gcc@4.9.3=linux-x86_64
|
||||
module load autoconf-2.69-gcc-4.9.3-bkibjqhgqm5e3o423ogfv2y3o6h2uoq4
|
||||
# cmake@3.5.0%gcc@4.9.3~doc+ncurses+openssl~qt=linux-x86_64
|
||||
module load cmake-3.5.0-gcc-4.9.3-x7xnsklmgwla3ubfgzppamtbqk5rwn7t
|
||||
# expat@2.1.0%gcc@4.9.3=linux-x86_64
|
||||
module load expat-2.1.0-gcc-4.9.3-6pkz2ucnk2e62imwakejjvbv6egncppd
|
||||
# git@2.8.0-rc2%gcc@4.9.3+curl+expat=linux-x86_64
|
||||
module load git-2.8.0-rc2-gcc-4.9.3-3bib4hqtnv5xjjoq5ugt3inblt4xrgkd
|
||||
|
||||
The script may be further edited by removing unnecessary modules.
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^
|
||||
Module Prefixes
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
On some systems, modules are automatically prefixed with a certain
|
||||
string; ``spack module tcl loads`` needs to know about that prefix when it
|
||||
issues ``module load`` commands. Add the ``--prefix`` option to your
|
||||
``spack module tcl loads`` commands if this is necessary.
|
||||
|
||||
For example, consider the following on one system:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ module avail
|
||||
linux-SuSE11-x86_64/antlr-2.7.7-gcc-5.3.0-bdpl46y
|
||||
|
||||
$ spack module tcl loads antlr # WRONG!
|
||||
# antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64
|
||||
module load antlr-2.7.7-gcc-5.3.0-bdpl46y
|
||||
|
||||
$ spack module tcl loads --prefix linux-SuSE11-x86_64/ antlr
|
||||
# antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64
|
||||
module load linux-SuSE11-x86_64/antlr-2.7.7-gcc-5.3.0-bdpl46y
|
||||
|
||||
-------------------------
|
||||
Module file customization
|
||||
-------------------------
|
||||
|
||||
Module files are generated by post-install hooks after the successful
|
||||
installation of a package.
|
||||
|
||||
.. note::
|
||||
|
||||
Spack only generates modulefiles when a package is installed. If
|
||||
you attempt to install a package and it is already installed, Spack
|
||||
will not regenerate modulefiles for the package. This may lead to
|
||||
inconsistent modulefiles if the Spack module configuration has
|
||||
changed since the package was installed, either by editing a file
|
||||
or changing scopes or environments.
|
||||
|
||||
Later in this section there is a subsection on :ref:`regenerating
|
||||
modules <cmd-spack-module-refresh>` that will allow you to bring
|
||||
your modules to a consistent state.
|
||||
|
||||
The table below summarizes the essential information associated with
|
||||
the different file formats that can be generated by Spack:
|
||||
|
||||
installation of a package. The table below summarizes the essential
|
||||
information associated with the different file formats
|
||||
that can be generated by Spack:
|
||||
|
||||
+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
|
||||
| | **Hook name** | **Default root directory** | **Default template file** | **Compatible tools** |
|
||||
@@ -145,8 +325,9 @@ list of environment modifications.
|
||||
to the corresponding environment variables:
|
||||
|
||||
================== =================================
|
||||
LIBRARY_PATH ``self.prefix/rlib/R/lib``
|
||||
LD_LIBRARY_PATH ``self.prefix/rlib/R/lib``
|
||||
PKG_CONFIG_PATH ``self.prefix/rlib/pkgconfig``
|
||||
CPATH ``self.prefix/rlib/R/include``
|
||||
================== =================================
|
||||
|
||||
with the following snippet:
|
||||
@@ -178,46 +359,6 @@ the installation folder of each package for the presence of a set of subdirector
|
||||
(``bin``, ``man``, ``share/man``, etc.). If any is found its full path is prepended
|
||||
to the environment variables listed below the folder name.
|
||||
|
||||
Spack modules can be configured for multiple module sets. The default
|
||||
module set is named ``default``. All Spack commands which operate on
modules apply to the ``default`` module set by default, but can be
applied to any module set in the configuration. Settings applied at
|
||||
the root of the configuration (e.g. ``modules:enable`` rather than
|
||||
``modules:default:enable``) are applied to the default module set for
|
||||
backwards compatibility.
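
A minimal sketch of the two spellings (the set name ``site_lmod`` below is
made up purely for illustration):

.. code-block:: yaml

   # Legacy spelling: root-level settings apply to the ``default`` set
   modules:
     enable:
     - tcl

   ---

   # Explicit spelling: the same setting scoped to the ``default`` set,
   # alongside a separately configured named set
   modules:
     default:
       enable:
       - tcl
     site_lmod:
       enable:
       - lmod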
|
||||
|
||||
"""""""""""""""""""""""""
|
||||
Changing the modules root
|
||||
"""""""""""""""""""""""""
|
||||
|
||||
As shown in the table above, the default module root for ``lmod`` is
|
||||
``$spack/share/spack/lmod`` and the default root for ``tcl`` is
|
||||
``$spack/share/spack/modules``. This can be overridden for any module
|
||||
set by changing the ``roots`` key of the configuration.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
modules:
|
||||
default:
|
||||
roots:
|
||||
tcl: /path/to/install/tcl/modules
|
||||
my_custom_lmod_modules:
|
||||
roots:
|
||||
lmod: /path/to/install/custom/lmod/modules
|
||||
...
|
||||
|
||||
This configuration will create two module sets. The default module set
|
||||
will install its ``tcl`` modules to ``/path/to/install/tcl/modules``
|
||||
(and still install its lmod modules, if any, to the default
|
||||
location). The set ``my_custom_lmod_modules`` will install its lmod
|
||||
modules to ``/path/to/install/custom/lmod/modules`` (and still install
|
||||
its tcl modules, if any, to the default location).
|
||||
|
||||
Obviously, having multiple module sets install modules to the default
|
||||
location could be confusing to users of your modules. In the next
|
||||
section, we will discuss enabling and disabling module types (module
|
||||
file generators) for each module set.
|
||||
|
||||
""""""""""""""""""""
|
||||
Activate other hooks
|
||||
""""""""""""""""""""
|
||||
@@ -233,14 +374,13 @@ to the generator being customized:
|
||||
.. code-block:: yaml
|
||||
|
||||
modules:
|
||||
default:
|
||||
enable:
|
||||
- tcl
|
||||
- lmod
|
||||
tcl:
|
||||
# contains environment modules specific customizations
|
||||
lmod:
|
||||
# contains lmod specific customizations
|
||||
enable:
|
||||
- tcl
|
||||
- lmod
|
||||
tcl:
|
||||
# contains environment modules specific customizations
|
||||
lmod:
|
||||
# contains lmod specific customizations
|
||||
|
||||
In general, the configuration options that you can use in ``modules.yaml`` will
|
||||
either change the layout of the module files on the filesystem, or they will affect
|
||||
@@ -449,94 +589,6 @@ that are already in the LMod hierarchy.
|
||||
For hierarchies that are deeper than three layers ``lmod spider`` may have some issues.
|
||||
See `this discussion on the LMod project <https://github.com/TACC/Lmod/issues/114>`_.
|
||||
|
||||
.. _customize-env-modifications:
|
||||
|
||||
"""""""""""""""""""""""""""""""""""
|
||||
Customize environment modifications
|
||||
"""""""""""""""""""""""""""""""""""
|
||||
|
||||
You can control which prefixes in a Spack package are added to
|
||||
environment variables with the ``prefix_inspections`` section; this
|
||||
section maps relative prefixes to the list of environment variables
|
||||
which should be updated with those prefixes.
|
||||
|
||||
The ``prefix_inspections`` configuration is different from other
|
||||
settings in that a ``prefix_inspections`` configuration at the
|
||||
``modules`` level of the configuration file applies to all module
|
||||
sets. This allows users to make general overrides to the default
|
||||
inspections and customize them per-module-set.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
modules:
|
||||
prefix_inspections:
|
||||
bin:
|
||||
- PATH
|
||||
lib:
|
||||
- LIBRARY_PATH
|
||||
'':
|
||||
- CMAKE_PREFIX_PATH
|
||||
|
||||
Prefix inspections are only applied if the relative path inside the
|
||||
installation prefix exists. In this case, for a Spack package ``foo``
|
||||
installed to ``/spack/prefix/foo``, if ``foo`` installs executables to
|
||||
``bin`` but no libraries in ``lib``, the generated module file for
|
||||
``foo`` would update ``PATH`` to contain ``/spack/prefix/foo/bin`` and
|
||||
``CMAKE_PREFIX_PATH`` to contain ``/spack/prefix/foo``, but would not
|
||||
update ``LIBRARY_PATH``.
|
||||
|
||||
There is a special case for prefix inspections relative to environment
|
||||
views. If all of the following conditions hold for a module set
|
||||
configuration:
|
||||
|
||||
#. The configuration is for an :ref:`environment <environments>` and
|
||||
will never be applied outside the environment,
|
||||
#. The environment in question is configured to use a :ref:`view
|
||||
<filesystem-views>`,
|
||||
#. The :ref:`environment view is configured
|
||||
<configuring_environment_views>` with a projection that ensures
|
||||
every package is linked to a unique directory,
|
||||
|
||||
then the module set may be configured to create modules relative to
|
||||
the environment view. This is specified by the ``use_view``
|
||||
configuration option in the module set. If ``True``, the module set is
|
||||
constructed relative to the default view of the
|
||||
environment. Otherwise, the value must be the name of the environment
|
||||
view relative to which to construct modules, or ``False-ish`` to
|
||||
disable the feature explicitly (the default is ``False``).
|
||||
|
||||
If the ``use_view`` value is set in the config, then the prefix
|
||||
inspections for the package are done relative to the package's path in
|
||||
the view.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
spack:
|
||||
modules:
|
||||
view_relative_modules:
|
||||
use_view: my_view
|
||||
prefix_inspections:
|
||||
bin:
|
||||
- PATH
|
||||
view:
|
||||
my_view:
|
||||
projections:
|
||||
root: /path/to/my/view
|
||||
all: '{name}-{hash}'
|
||||
|
||||
The ``spack`` key is relevant to :ref:`environment <environments>`
|
||||
configuration, and the view key is discussed in detail in the section
|
||||
on :ref:`Configuring environment views
|
||||
<configuring_environment_views>`. With this configuration the
|
||||
generated module for package ``foo`` would set ``PATH`` to include
|
||||
``/path/to/my/view/foo-<hash>/bin`` instead of
|
||||
``/spack/prefix/foo/bin``.
|
||||
|
||||
The ``use_view`` option is useful when deploying a large software
|
||||
stack to users who are likely to inspect the modules to find full
|
||||
paths to software, when it is desirable to present the users with a
|
||||
simpler set of paths than those generated by the Spack install tree.
|
||||
|
||||
""""""""""""""""""""""""""""""""""""
|
||||
Filter out environment modifications
|
||||
""""""""""""""""""""""""""""""""""""
|
||||
@@ -645,135 +697,3 @@ subcommand is ``rm``:
|
||||
that already exist will ask for a confirmation by default. If
the command is used in a script, it is possible to pass the
``-y`` argument, which will skip this safety measure.
|
||||
|
||||
|
||||
.. _modules-in-shell-scripts:
|
||||
|
||||
------------------------------------
|
||||
Using Spack modules in shell scripts
|
||||
------------------------------------
|
||||
|
||||
To enable additional Spack commands for loading and unloading
|
||||
module files, and to add the correct path to ``MODULEPATH``, you need to
|
||||
source the appropriate setup file. Assuming Spack is installed in
|
||||
``$SPACK_ROOT``, run the appropriate command for your shell:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# For bash/zsh/sh
|
||||
$ . $SPACK_ROOT/share/spack/setup-env.sh
|
||||
|
||||
# For tcsh/csh
|
||||
$ source $SPACK_ROOT/share/spack/setup-env.csh
|
||||
|
||||
# For fish
|
||||
$ . $SPACK_ROOT/share/spack/setup-env.fish
|
||||
|
||||
If you want to have Spack's shell support available on the command line
|
||||
at any login you can put this source line in one of the files that are
|
||||
sourced at startup (like ``.profile``, ``.bashrc`` or ``.cshrc``). Be
|
||||
aware that the shell startup time may increase slightly as a result.
|
||||
|
||||
.. _cmd-spack-module-loads:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
``spack module tcl loads``
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
In some cases, it is desirable to use a Spack-generated module, rather
|
||||
than relying on Spack's built-in user-environment modification
|
||||
capabilities. To translate a spec into a module name, use ``spack
|
||||
module tcl loads`` or ``spack module lmod loads`` depending on the
|
||||
module system desired.
|
||||
|
||||
|
||||
To load not just a module, but also all the modules it depends on, use
|
||||
the ``--dependencies`` option. This is not required for most modules
|
||||
because Spack builds binaries with RPATH support. However, not all
|
||||
packages use RPATH to find their dependencies: this can be true in
|
||||
particular for Python extensions, which are currently *not* built with
|
||||
RPATH.
|
||||
|
||||
Scripts to load modules recursively may be made with the command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack module tcl loads --dependencies <spec>
|
||||
|
||||
An equivalent alternative using `process substitution <http://tldp.org/LDP/abs/html/process-sub.html>`_ is:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ source <( spack module tcl loads --dependencies <spec> )
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Module Commands for Shell Scripts
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Although Spack is flexible, the ``module`` command is much faster.
|
||||
This could become an issue when emitting a series of ``spack load``
|
||||
commands inside a shell script. By adding the ``--dependencies`` flag,
|
||||
``spack module tcl loads`` may also be used to generate code that can be
|
||||
cut-and-pasted into a shell script. For example:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack module tcl loads --dependencies py-numpy git
|
||||
# bzip2@1.0.6%gcc@4.9.3=linux-x86_64
|
||||
module load bzip2-1.0.6-gcc-4.9.3-ktnrhkrmbbtlvnagfatrarzjojmkvzsx
|
||||
# ncurses@6.0%gcc@4.9.3=linux-x86_64
|
||||
module load ncurses-6.0-gcc-4.9.3-kaazyneh3bjkfnalunchyqtygoe2mncv
|
||||
# zlib@1.2.8%gcc@4.9.3=linux-x86_64
|
||||
module load zlib-1.2.8-gcc-4.9.3-v3ufwaahjnviyvgjcelo36nywx2ufj7z
|
||||
# sqlite@3.8.5%gcc@4.9.3=linux-x86_64
|
||||
module load sqlite-3.8.5-gcc-4.9.3-a3eediswgd5f3rmto7g3szoew5nhehbr
|
||||
# readline@6.3%gcc@4.9.3=linux-x86_64
|
||||
module load readline-6.3-gcc-4.9.3-se6r3lsycrwxyhreg4lqirp6xixxejh3
|
||||
# python@3.5.1%gcc@4.9.3=linux-x86_64
|
||||
module load python-3.5.1-gcc-4.9.3-5q5rsrtjld4u6jiicuvtnx52m7tfhegi
|
||||
# py-setuptools@20.5%gcc@4.9.3=linux-x86_64
|
||||
module load py-setuptools-20.5-gcc-4.9.3-4qr2suj6p6glepnedmwhl4f62x64wxw2
|
||||
# py-nose@1.3.7%gcc@4.9.3=linux-x86_64
|
||||
module load py-nose-1.3.7-gcc-4.9.3-pwhtjw2dvdvfzjwuuztkzr7b4l6zepli
|
||||
# openblas@0.2.17%gcc@4.9.3+shared=linux-x86_64
|
||||
module load openblas-0.2.17-gcc-4.9.3-pw6rmlom7apfsnjtzfttyayzc7nx5e7y
|
||||
# py-numpy@1.11.0%gcc@4.9.3+blas+lapack=linux-x86_64
|
||||
module load py-numpy-1.11.0-gcc-4.9.3-mulodttw5pcyjufva4htsktwty4qd52r
|
||||
# curl@7.47.1%gcc@4.9.3=linux-x86_64
|
||||
module load curl-7.47.1-gcc-4.9.3-ohz3fwsepm3b462p5lnaquv7op7naqbi
|
||||
# autoconf@2.69%gcc@4.9.3=linux-x86_64
|
||||
module load autoconf-2.69-gcc-4.9.3-bkibjqhgqm5e3o423ogfv2y3o6h2uoq4
|
||||
# cmake@3.5.0%gcc@4.9.3~doc+ncurses+openssl~qt=linux-x86_64
|
||||
module load cmake-3.5.0-gcc-4.9.3-x7xnsklmgwla3ubfgzppamtbqk5rwn7t
|
||||
# expat@2.1.0%gcc@4.9.3=linux-x86_64
|
||||
module load expat-2.1.0-gcc-4.9.3-6pkz2ucnk2e62imwakejjvbv6egncppd
|
||||
# git@2.8.0-rc2%gcc@4.9.3+curl+expat=linux-x86_64
|
||||
module load git-2.8.0-rc2-gcc-4.9.3-3bib4hqtnv5xjjoq5ugt3inblt4xrgkd
|
||||
|
||||
The script may be further edited by removing unnecessary modules.
|
||||
|
||||
|
||||
^^^^^^^^^^^^^^^
|
||||
Module Prefixes
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
On some systems, modules are automatically prefixed with a certain
|
||||
string; ``spack module tcl loads`` needs to know about that prefix when it
|
||||
issues ``module load`` commands. Add the ``--prefix`` option to your
|
||||
``spack module tcl loads`` commands if this is necessary.
|
||||
|
||||
For example, consider the following on one system:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ module avail
|
||||
linux-SuSE11-x86_64/antlr-2.7.7-gcc-5.3.0-bdpl46y
|
||||
|
||||
$ spack module tcl loads antlr # WRONG!
|
||||
# antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64
|
||||
module load antlr-2.7.7-gcc-5.3.0-bdpl46y
|
||||
|
||||
$ spack module tcl loads --prefix linux-SuSE11-x86_64/ antlr
|
||||
# antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64
|
||||
module load linux-SuSE11-x86_64/antlr-2.7.7-gcc-5.3.0-bdpl46y
|
||||
|
||||
@@ -1,265 +0,0 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
.. _monitoring:
|
||||
|
||||
==========
|
||||
Monitoring
|
||||
==========
|
||||
|
||||
You can use a `spack monitor <https://github.com/spack/spack-monitor>`_ "Spackmon"
|
||||
server to store a database of your packages, builds, and associated metadata
|
||||
for provenance, research, or some other kind of development. You should
|
||||
follow the instructions in the `spack monitor documentation <https://spack-monitor.readthedocs.org>`_
|
||||
to first create a server along with a username and token for yourself.
|
||||
You can then use this guide to interact with the server.
|
||||
|
||||
-------------------
|
||||
Analysis Monitoring
|
||||
-------------------
|
||||
|
||||
To read about how to monitor an analysis (meaning you want to send analysis results
|
||||
to a server) see :ref:`analyze_monitoring`.
|
||||
|
||||
---------------------
|
||||
Monitoring An Install
|
||||
---------------------
|
||||
|
||||
Since an install is typically when you build packages, that is the natural
step at which to tell spack to monitor. Let's start with an example where we
want to monitor the install of hdf5. Unless you have disabled authentication
for the server, first export your spack monitor token and username to the environment:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438
|
||||
$ export SPACKMON_USER=spacky
|
||||
|
||||
|
||||
By default, the host for your server is expected to be at ``http://127.0.0.1``
|
||||
with a prefix of ``ms1``, and if this is the case, you can simply add the
|
||||
``--monitor`` flag to the install command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install --monitor hdf5
|
||||
|
||||
|
||||
If you need to customize the host or the prefix, you can do that as well:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install --monitor --monitor-prefix monitor --monitor-host https://monitor-service.io hdf5
|
||||
|
||||
|
||||
As a precaution, the spack client exits early if you have not provided
authentication credentials. For example, if you run the command above without
exporting your username or token, you'll see:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
==> Error: You are required to export SPACKMON_TOKEN and SPACKMON_USER
|
||||
|
||||
This extra check is to ensure that we don't start any builds,
|
||||
and then discover that you forgot to export your token. However, if
|
||||
your monitoring server has authentication disabled, you can tell this to
|
||||
the client to skip this step:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install --monitor --monitor-disable-auth hdf5
|
||||
|
||||
If the service is not running, spack will cleanly exit early: the install will
not continue if you've asked it to monitor and there is no service.
For example, here is what you'll see if the monitoring service is not running:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
[Errno 111] Connection refused
|
||||
|
||||
|
||||
If you want to continue builds (and stop monitoring) you can set the ``--monitor-keep-going``
|
||||
flag.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install --monitor --monitor-keep-going hdf5
|
||||
|
||||
This could mean that if a request fails, you only have partial or no data
|
||||
added to your monitoring database. This setting will not be applied to the
|
||||
first request to check if the server is running, but to subsequent requests.
|
||||
If you don't have a monitor server running and you want to build, simply
|
||||
don't provide the ``--monitor`` flag! Finally, if you want to provide one or
|
||||
more tags to your build, you can do:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# Add one tag, "pizza"
|
||||
$ spack install --monitor --monitor-tags pizza hdf5
|
||||
|
||||
# Add two tags, "pizza" and "pasta"
|
||||
$ spack install --monitor --monitor-tags pizza,pasta hdf5
|
||||
|
||||
|
||||
----------------------------
|
||||
Monitoring with Containerize
|
||||
----------------------------
|
||||
|
||||
The same monitoring arguments are available to the ``spack containerize`` command.
|
||||
|
||||
^^^^^^
|
||||
Docker
|
||||
^^^^^^
|
||||
|
||||
To add monitoring to a Docker container recipe generation using the defaults,
|
||||
and assuming a monitor server running on localhost, you would
|
||||
start with a spack.yaml in your present working directory:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
spack:
|
||||
specs:
|
||||
- samtools
|
||||
|
||||
And then do:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# preview first
|
||||
spack containerize --monitor
|
||||
|
||||
# and then write to a Dockerfile
|
||||
spack containerize --monitor > Dockerfile
|
||||
|
||||
|
||||
The install command will be edited to include commands for enabling monitoring.
|
||||
However, getting secrets into the container for your monitor server is something
|
||||
that should be done carefully. Specifically you should:
|
||||
|
||||
- Never try to define secrets as ENV, ARG, or using ``--build-arg``
|
||||
- Do not try to get the secret into the container via a "temporary" file that you remove (it in fact will still exist in a layer)
|
||||
|
||||
Instead, it's recommended to use buildkit `as explained here <https://pythonspeed.com/articles/docker-build-secrets/>`_.
|
||||
You'll need to again export environment variables for your spack monitor server:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438
|
||||
$ export SPACKMON_USER=spacky
|
||||
|
||||
And then use buildkit along with your build and identifying the name of the secret:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ DOCKER_BUILDKIT=1 docker build --secret id=st,env=SPACKMON_TOKEN --secret id=su,env=SPACKMON_USER -t spack/container .
|
||||
|
||||
The secrets are expected to come from your environment, and then will be temporarily mounted and available
|
||||
at ``/run/secrets/<name>``. If you forget to supply them (and authentication is required) the build
|
||||
will fail. If you need to build on your host (and interact with a spack monitor at localhost) you'll
|
||||
need to tell Docker to use the host network:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ DOCKER_BUILDKIT=1 docker build --network="host" --secret id=st,env=SPACKMON_TOKEN --secret id=su,env=SPACKMON_USER -t spack/container .
|
||||
|
||||
|
||||
^^^^^^^^^^^
|
||||
Singularity
|
||||
^^^^^^^^^^^
|
||||
|
||||
To add monitoring to a Singularity container build, the spack.yaml needs to
be modified slightly to specify a different container format:
|
||||
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
spack:
|
||||
specs:
|
||||
- samtools
|
||||
container:
|
||||
format: singularity
|
||||
|
||||
|
||||
Again, generate the recipe:
|
||||
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# preview first
|
||||
$ spack containerize --monitor
|
||||
|
||||
# then write to a Singularity recipe
|
||||
$ spack containerize --monitor > Singularity
|
||||
|
||||
|
||||
Singularity doesn't have a direct way to define secrets at build time, so we
have to do a bit of manual work: add a file, source the secrets in it, and then
remove it. Since Singularity doesn't have layers like Docker, deleting a file
will truly remove it from the container and its history. So let's say we have
this file, ``secrets.sh``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# secrets.sh
|
||||
export SPACKMON_USER=spack
|
||||
export SPACKMON_TOKEN=50445263afd8f67e59bd79bff597836ee6c05438
|
||||
|
||||
|
||||
We would then generate the Singularity recipe, add a ``%files`` section,
source that file at the start of ``%post``, and **importantly**
remove the file at the end of that same section.
|
||||
|
||||
.. code-block::
|
||||
|
||||
Bootstrap: docker
|
||||
From: spack/ubuntu-bionic:latest
|
||||
Stage: build
|
||||
|
||||
%files
|
||||
secrets.sh /opt/secrets.sh
|
||||
|
||||
%post
|
||||
. /opt/secrets.sh
|
||||
|
||||
# spack install commands are here
|
||||
...
|
||||
|
||||
# Don't forget to remove here!
|
||||
rm /opt/secrets.sh
|
||||
|
||||
|
||||
You can then build the container as you normally would.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ sudo singularity build container.sif Singularity
|
||||
|
||||
|
||||
------------------
|
||||
Monitoring Offline
|
||||
------------------
|
||||
|
||||
In the case that you want to save monitor results to your filesystem
|
||||
and then upload them later (perhaps you are in an environment where you don't
|
||||
have credentials or it isn't safe to use them) you can use the ``--monitor-save-local``
|
||||
flag.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install --monitor --monitor-save-local hdf5
|
||||
|
||||
This will save results in a subfolder, "monitor" in your designated spack
|
||||
reports folder, which defaults to ``$HOME/.spack/reports/monitor``. When
|
||||
you are ready to upload them to a spack monitor server:
|
||||
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack monitor upload ~/.spack/reports/monitor
|
||||
|
||||
|
||||
You can choose the root directory of results as shown above, or a specific
|
||||
subdirectory. The command accepts other arguments to specify configuration
|
||||
for the monitor.
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -10,8 +10,8 @@ Package List
|
||||
============
|
||||
|
||||
This is a list of things you can install using Spack. It is
|
||||
automatically generated based on the packages in this Spack
|
||||
version.
|
||||
automatically generated based on the packages in the latest Spack
|
||||
release.
|
||||
|
||||
.. raw:: html
|
||||
:file: package_list.html
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -30,18 +30,52 @@ at least one `runner <https://docs.gitlab.com/runner/>`_. Then the basic steps
|
||||
for setting up a build pipeline are as follows:
|
||||
|
||||
#. Create a repository on your gitlab instance
|
||||
#. Add a ``spack.yaml`` at the root containing your pipeline environment
|
||||
#. Add a ``spack.yaml`` at the root containing your pipeline environment (see
|
||||
below for details)
|
||||
#. Add a ``.gitlab-ci.yml`` at the root containing two jobs (one to generate
|
||||
the pipeline dynamically, and one to run the generated jobs).
|
||||
the pipeline dynamically, and one to run the generated jobs), similar to
|
||||
this one:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
stages: [generate, build]
|
||||
|
||||
generate-pipeline:
|
||||
stage: generate
|
||||
tags:
|
||||
- <custom-tag>
|
||||
script:
|
||||
- spack env activate .
|
||||
- spack ci generate
|
||||
--output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
|
||||
artifacts:
|
||||
paths:
|
||||
- "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
|
||||
|
||||
build-jobs:
|
||||
stage: build
|
||||
trigger:
|
||||
include:
|
||||
- artifact: "jobs_scratch_dir/pipeline.yml"
|
||||
job: generate-pipeline
|
||||
strategy: depend
|
||||
|
||||
|
||||
#. Add any secrets required by the CI process to environment variables using the
|
||||
CI web ui
|
||||
#. Push a commit containing the ``spack.yaml`` and ``.gitlab-ci.yml`` mentioned above
|
||||
to the gitlab repository
|
||||
|
||||
See the :ref:`functional_example` section for a minimal working example. See also
the :ref:`custom_workflow` section for a link to an example of a custom workflow
based on spack pipelines.
|
||||
The ``<custom-tag>``, above, is used to pick one of your configured runners to
|
||||
run the pipeline generation phase (this is implemented in the ``spack ci generate``
|
||||
command, which assumes the runner has an appropriate version of spack installed
|
||||
and configured for use). Of course, there are many ways to customize the process.
|
||||
You can configure CDash reporting on the progress of your builds, set up S3 buckets
|
||||
to mirror binaries built by the pipeline, clone a custom spack repository/ref for
|
||||
use by the pipeline, and more.
|
||||
|
||||
While it is possible to set up pipelines on gitlab.com, as illustrated above, the
|
||||
builds there are limited to 60 minutes and generic hardware. It is also possible to
|
||||
While it is possible to set up pipelines on gitlab.com, the builds there are
|
||||
limited to 60 minutes and generic hardware. It is also possible to
|
||||
`hook up <https://about.gitlab.com/blog/2018/04/24/getting-started-gitlab-ci-gcp>`_
|
||||
Gitlab to Google Kubernetes Engine (`GKE <https://cloud.google.com/kubernetes-engine/>`_)
|
||||
or Amazon Elastic Kubernetes Service (`EKS <https://aws.amazon.com/eks>`_), though those
|
||||
@@ -54,144 +88,21 @@ dynamically generated
|
||||
Note that the use of dynamic child pipelines requires running Gitlab version
|
||||
``>= 12.9``.
|
||||
|
||||
.. _functional_example:
|
||||
|
||||
------------------
|
||||
Functional Example
|
||||
------------------
|
||||
|
||||
The simplest fully functional standalone example of a working pipeline can be
|
||||
examined live at this example `project <https://gitlab.com/scott.wittenburg/spack-pipeline-demo>`_
|
||||
on gitlab.com.
|
||||
|
||||
Here's the ``.gitlab-ci.yml`` file from that example that builds and runs the
|
||||
pipeline:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
stages: [generate, build]
|
||||
|
||||
variables:
|
||||
SPACK_REPO: https://github.com/scottwittenburg/spack.git
|
||||
SPACK_REF: pipelines-reproducible-builds
|
||||
|
||||
generate-pipeline:
|
||||
stage: generate
|
||||
tags:
|
||||
- docker
|
||||
image:
|
||||
name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
|
||||
entrypoint: [""]
|
||||
before_script:
|
||||
- git clone ${SPACK_REPO}
|
||||
- pushd spack && git checkout ${SPACK_REF} && popd
|
||||
- . "./spack/share/spack/setup-env.sh"
|
||||
script:
|
||||
- spack env activate --without-view .
|
||||
- spack -d ci generate
|
||||
--artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir"
|
||||
--output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
|
||||
artifacts:
|
||||
paths:
|
||||
- "${CI_PROJECT_DIR}/jobs_scratch_dir"
|
||||
|
||||
build-jobs:
|
||||
stage: build
|
||||
trigger:
|
||||
include:
|
||||
- artifact: "jobs_scratch_dir/pipeline.yml"
|
||||
job: generate-pipeline
|
||||
strategy: depend
|
||||
|
||||
The key thing to note above is that there are two jobs: The first job to run,
|
||||
``generate-pipeline``, runs the ``spack ci generate`` command to generate a
|
||||
dynamic child pipeline and write it to a yaml file, which is then picked up
|
||||
by the second job, ``build-jobs``, and used to trigger the downstream pipeline.
|
||||
|
||||
And here's the spack environment built by the pipeline represented as a
|
||||
``spack.yaml`` file:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
spack:
|
||||
view: false
|
||||
concretization: separately
|
||||
|
||||
definitions:
|
||||
- pkgs:
|
||||
- zlib
|
||||
- bzip2
|
||||
- arch:
|
||||
- '%gcc@7.5.0 arch=linux-ubuntu18.04-x86_64'
|
||||
|
||||
specs:
|
||||
- matrix:
|
||||
- - $pkgs
|
||||
- - $arch
|
||||
|
||||
mirrors: { "mirror": "s3://spack-public/mirror" }
|
||||
|
||||
gitlab-ci:
|
||||
before_script:
|
||||
- git clone ${SPACK_REPO}
|
||||
- pushd spack && git checkout ${SPACK_CHECKOUT_VERSION} && popd
|
||||
- . "./spack/share/spack/setup-env.sh"
|
||||
script:
|
||||
- pushd ${SPACK_CONCRETE_ENV_DIR} && spack env activate --without-view . && popd
|
||||
- spack -d ci rebuild
|
||||
mappings:
|
||||
- match: ["os=ubuntu18.04"]
|
||||
runner-attributes:
|
||||
image:
|
||||
name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
|
||||
entrypoint: [""]
|
||||
tags:
|
||||
- docker
|
||||
enable-artifacts-buildcache: True
|
||||
rebuild-index: False
|
||||
|
||||
The elements of this file important to spack ci pipelines are described in more
|
||||
detail below, but there are a couple of things to note about the above working
|
||||
example:
|
||||
|
||||
Normally ``enable-artifacts-buildcache`` is not recommended in production as it
|
||||
results in large binary artifacts getting transferred back and forth between
|
||||
gitlab and the runners. But in this example on gitlab.com where there is no
|
||||
shared, persistent file system, and where no secrets are stored for giving
|
||||
permission to write to an S3 bucket, ``enable-artifacts-buildcache`` is the only
|
||||
way to propagate binaries from jobs to their dependents.
|
||||
|
||||
Also, it is usually a good idea to let the pipeline generate a final "rebuild the
|
||||
buildcache index" job, so that subsequent pipeline generation can quickly determine
|
||||
which specs are up to date and which need to be rebuilt (it's a good idea for other
|
||||
reasons as well, but those are out of scope for this discussion). In this case we
|
||||
have disabled it (using ``rebuild-index: False``) because the index would only be
|
||||
generated in the artifacts mirror anyway, and consequently would not be available
|
||||
during subsequent pipeline runs.
|
||||
|
||||
.. note::
|
||||
With the addition of reproducible builds (#22887) a previously working
|
||||
pipeline will require some changes:
|
||||
|
||||
* In the build jobs (``runner-attributes``), the environment location changed.
|
||||
This will typically show as a ``KeyError`` in the failing job. Be sure to
|
||||
point to ``${SPACK_CONCRETE_ENV_DIR}``.
|
||||
|
||||
* When using ``include`` in your environment, be sure to make the included
|
||||
files available in the build jobs. This means adding those files to the
|
||||
artifact directory. Those files will also be missing in the reproducibility
|
||||
artifact.
|
||||
|
||||
* Because the location of the environment changed, including files with
|
||||
relative path may have to be adapted to work both in the project context
|
||||
(generation job) and in the concrete env dir context (build job).
|
||||
|
||||
-----------------------------------
|
||||
Spack commands supporting pipelines
|
||||
-----------------------------------
|
||||
|
||||
Spack provides a ``ci`` command with a few sub-commands supporting spack
|
||||
ci pipelines. These commands are covered in more detail in this section.
|
||||
Spack provides a command ``ci`` with two sub-commands: ``spack ci generate`` generates
|
||||
a pipeline (a .gitlab-ci.yml file) from a spack environment, and ``spack ci rebuild``
|
||||
checks a spec against a remote mirror and possibly rebuilds it from source and updates
|
||||
the binary mirror with the latest built package. Both ``spack ci ...`` commands must
|
||||
be run from within the same environment, as each one makes use of the environment for
|
||||
different purposes. Additionally, some options to the commands (or conditions present
|
||||
in the spack environment file) may require particular environment variables to be
|
||||
set in order to function properly. Examples of these are typically secrets
|
||||
needed for pipeline operation that should not be visible in a spack environment
|
||||
file. These environment variables are described in more detail
|
||||
:ref:`ci_environment_variables`.
|
||||
|
||||
.. _cmd-spack-ci:
|
||||
|
||||
@@ -210,107 +121,16 @@ pipeline jobs.
|
||||
|
||||
Concretizes the specs in the active environment, stages them (as described in
|
||||
:ref:`staging_algorithm`), and writes the resulting ``.gitlab-ci.yml`` to disk.
|
||||
During concretization of the environment, ``spack ci generate`` also writes a
|
||||
``spack.lock`` file which is then provided to generated child jobs and made
|
||||
available in all generated job artifacts to aid in reproducing failed builds
|
||||
in a local environment. This means there are two artifacts that need to be
|
||||
exported in your pipeline generation job (defined in your ``.gitlab-ci.yml``).
|
||||
The first is the output yaml file of ``spack ci generate``, and the other is
|
||||
the directory containing the concrete environment files. In the
|
||||
:ref:`functional_example` section, we only mentioned one path in the
|
||||
``artifacts`` ``paths`` list because we used ``--artifacts-root`` as the
|
||||
top level directory containing both the generated pipeline yaml and the
|
||||
concrete environment.
|
||||
|
||||
Using ``--prune-dag`` or ``--no-prune-dag`` configures whether or not jobs are
|
||||
generated for specs that are already up to date on the mirror. If enabling
|
||||
DAG pruning using ``--prune-dag``, more information may be required in your
|
||||
``spack.yaml`` file, see the :ref:`noop_jobs` section below regarding
|
||||
``service-job-attributes``.
|
||||
|
||||
The optional ``--check-index-only`` argument can be used to speed up pipeline
|
||||
generation by telling spack to consider only remote buildcache indices when
|
||||
checking the remote mirror to determine if each spec in the DAG is up to date
|
||||
or not. The default behavior is for spack to fetch the index and check it,
|
||||
but if the spec is not found in the index, to also perform a direct check for
|
||||
the spec on the mirror. If the remote buildcache index is out of date, which
|
||||
can easily happen if it is not updated frequently, this behavior ensures that
|
||||
spack has a way to know for certain about the status of any concrete spec on
|
||||
the remote mirror, but can slow down pipeline generation significantly.
|
||||
|
||||
The ``--optimize`` argument is experimental and runs the generated pipeline
|
||||
document through a series of optimization passes designed to reduce the size
|
||||
of the generated file.
|
||||
|
||||
The ``--dependencies`` is also experimental and disables what in Gitlab is
|
||||
referred to as DAG scheduling, internally using the ``dependencies`` keyword
|
||||
rather than ``needs`` to list dependency jobs. The drawback of using this option
|
||||
is that before any job can begin, all jobs in previous stages must first
|
||||
complete. The benefit is that Gitlab allows more dependencies to be listed
|
||||
when using ``dependencies`` instead of ``needs``.
|
||||
|
||||
The optional ``--output-file`` argument should be an absolute path (including
|
||||
file name) to the generated pipeline, and if not given, the default is
|
||||
``./.gitlab-ci.yml``.
|
||||
|
||||
While optional, the ``--artifacts-root`` argument is used to determine where
|
||||
the concretized environment directory should be located. This directory will
|
||||
be created by ``spack ci generate`` and will contain the ``spack.yaml`` and
|
||||
generated ``spack.lock`` which are then passed to all child jobs as an
|
||||
artifact. This directory will also be the root directory for all artifacts
|
||||
generated by jobs in the pipeline.
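
Putting these options together, a hedged sketch of a generation step (the
paths are placeholders) might look like:

.. code-block:: console

   $ spack env activate --without-view .
   $ spack ci generate --check-index-only \
       --artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir" \
       --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"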
|
||||
|
||||
.. _cmd-spack-ci-rebuild:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
``spack ci rebuild``
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The purpose of ``spack ci rebuild`` is straightforward: take its assigned
|
||||
spec job, check whether the target mirror already has a binary for that spec,
|
||||
and if not, build the spec from source and push the binary to the mirror. To
|
||||
accomplish this in a reproducible way, the sub-command prepares a ``spack install``
|
||||
command line to build a single spec in the DAG, saves that command in a
|
||||
shell script, ``install.sh``, in the current working directory, and then runs
|
||||
it to install the spec. The shell script is also exported as an artifact to
|
||||
aid in reproducing the build outside of the CI environment.
|
||||
|
||||
If it was necessary to install the spec from source, ``spack ci rebuild`` will
|
||||
also subsequently create a binary package for the spec and try to push it to the
|
||||
mirror.
|
||||
|
||||
The ``spack ci rebuild`` sub-command mainly expects its "input" to come either
|
||||
from environment variables or from the ``gitlab-ci`` section of the ``spack.yaml``
|
||||
environment file. There are two main sources of the environment variables, some
|
||||
are written into ``.gitlab-ci.yml`` by ``spack ci generate``, and some are
|
||||
provided by the GitLab CI runtime.
|
||||
|
||||
.. _cmd-spack-ci-rebuild-index:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
``spack ci rebuild-index``
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
This is a convenience command to rebuild the buildcache index associated with
|
||||
the mirror in the active, gitlab-enabled environment (specifying the mirror
|
||||
url or name is not required).
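
As a minimal usage sketch (assuming the gitlab-enabled environment lives in
the current directory):

.. code-block:: console

   $ spack env activate .
   $ spack ci rebuild-index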
|
||||
|
||||
.. _cmd-spack-ci-reproduce-build:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
``spack ci reproduce-build``
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Given the url to a gitlab pipeline rebuild job, downloads and unzips the
|
||||
artifacts into a local directory (which can be specified with the optional
|
||||
``--working-dir`` argument), then finds the target job in the generated
|
||||
pipeline to extract details about how it was run. Assuming the job used a
|
||||
docker image, the command prints a ``docker run`` command line and some basic
|
||||
instructions on how to reproduce the build locally.
|
||||
|
||||
Note that jobs failing in the pipeline will print messages giving the
|
||||
arguments you can pass to ``spack ci reproduce-build`` in order to reproduce
|
||||
a particular build locally.
|
||||
This sub-command is responsible for ensuring a single spec from the release
|
||||
environment is up to date on the remote mirror configured in the environment,
|
||||
and as such, corresponds to a single job in the ``.gitlab-ci.yml`` file.
|
||||
|
||||
------------------------------------
|
||||
A pipeline-enabled spack environment
|
||||
@@ -369,38 +189,28 @@ corresponds to a known gitlab runner, where the ``match`` section is used
|
||||
in assigning a release spec to one of the runners, and the ``runner-attributes``
|
||||
section is used to configure the spec/job for that particular runner.
|
||||
|
||||
Both the top-level ``gitlab-ci`` section as well as each ``runner-attributes``
|
||||
section can also contain the following keys: ``image``, ``tags``, ``variables``,
|
||||
``before_script``, ``script``, and ``after_script``. If any of these keys are
|
||||
provided at the ``gitlab-ci`` level, they will be used as the defaults for any
|
||||
``runner-attributes``, unless they are overridden in those sections. Specifying
|
||||
any of these keys at the ``runner-attributes`` level generally overrides the
|
||||
keys specified at the higher level, with a couple exceptions. Any ``variables``
|
||||
specified at both levels result in those dictionaries getting merged in the
|
||||
resulting generated job, and any duplicate variable names get assigned the value
|
||||
provided in the specific ``runner-attributes``. If ``tags`` are specified both
|
||||
at the ``gitlab-ci`` level as well as the ``runner-attributes`` level, then the
|
||||
lists of tags are combined, and any duplicates are removed.
|
||||
|
||||
See the section below on using a custom spack for an example of how these keys
|
||||
could be used.
|
||||
|
||||
There are other pipeline options you can configure within the ``gitlab-ci`` section
|
||||
as well.
|
||||
|
||||
The ``bootstrap`` section allows you to specify lists of specs from
|
||||
as well. The ``bootstrap`` section allows you to specify lists of specs from
|
||||
your ``definitions`` that should be staged ahead of the environment's ``specs`` (this
|
||||
section is described in more detail below). The ``enable-artifacts-buildcache`` key
|
||||
takes a boolean and determines whether the pipeline uses artifacts to store and
|
||||
pass along the buildcaches from one stage to the next (the default if you don't
|
||||
provide this option is ``False``).
|
||||
|
||||
The optional ``broken-specs-url`` key tells Spack to check against a list of
|
||||
specs that are known to be currently broken in ``develop``. If any such specs
|
||||
are found, the ``spack ci generate`` command will fail with an error message
|
||||
informing the user what broken specs were encountered. This allows the pipeline
|
||||
to fail early and avoid wasting compute resources attempting to build packages
|
||||
that will not succeed.
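
A brief sketch of this key (the URL below is only a placeholder for wherever
your list of broken specs is published):

.. code-block:: yaml

   gitlab-ci:
     broken-specs-url: "s3://my-mirror-bucket/broken-specs"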
|
||||
provide this option is ``False``). The ``enable-debug-messages`` key takes a boolean
|
||||
and allows you to choose whether the pipeline build jobs are run as ``spack -d ci rebuild``
|
||||
or just ``spack ci rebuild`` (the default is not to enable debug messages). The
|
||||
``final-stage-rebuild-index`` section controls whether an extra job is added to the
|
||||
end of your pipeline (in a stage by itself) which will regenerate the mirror's
|
||||
buildcache index. Under normal operation, each pipeline job that rebuilds a package
|
||||
will re-generate the mirror's buildcache index after the buildcache entry for that
|
||||
job has been created and pushed to the mirror. Since jobs in the same stage can run in
|
||||
parallel, there is the possibility that at the end of some stage, the index may not
|
||||
reflect all the binaries in the buildcache. Adding the ``final-stage-rebuild-index``
|
||||
section ensures that at the end of the pipeline, the index will be in sync with the
|
||||
binaries on the mirror. If the mirror lives in an S3 bucket, this job will need to
|
||||
run on a machine with the Python ``boto3`` module installed, and consequently the
|
||||
``final-stage-rebuild-index`` needs to specify a list of ``tags`` to pick a runner
|
||||
satisfying that condition. It can also take an ``image`` key so Docker executor type
|
||||
runners can pick the right image for the index regeneration job.
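
For example (a sketch only; the tag and image below are illustrative), the
index regeneration job can be pointed at a suitable runner like this:

.. code-block:: yaml

   gitlab-ci:
     final-stage-rebuild-index:
       image: spack/centos7        # any image that provides python + boto3 (illustrative)
       tags: [s3-capable-runner]   # hypothetical runner tag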
|
||||
|
||||
The optional ``cdash`` section provides information that will be used by the
|
||||
``spack ci generate`` command (invoked by ``spack ci start``) for reporting
|
||||
@@ -410,81 +220,6 @@ progresses, this build group may have jobs added or removed. The url, project,
|
||||
and site are used to specify the CDash instance to which build results should
|
||||
be reported.
|
||||
|
||||
Take a look at the
|
||||
`schema <https://github.com/spack/spack/blob/develop/lib/spack/spack/schema/gitlab_ci.py>`_
|
||||
for the gitlab-ci section of the spack environment file, to see precisely what
|
||||
syntax is allowed there.
|
||||
|
||||
.. _rebuild_index:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Note about rebuilding buildcache index
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
By default, while a pipeline job may rebuild a package, create a buildcache
|
||||
entry, and push it to the mirror, it does not automatically re-generate the
|
||||
mirror's buildcache index afterward. Because the index is not needed by the
|
||||
default rebuild jobs in the pipeline, not updating the index at the end of
|
||||
each job avoids possible race conditions between simultaneous jobs, and it
|
||||
avoids the computational expense of regenerating the index. This potentially
|
||||
saves minutes per job, depending on the number of binary packages in the
|
||||
mirror. As a result, the default is that the mirror's buildcache index may
|
||||
not correctly reflect the mirror's contents at the end of a pipeline.
|
||||
|
||||
To make sure the buildcache index is up to date at the end of your pipeline,
|
||||
spack generates a job to update the buildcache index of the target mirror
|
||||
at the end of each pipeline by default. You can disable this behavior by
|
||||
adding ``rebuild-index: False`` inside the ``gitlab-ci`` section of your
|
||||
spack environment. Spack will assign the job any runner attributes found
|
||||
on the ``service-job-attributes``, if you have provided that in your
|
||||
``spack.yaml``.
|
||||
|
||||
.. _noop_jobs:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Note about "no-op" jobs
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If no specs in an environment need to be rebuilt during a given pipeline run
|
||||
(meaning all are already up to date on the mirror), a single successful job
|
||||
(a NO-OP) is still generated to avoid an empty pipeline (which GitLab
|
||||
considers to be an error). An optional ``service-job-attributes`` section
|
||||
can be added to your ``spack.yaml`` where you can provide ``tags`` and
|
||||
``image`` or ``variables`` for the generated NO-OP job. This section also
|
||||
supports providing ``before_script``, ``script``, and ``after_script``, in
|
||||
case you want to take some custom actions in the case of any empty pipeline.
|
||||
|
||||
Following is an example of this section added to a ``spack.yaml``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
spack:
|
||||
specs:
|
||||
- openmpi
|
||||
mirrors:
|
||||
cloud_gitlab: https://mirror.spack.io
|
||||
gitlab-ci:
|
||||
mappings:
|
||||
- match:
|
||||
- os=centos8
|
||||
runner-attributes:
|
||||
tags:
|
||||
- custom
|
||||
- tag
|
||||
image: spack/centos7
|
||||
service-job-attributes:
|
||||
tags: ['custom', 'tag']
|
||||
image:
|
||||
name: 'some.image.registry/custom-image:latest'
|
||||
entrypoint: ['/bin/bash']
|
||||
script:
|
||||
- echo "Custom message in a custom script"
|
||||
|
||||
The example above illustrates how you can provide the attributes used to run
|
||||
the NO-OP job in the case of an empty pipeline. The only field for the NO-OP
|
||||
job that might be generated for you is ``script``, but that will only happen
|
||||
if you do not provide one yourself.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Assignment of specs to runners
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@@ -510,19 +245,7 @@ runners known to the gitlab instance. For Docker executor type runners, the
|
||||
as well as an ``entrypoint`` to override whatever the default for that image is).
|
||||
For other types of runners the ``variables`` key will be useful to pass any
|
||||
information on to the runner that it needs to do its work (e.g. scheduler
|
||||
parameters, etc.). Any ``variables`` provided here will be added, verbatim, to
|
||||
each job.
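As a hedged sketch of how this might be written (the runner tag, variable name,
and value below are placeholders, not part of the schema):

.. code-block:: yaml

   gitlab-ci:
     mappings:
       - match:
           - os=centos8
         runner-attributes:
           tags:
             - some-runner-tag
           variables:
             SCHEDULER_ACCOUNT: "my-account"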
|
||||
|
||||
The ``runner-attributes`` section also allows users to supply custom ``script``,
|
||||
``before_script``, and ``after_script`` sections to be applied to every job
|
||||
scheduled on that runner. This allows users to do any custom preparation or
|
||||
cleanup tasks that fit their particular workflow, as well as completely
|
||||
customize the rebuilding of a spec if they so choose. Spack will not generate
|
||||
a ``before_script`` or ``after_script`` for jobs, but if you do not provide
|
||||
a custom ``script``, spack will generate one for you that assumes the concrete
|
||||
environment directory is located within your ``--artifacts_root`` (or if not
|
||||
provided, within your ``$CI_PROJECT_DIR``), activates that environment for
|
||||
you, and invokes ``spack ci rebuild``.
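Conceptually (the exact commands spack emits may differ), that generated
``script`` resembles the one shown in the example later in this document:

.. code-block:: yaml

   script:
     - spack env activate --without-view ${SPACK_CONCRETE_ENV_DIR}
     - spack ci rebuild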
|
||||
parameters, etc.).
|
||||
|
||||
.. _staging_algorithm:
|
||||
|
||||
@@ -533,8 +256,8 @@ Summary of ``.gitlab-ci.yml`` generation algorithm
|
||||
All specs yielded by the matrix (or all the specs in the environment) have their
|
||||
dependencies computed, and the entire resulting set of specs are staged together
|
||||
before being run through the ``gitlab-ci/mappings`` entries, where each staged
|
||||
spec is assigned a runner. "Staging" is the name given to the process of
|
||||
figuring out in what order the specs should be built, taking into consideration
|
||||
spec is assigned a runner. "Staging" is the name we have given to the process
|
||||
of figuring out in what order the specs should be built, taking into consideration
|
||||
Gitlab CI rules about jobs/stages. In the staging process the goal is to maximize
|
||||
the number of jobs in any stage of the pipeline, while ensuring that the jobs in
|
||||
any stage only depend on jobs in previous stages (since those jobs are guaranteed
|
||||
@@ -545,7 +268,7 @@ a runner, the ``.gitlab-ci.yml`` is written to disk.
|
||||
|
||||
The short example provided above would result in the ``readline``, ``ncurses``,
|
||||
and ``pkgconf`` packages getting staged and built on the runner chosen by the
|
||||
``spack-k8s`` tag. In this example, spack assumes the runner is a Docker executor
|
||||
``spack-k8s`` tag. In this example, we assume the runner is a Docker executor
|
||||
type runner, and thus certain jobs will be run in the ``centos7`` container,
|
||||
and others in the ``ubuntu-18.04`` container. The resulting ``.gitlab-ci.yml``
|
||||
will contain 6 jobs in three stages. Once the jobs have been generated, the
|
||||
@@ -604,12 +327,12 @@ Here's an example of what bootstrapping some compilers might look like:
|
||||
# mappings similar to the example higher up in this description
|
||||
...
|
||||
|
||||
The example above adds a list to the ``definitions`` called ``compiler-pkgs``
|
||||
(you can add any number of these), which lists compiler packages that should
|
||||
be staged ahead of the full matrix of release specs (in this example, only
|
||||
readline). Then within the ``gitlab-ci`` section, note the addition of a
|
||||
``bootstrap`` section, which can contain a list of items, each referring to
|
||||
a list in the ``definitions`` section. These items can either
|
||||
In the example above, we have added a list to the ``definitions`` called
|
||||
``compiler-pkgs`` (you can add any number of these), which lists compiler packages
|
||||
we want to be staged ahead of the full matrix of release specs (which consists
|
||||
only of readline in our example). Then within the ``gitlab-ci`` section, we
|
||||
have added a ``bootstrap`` section, which can contain a list of items, each
|
||||
referring to a list in the ``definitions`` section. These items can either
|
||||
be a dictionary or a string. If you supply a dictionary, it must have a ``name``
|
||||
key whose value must match one of the lists in ``definitions`` and it can have a
|
||||
``compiler-agnostic`` key whose value is a boolean. If you supply a string,
|
||||
@@ -645,16 +368,13 @@ Using a custom spack in your pipeline
|
||||
|
||||
If your runners will not have a version of spack ready to invoke, or if for some
|
||||
other reason you want to use a custom version of spack to run your pipelines,
|
||||
this section provides an example of how you could take advantage of
|
||||
user-provided pipeline scripts to accomplish this fairly simply. First, consider
|
||||
specifying the source and version of spack you want to use with variables, either
|
||||
written directly into your ``.gitlab-ci.yml``, or provided by CI variables defined
|
||||
in the gitlab UI or from some upstream pipeline. Let's say you choose the variable
|
||||
names ``SPACK_REPO`` and ``SPACK_REF`` to refer to the particular fork of spack
|
||||
and branch you want for running your pipeline. You can then refer to those in a
|
||||
custom shell script invoked both from your pipeline generation job and your rebuild
|
||||
jobs. Here's the ``generate-pipeline`` job from the top of this document,
|
||||
updated to clone and source a custom spack:
|
||||
this can be accomplished fairly simply. First, create CI environment variables
|
||||
containing the url and branch/tag you want to clone (calling them, for example,
|
||||
``SPACK_REPO`` and ``SPACK_REF``), use them to clone spack in your pre-ci
|
||||
``before_script``, and finally pass those same values along to the workload
|
||||
generation process via the ``spack-repo`` and ``spack-ref`` cli args. Here's
|
||||
the ``generate-pipeline`` job from the top of this document, updated to clone
|
||||
a custom spack and make sure the generated rebuild jobs will clone it too:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
@@ -662,88 +382,26 @@ updated to clone and source a custom spack:
|
||||
tags:
|
||||
- <some-other-tag>
|
||||
before_script:
|
||||
- git clone ${SPACK_REPO}
|
||||
- pushd spack && git checkout ${SPACK_REF} && popd
|
||||
- . "./spack/share/spack/setup-env.sh"
|
||||
- git clone ${SPACK_REPO} --branch ${SPACK_REF}
|
||||
- . ./spack/share/spack/setup-env.sh
|
||||
script:
|
||||
- spack env activate --without-view .
|
||||
- spack ci generate --check-index-only
|
||||
--artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir"
|
||||
- spack env activate .
|
||||
- spack ci generate
|
||||
--spack-repo ${SPACK_REPO} --spack-ref ${SPACK_REF}
|
||||
--output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
|
||||
after_script:
|
||||
- rm -rf ./spack
|
||||
artifacts:
|
||||
paths:
|
||||
- "${CI_PROJECT_DIR}/jobs_scratch_dir"
|
||||
- "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
|
||||
|
||||
That takes care of getting the desired version of spack when your pipeline is
|
||||
generated by ``spack ci generate``. You also want your generated rebuild jobs
|
||||
(all of them) to clone that version of spack, so next you would update your
|
||||
``spack.yaml`` from above as follows:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
spack:
|
||||
...
|
||||
gitlab-ci:
|
||||
mappings:
|
||||
- match:
|
||||
- os=ubuntu18.04
|
||||
runner-attributes:
|
||||
tags:
|
||||
- spack-kube
|
||||
image: spack/ubuntu-bionic
|
||||
before_script:
|
||||
- git clone ${SPACK_REPO}
|
||||
- pushd spack && git checkout ${SPACK_REF} && popd
|
||||
- . "./spack/share/spack/setup-env.sh"
|
||||
script:
|
||||
- spack env activate --without-view ${SPACK_CONCRETE_ENV_DIR}
|
||||
- spack -d ci rebuild
|
||||
after_script:
|
||||
- rm -rf ./spack
|
||||
|
||||
Now all of the generated rebuild jobs will use the same shell script to clone
|
||||
spack before running their actual workload.
|
||||
|
||||
Now imagine you have long pipelines with many specs to be built, and you
|
||||
are pointing to a spack repository and branch that has a tendency to change
|
||||
frequently, such as the main repo and its ``develop`` branch. If each child
|
||||
job checks out the ``develop`` branch, that could result in some jobs running
|
||||
with one SHA of spack, while later jobs run with another. To help avoid this
|
||||
issue, the pipeline generation process saves global variables called
|
||||
``SPACK_VERSION`` and ``SPACK_CHECKOUT_VERSION`` that capture the version
|
||||
of spack used to generate the pipeline. While the ``SPACK_VERSION`` variable
|
||||
simply contains the human-readable value produced by ``spack -V`` at pipeline
|
||||
generation time, the ``SPACK_CHECKOUT_VERSION`` variable can be used in a
|
||||
``git checkout`` command to make sure all child jobs checkout the same version
|
||||
of spack used to generate the pipeline. To take advantage of this, you could
|
||||
simply replace ``git checkout ${SPACK_REF}`` in the example ``spack.yaml``
|
||||
above with ``git checkout ${SPACK_CHECKOUT_VERSION}``.
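With that substitution, the ``before_script`` from the example above would read
roughly as follows:

.. code-block:: yaml

   before_script:
     - git clone ${SPACK_REPO}
     - pushd spack && git checkout ${SPACK_CHECKOUT_VERSION} && popd
     - . "./spack/share/spack/setup-env.sh"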
|
||||
|
||||
On the other hand, if you're pointing to a spack repository and branch under your
|
||||
control, there may be no benefit in using the captured ``SPACK_CHECKOUT_VERSION``,
|
||||
and you can instead just clone using the variables you define (``SPACK_REPO``
|
||||
and ``SPACK_REF`` in the examples above).
|
||||
|
||||
.. _custom_workflow:
|
||||
|
||||
---------------
|
||||
Custom Workflow
|
||||
---------------
|
||||
|
||||
There are many ways to take advantage of spack CI pipelines to achieve custom
|
||||
workflows for building packages or other resources. One example of a custom
|
||||
pipelines workflow is the spack tutorial container
|
||||
`repo <https://github.com/spack/spack-tutorial-container>`_. This project uses
|
||||
GitHub (for source control), GitLab (for automated spack ci pipelines), and
|
||||
DockerHub automated builds to build Docker images (complete with a fully populated
|
||||
binary mirror) used by instructors and participants of a spack tutorial.
|
||||
|
||||
Take a look at the repo to see how it is accomplished using spack CI pipelines,
|
||||
and see the following markdown files at the root of the repository for
|
||||
descriptions and documentation describing the workflow: ``DESCRIPTION.md``,
|
||||
``DOCKERHUB_SETUP.md``, ``GITLAB_SETUP.md``, and ``UPDATING.md``.
|
||||
If the ``spack ci generate`` command receives those extra command line arguments,
|
||||
then it adds similar ``before_script`` and ``after_script`` sections for each of
|
||||
the ``spack ci rebuild`` jobs it generates (cloning and sourcing a custom
|
||||
spack in the ``before_script`` and removing it again in the ``after_script``).
|
||||
This gives you control over the version of spack used when the rebuild jobs
|
||||
are actually run on the gitlab runner.
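As a rough sketch (the exact generated content may differ), each rebuild job
would then include sections along these lines:

.. code-block:: yaml

   before_script:
     - git clone ${SPACK_REPO}
     - pushd spack && git checkout ${SPACK_REF} && popd
     - . "./spack/share/spack/setup-env.sh"
   after_script:
     - rm -rf ./spack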
|
||||
|
||||
.. _ci_environment_variables:
|
||||
|
||||
@@ -760,33 +418,28 @@ environment variables used by the pipeline infrastructure are described here.
|
||||
AWS_ACCESS_KEY_ID
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
Optional. Only needed when binary mirror is an S3 bucket.
|
||||
Needed when binary mirror is an S3 bucket.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
AWS_SECRET_ACCESS_KEY
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Optional. Only needed when binary mirror is an S3 bucket.
|
||||
Needed when binary mirror is an S3 bucket.
|
||||
|
||||
^^^^^^^^^^^^^^^
|
||||
S3_ENDPOINT_URL
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Optional. Only needed when binary mirror is an S3 bucket that is *not* on AWS.
|
||||
Needed when binary mirror is an S3 bucket that is *not* on AWS.
|
||||
|
||||
^^^^^^^^^^^^^^^^^
|
||||
CDASH_AUTH_TOKEN
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
Optional. Only needed in order to report build groups to CDash.
|
||||
Needed in order to report build groups to CDash.
|
||||
|
||||
^^^^^^^^^^^^^^^^^
|
||||
SPACK_SIGNING_KEY
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
Optional. Only needed if you want ``spack ci rebuild`` to trust the key you
|
||||
store in this variable, in which case, it will subsequently be used to sign and
|
||||
verify binary packages (when installing or creating buildcaches). You could
|
||||
also have already trusted a key spack knows about, or if no key is present anywhere,
|
||||
spack will install specs using ``--no-check-signature`` and create buildcaches
|
||||
using ``-u`` (for unsigned binaries).
|
||||
Needed to sign/verify binary packages from the remote binary mirror.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -9,7 +9,7 @@
|
||||
Package Repositories
|
||||
=============================
|
||||
|
||||
Spack comes with thousands of built-in package recipes in
|
||||
Spack comes with over 1,000 built-in package recipes in
|
||||
``var/spack/repos/builtin/``. This is a **package repository** -- a
|
||||
directory that Spack searches when it needs to find a package by name.
|
||||
You may need to maintain packages for restricted, proprietary or
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
.. Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -543,8 +543,7 @@ specified from the command line using the ``--projection-file`` option
|
||||
to the ``spack view`` command.
|
||||
|
||||
The projections configuration file is a mapping of partial specs to
|
||||
spec format strings, defined by the :meth:`~spack.spec.Spec.format`
|
||||
function, as shown in the example below.
|
||||
spec format strings, as shown in the example below.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
@@ -704,6 +703,400 @@ environments:
|
||||
Administrators might find things easier to maintain without the
|
||||
added "heavyweight" state of a view.
|
||||
|
||||
------------------------------
|
||||
Developing Software with Spack
|
||||
------------------------------
|
||||
|
||||
For any project, one needs to assemble an
|
||||
environment of that application's dependencies. You might consider
|
||||
loading a series of modules or creating a filesystem view. This
|
||||
approach, while obvious, has some serious drawbacks:
|
||||
|
||||
1. There is no guarantee that an environment created this way will be
|
||||
consistent. Your application could end up with dependency A
|
||||
expecting one version of MPI, and dependency B expecting another.
|
||||
The linker will not be happy...
|
||||
|
||||
2. Suppose you need to debug a package deep within your software DAG.
|
||||
If you build that package with a manual environment, then it
|
||||
becomes difficult to have Spack auto-build things that depend on
|
||||
it. That could be a serious problem, depending on how deep the
|
||||
package in question is in your dependency DAG.
|
||||
|
||||
3. At its core, Spack is a sophisticated concretization algorithm that
|
||||
matches up packages with appropriate dependencies and creates a
|
||||
*consistent* environment for the package it's building. Writing a
|
||||
list of ``spack load`` commands for your dependencies is at least
|
||||
as hard as writing the same list of ``depends_on()`` declarations
|
||||
in a Spack package. But it makes no use of Spack concretization
|
||||
and is more error-prone.
|
||||
|
||||
4. Spack provides an automated, systematic way not just to find a
|
||||
package's dependencies --- but also to build other packages on
|
||||
top. Any Spack package can become a dependency for another Spack
|
||||
package, offering a powerful vision of software re-use. If you
|
||||
build your package A outside of Spack, then your ability to use it
|
||||
as a building block for other packages in an automated way is
|
||||
diminished: other packages depending on package A will not
|
||||
be able to use Spack to fulfill that dependency.
|
||||
|
||||
5. If you are reading this manual, you probably love Spack. You're
|
||||
probably going to write a Spack package for your software so
|
||||
prospective users can install it with the least amount of pain.
|
||||
Why should you go to additional work to find dependencies in your
|
||||
development environment? Shouldn't Spack be able to help you build
|
||||
your software based on the package you've already written?
|
||||
|
||||
In this section, we show how Spack can be used in the software
|
||||
development process to greatest effect, and how development packages
|
||||
can be seamlessly integrated into the Spack ecosystem. We will show
|
||||
how this process works by example, assuming the software you are
|
||||
creating is called ``mylib``.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
Write the CMake Build
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
For now, the techniques in this section only work for CMake-based
|
||||
projects, although they could be easily extended to other build
|
||||
systems in the future. We will therefore assume you are using CMake
|
||||
to build your project.
|
||||
|
||||
The ``CMakeLists.txt`` file should be written as normal. A few caveats:
|
||||
|
||||
1. Your project should produce binaries with RPATHs. This will ensure
|
||||
that they work the same whether built manually or automatically by
|
||||
Spack. For example:
|
||||
|
||||
.. code-block:: cmake
|
||||
|
||||
# enable @rpath in the install name for any shared library being built
|
||||
# note: it is planned that a future version of CMake will enable this by default
|
||||
set(CMAKE_MACOSX_RPATH 1)
|
||||
|
||||
# Always use full RPATH
|
||||
# http://www.cmake.org/Wiki/CMake_RPATH_handling
|
||||
# http://www.kitware.com/blog/home/post/510
|
||||
|
||||
# use, i.e. don't skip the full RPATH for the build tree
|
||||
SET(CMAKE_SKIP_BUILD_RPATH FALSE)
|
||||
|
||||
# when building, don't use the install RPATH already
|
||||
# (but later on when installing)
|
||||
SET(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
|
||||
|
||||
# add the automatically determined parts of the RPATH
|
||||
# which point to directories outside the build tree to the install RPATH
|
||||
SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
|
||||
|
||||
# the RPATH to be used when installing, but only if it's not a system directory
|
||||
LIST(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES "${CMAKE_INSTALL_PREFIX}/lib" isSystemDir)
|
||||
IF("${isSystemDir}" STREQUAL "-1")
|
||||
SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib")
|
||||
ENDIF("${isSystemDir}" STREQUAL "-1")
|
||||
|
||||
|
||||
2. Spack provides a CMake variable called
|
||||
``SPACK_TRANSITIVE_INCLUDE_PATH``, which contains the ``include/``
|
||||
directory for all of your project's transitive dependencies. It
|
||||
can be useful if your project ``#include``s files from package B,
|
||||
which ``#include``s files from package C, but your project only
|
||||
lists project B as a dependency. This works in traditional
|
||||
single-tree build environments, in which B and C's include files
|
||||
live in the same place. In order to make it work with Spack as
|
||||
well, you must add the following to ``CMakeLists.txt``. It will
|
||||
have no effect when building without Spack:
|
||||
|
||||
.. code-block:: cmake
|
||||
|
||||
# Include all the transitive dependencies determined by Spack.
|
||||
# If we're not running with Spack, this does nothing...
|
||||
include_directories($ENV{SPACK_TRANSITIVE_INCLUDE_PATH})
|
||||
|
||||
.. note::
|
||||
|
||||
Note that this feature is controversial and could break with
|
||||
future versions of GNU ld. The best practice is to make sure
|
||||
anything you ``#include`` is listed as a dependency in your
|
||||
CMakeLists.txt (and Spack package).
|
||||
|
||||
.. _write-the-spack-package:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Write the Spack Package
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The Spack package also needs to be written, in tandem with setting up
|
||||
the build (for example, CMake). The most important part of this task
|
||||
is declaring dependencies. Here is an example of the Spack package
|
||||
for the ``mylib`` package (ellipses for brevity):
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class Mylib(CMakePackage):
|
||||
"""Misc. reusable utilities used by Myapp."""
|
||||
|
||||
homepage = "https://github.com/citibeth/mylib"
|
||||
url = "https://github.com/citibeth/mylib/tarball/123"
|
||||
|
||||
version('0.1.2', '3a6acd70085e25f81b63a7e96c504ef9')
|
||||
version('develop', git='https://github.com/citibeth/mylib.git',
|
||||
branch='develop')
|
||||
|
||||
variant('everytrace', default=False,
|
||||
description='Report errors through Everytrace')
|
||||
...
|
||||
|
||||
extends('python')
|
||||
|
||||
depends_on('eigen')
|
||||
depends_on('everytrace', when='+everytrace')
|
||||
depends_on('proj', when='+proj')
|
||||
...
|
||||
depends_on('cmake', type='build')
|
||||
depends_on('doxygen', type='build')
|
||||
|
||||
def cmake_args(self):
|
||||
spec = self.spec
|
||||
return [
|
||||
'-DUSE_EVERYTRACE=%s' % ('YES' if '+everytrace' in spec else 'NO'),
|
||||
'-DUSE_PROJ4=%s' % ('YES' if '+proj' in spec else 'NO'),
|
||||
...
|
||||
'-DUSE_UDUNITS2=%s' % ('YES' if '+udunits2' in spec else 'NO'),
|
||||
'-DUSE_GTEST=%s' % ('YES' if '+googletest' in spec else 'NO')]
|
||||
|
||||
This is a standard Spack package that can be used to install
|
||||
``mylib`` in a production environment. The list of dependencies in
|
||||
the Spack package will generally be a repeat of the list of CMake
|
||||
dependencies. This package also has some features that allow it to be
|
||||
used for development:
|
||||
|
||||
1. It subclasses ``CMakePackage`` instead of ``Package``. This
|
||||
eliminates the need to write an ``install()`` method, which is
|
||||
defined in the superclass. Instead, one just needs to write the
|
||||
``cmake_args()`` method. That method should return the
|
||||
arguments needed for the ``cmake`` command (beyond the standard
|
||||
CMake arguments, which Spack will include already). These
|
||||
arguments are typically used to turn features on/off in the build.
|
||||
|
||||
2. It specifies a non-checksummed version ``develop``. Running
|
||||
``spack install mylib@develop`` will
|
||||
install the latest version off the develop branch. This method of
|
||||
download is useful for the developer of a project while it is in
|
||||
active development; however, it should only be used by developers
|
||||
who control and trust the repository in question!
|
||||
|
||||
3. The ``url``, ``url_for_version()`` and ``homepage`` attributes are
|
||||
not used in development. Don't worry if you don't have any, or if
|
||||
they are behind a firewall.
|
||||
|
||||
^^^^^^^^^^^^^^^^
|
||||
Build with Spack
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
Now that you have a Spack package, you can use Spack to find its
|
||||
dependencies automatically. For example:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ cd mylib
|
||||
$ spack setup mylib@local
|
||||
|
||||
The result will be a file ``spconfig.py`` in the top-level
|
||||
``mylib/`` directory. It is a short script that calls CMake with the
|
||||
dependencies and options determined by Spack --- similar to what
|
||||
happens in ``spack install``, but now written out in script form.
|
||||
From a developer's point of view, you can think of ``spconfig.py`` as
|
||||
a stand-in for the ``cmake`` command.
|
||||
|
||||
.. note::
|
||||
|
||||
You can invent any "version" you like for the ``spack setup``
|
||||
command.
|
||||
|
||||
.. note::
|
||||
|
||||
Although ``spack setup`` does not build your package, it does
|
||||
create and install a module file, and mark in the database that
|
||||
your package has been installed. This can lead to errors, of
|
||||
course, if you don't subsequently install your package.
|
||||
Also... you will need to ``spack uninstall`` before you run
|
||||
``spack setup`` again.
|
||||
|
||||
|
||||
You can now build your project as usual with CMake:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ mkdir build; cd build
|
||||
$ ../spconfig.py .. # Instead of cmake ..
|
||||
$ make
|
||||
$ make install
|
||||
|
||||
Once your ``make install`` command is complete, your package will be
|
||||
installed, just as if you'd run ``spack install``, except that you can now
|
||||
edit, re-build and re-install as often as needed, without checking
|
||||
into Git or downloading tarballs.
|
||||
|
||||
.. note::
|
||||
|
||||
The build you get this way will be *almost* the same as the build
|
||||
from ``spack install``. The only difference is, you will not be
|
||||
using Spack's compiler wrappers. This difference has not caused
|
||||
problems in our experience, as long as your project sets
|
||||
RPATHs as shown above. You DO use RPATHs, right?
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
Build Other Software
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Now that you've built ``mylib`` with Spack, you might want to build
|
||||
another package that depends on it --- for example, ``myapp``. This
|
||||
is accomplished easily enough:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install myapp ^mylib@local
|
||||
|
||||
Note that auto-built software has now been installed *on top of*
|
||||
manually-built software, without breaking Spack's "web." This
|
||||
property is useful if you need to debug a package deep in the
|
||||
dependency hierarchy of your application. It is a *big* advantage of
|
||||
using ``spack setup`` to build your package's environment.
|
||||
|
||||
If you feel your software is stable, you might wish to install it with
|
||||
``spack install`` and skip the source directory. You can just use,
|
||||
for example:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install mylib@develop
|
||||
|
||||
.. _release-your-software:
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
Release Your Software
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
You are now ready to release your software as a tarball with a
|
||||
numbered version, and a Spack package that can build it. If you're
|
||||
hosted on GitHub, this process will be a bit easier.
|
||||
|
||||
#. Put tag(s) on the version(s) in your GitHub repo you want to be
|
||||
release versions. For example, a tag ``v0.1.0`` for version 0.1.0.
|
||||
|
||||
#. Set the ``url`` in your ``package.py`` to download a tarball for
|
||||
the appropriate version. GitHub will give you a tarball for any
|
||||
commit in the repo, if you tickle it the right way. For example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
url = 'https://github.com/citibeth/mylib/tarball/v0.1.2'
|
||||
|
||||
#. Use Spack to determine your version's hash, and cut'n'paste it into
|
||||
your ``package.py``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack checksum mylib 0.1.2
|
||||
==> Found 1 versions of mylib
|
||||
0.1.2 https://github.com/citibeth/mylib/tarball/v0.1.2
|
||||
|
||||
How many would you like to checksum? (default is 5, q to abort)
|
||||
==> Downloading...
|
||||
==> Trying to fetch from https://github.com/citibeth/mylib/tarball/v0.1.2
|
||||
######################################################################## 100.0%
|
||||
==> Checksummed new versions of mylib:
|
||||
version('0.1.2', '3a6acd70085e25f81b63a7e96c504ef9')
|
||||
|
||||
#. You should now be able to install released version 0.1.2 of your package with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ spack install mylib@0.1.2
|
||||
|
||||
#. There is no need to remove the `develop` version from your package.
|
||||
Spack concretization will always prefer numbered versions to
|
||||
non-numeric versions. Users will only get the ``develop`` version if they ask for it.
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Distribute Your Software
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Once you've released your software, other people will want to build
|
||||
it; and you will need to tell them how. In the past, that has meant a
|
||||
few paragraphs of prose explaining which dependencies to install. But
|
||||
now you use Spack, and those instructions are written in executable
|
||||
Python code. But your software has many dependencies, and you know
|
||||
Spack is the best way to install it:
|
||||
|
||||
#. First, you will want to fork Spack's ``develop`` branch. Your aim
|
||||
is to provide a stable version of Spack that you KNOW will install
|
||||
your software. If you make changes to Spack in the process, you
|
||||
will want to submit pull requests to Spack core.
|
||||
|
||||
#. Add your software's ``package.py`` to that fork. You should submit
|
||||
a pull request for this as well, unless you don't want the public
|
||||
to know about your software.
|
||||
|
||||
#. Prepare instructions that read approximately as follows:
|
||||
|
||||
#. Download Spack from your forked repo.
|
||||
|
||||
#. Install Spack; see :ref:`getting_started`.
|
||||
|
||||
#. Set up an appropriate ``packages.yaml`` file. You should tell
|
||||
your users to include in this file whatever versions/variants
|
||||
are needed to make your software work correctly (assuming those
|
||||
are not already in your ``packages.yaml``); see the sketch after this list.
|
||||
|
||||
#. Run ``spack install mylib``.
|
||||
|
||||
#. Run this script to generate the ``module load`` commands or
|
||||
filesystem view needed to use this software.
|
||||
|
||||
#. Be aware that your users might encounter unexpected bootstrapping
|
||||
issues on their machines, especially if they are running on older
|
||||
systems. The :ref:`getting_started` section should cover this, but
|
||||
there could always be issues.
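Referring back to the ``packages.yaml`` step above, a minimal sketch of what you
might ask your users to add is shown below (the package name and variant are
placeholders drawn from the earlier ``mylib`` example; adjust them to whatever
your software actually requires):

.. code-block:: yaml

   packages:
     mylib:
       variants: +everytrace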
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
Other Build Systems
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
``spack setup`` currently only supports CMake-based builds, in
|
||||
packages that subclass ``CMakePackage``. The intent is that this
|
||||
mechanism should support a wider range of build systems; for example,
|
||||
GNU Autotools. Someone well-versed in Autotools is needed to develop
|
||||
this patch and test it out.
|
||||
|
||||
Python Distutils is another popular build system that should get
|
||||
``spack setup`` support. For non-compiled languages like Python,
|
||||
``spack diy`` may be used. Even better is to put the source directory
|
||||
directly in the user's ``PYTHONPATH``. Then, edits in source files
|
||||
are immediately available to run without any install process at all!
|
||||
|
||||
^^^^^^^^^^
|
||||
Conclusion
|
||||
^^^^^^^^^^
|
||||
|
||||
The ``spack setup`` development workflow provides better automation,
|
||||
flexibility and safety than workflows relying on environment modules
|
||||
or filesystem views. However, it has some drawbacks:
|
||||
|
||||
#. It currently works only with projects that use the CMake build
|
||||
system. Support for other build systems is not hard to build, but
|
||||
will require a small amount of effort for each build system to be
|
||||
supported. It might not work well with some IDEs.
|
||||
|
||||
#. It only works with packages that sub-class ``StagedPackage``.
|
||||
Currently, most Spack packages do not. Converting them is not
|
||||
hard, but it must be done on a package-by-package basis.
|
||||
|
||||
#. It requires that users are comfortable with Spack, as they
|
||||
integrate Spack explicitly in their workflow. Not all users are
|
||||
willing to do this.
|
||||
|
||||
-------------------------------------
|
||||
Using Spack to Replace Homebrew/Conda
|
||||
-------------------------------------
|
||||
@@ -1035,6 +1428,170 @@ The main points that are implemented below:
|
||||
- make -j 2
|
||||
- make test
|
||||
|
||||
.. _workflow_create_docker_image:
|
||||
|
||||
-----------------------------------
|
||||
Using Spack to Create Docker Images
|
||||
-----------------------------------
|
||||
|
||||
Spack can be the ideal tool to set up images for Docker (and Singularity).
|
||||
|
||||
An example ``Dockerfile`` is given below, downloading the latest spack
|
||||
version.
|
||||
|
||||
The following functionality is prepared:
|
||||
|
||||
#. Base image: the example starts from a minimal ubuntu.
|
||||
|
||||
#. Pre-install the spack dependencies.
|
||||
Package installs are followed by a clean-up of the system package index,
|
||||
to avoid outdated information and to save space.
|
||||
|
||||
#. Install spack in ``/usr/local``.
|
||||
Add ``setup-env.sh`` to profile scripts, so commands in *login* shells
|
||||
can use the whole spack functionality, including modules.
|
||||
|
||||
#. Install an example package (``tar``).
|
||||
As with system package managers above, ``spack install`` commands should be
|
||||
concatenated with a ``&& spack clean -a`` in order to keep image sizes small.
|
||||
|
||||
#. Add a startup hook to an *interactive login shell* so spack modules will be
|
||||
usable.
|
||||
|
||||
In order to build and run the image, execute:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker build -t spack .
|
||||
docker run -it spack
|
||||
|
||||
.. code-block:: docker
|
||||
|
||||
FROM ubuntu:16.04
|
||||
MAINTAINER Your Name <someone@example.com>
|
||||
|
||||
# general environment for docker
|
||||
ENV DEBIAN_FRONTEND=noninteractive \
|
||||
SPACK_ROOT=/usr/local
|
||||
|
||||
# install minimal spack dependencies
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
autoconf \
|
||||
build-essential \
|
||||
ca-certificates \
|
||||
coreutils \
|
||||
curl \
|
||||
environment-modules \
|
||||
git \
|
||||
python \
|
||||
unzip \
|
||||
vim \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# load spack environment on login
|
||||
RUN echo "source $SPACK_ROOT/share/spack/setup-env.sh" \
|
||||
> /etc/profile.d/spack.sh
|
||||
|
||||
# spack settings
|
||||
# note: if you wish to change default settings, add files alongside
|
||||
# the Dockerfile with your desired settings. Then uncomment this line
|
||||
#COPY packages.yaml modules.yaml $SPACK_ROOT/etc/spack/
|
||||
|
||||
# install spack
|
||||
RUN curl -s -L https://api.github.com/repos/spack/spack/tarball \
|
||||
| tar xzC $SPACK_ROOT --strip 1
|
||||
# note: at this point one could also run ``spack bootstrap`` to avoid
|
||||
# parts of the long apt-get install list above
|
||||
|
||||
# install software
|
||||
RUN spack install tar \
|
||||
&& spack clean -a
|
||||
|
||||
# need the executables from a package already during image build?
|
||||
#RUN /bin/bash -l -c ' \
|
||||
# spack load tar \
|
||||
# && which tar'
|
||||
|
||||
# image run hook: the -l will make sure /etc/profile environments are loaded
|
||||
CMD /bin/bash -l
|
||||
|
||||
^^^^^^^^^^^^^^
|
||||
Best Practices
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
"""
|
||||
MPI
|
||||
"""
|
||||
Because OpenMPI, the default MPI implementation in spack, depends on Fortran,
|
||||
consider adding ``gfortran`` to the ``apt-get install`` list.
|
||||
|
||||
Recent versions of OpenMPI will require you to pass ``--allow-run-as-root``
|
||||
to your ``mpirun`` calls if started as root user inside Docker.
|
||||
|
||||
For execution on HPC clusters, it can be helpful to import the docker
|
||||
image into Singularity in order to start a program with an *external*
|
||||
MPI. Otherwise, also add ``openssh-server`` to the ``apt-get install`` list.
|
||||
|
||||
""""
|
||||
CUDA
|
||||
""""
|
||||
Starting from CUDA 9.0, Nvidia provides minimal CUDA images based on
|
||||
Ubuntu.
|
||||
Please see `their instructions <https://hub.docker.com/r/nvidia/cuda/>`_.
|
||||
Avoid double-installing CUDA by adding, e.g.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
packages:
|
||||
cuda:
|
||||
paths:
|
||||
cuda@9.0.176%gcc@5.4.0 arch=linux-ubuntu16-x86_64: /usr/local/cuda
|
||||
buildable: False
|
||||
|
||||
to your ``packages.yaml``.
|
||||
Then ``COPY`` in that file into the image as in the example above.
|
||||
|
||||
Users will either need ``nvidia-docker`` or e.g. Singularity to *execute*
|
||||
device kernels.
|
||||
|
||||
"""""""""""
|
||||
Singularity
|
||||
"""""""""""
|
||||
Importing the image created above into
|
||||
`Singularity <http://singularity.lbl.gov/>`_ and running it works like a charm.
|
||||
Just use the `docker bootstrapping mechanism <http://singularity.lbl.gov/quickstart#bootstrap-recipes>`_:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
Bootstrap: docker
|
||||
From: registry/user/image:tag
|
||||
|
||||
%runscript
|
||||
exec /bin/bash -l
|
||||
|
||||
""""""""""""""""""""""
|
||||
Docker for Development
|
||||
""""""""""""""""""""""
|
||||
|
||||
For examples of how we use docker in development, see
|
||||
:ref:`docker_for_developers`.
|
||||
|
||||
"""""""""""""""""""""""""
|
||||
Docker on Windows and OSX
|
||||
"""""""""""""""""""""""""
|
||||
|
||||
On Mac OS and Windows, docker runs on a hypervisor that is not allocated much
|
||||
memory by default, and some spack packages may fail to build due to lack of
|
||||
memory. To work around this issue, consider configuring your docker installation
|
||||
to use more of your host memory. In some cases, you can also ease the memory
|
||||
pressure on parallel builds by limiting the parallelism in your config.yaml.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
build_jobs: 2
|
||||
|
||||
------------------
|
||||
Upstream Bug Fixes
|
||||
------------------
|
||||
|
||||
1
lib/spack/env/aocc/clang
vendored
@@ -1 +0,0 @@
|
||||
../cc
|
||||
1
lib/spack/env/aocc/clang++
vendored
@@ -1 +0,0 @@
|
||||
../cpp
|
||||
1
lib/spack/env/aocc/flang
vendored
@@ -1 +0,0 @@
|
||||
../fc
|
||||
58
lib/spack/env/cc
vendored
@@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -22,7 +22,7 @@
|
||||
|
||||
# This is an array of environment variables that need to be set before
|
||||
# the script runs. They are set by routines in spack.build_environment
|
||||
# as part of the package installation process.
|
||||
# as part of spack.package.Package.do_install().
|
||||
parameters=(
|
||||
SPACK_ENV_PATH
|
||||
SPACK_DEBUG_LOG_DIR
|
||||
@@ -107,25 +107,25 @@ case "$command" in
|
||||
cpp)
|
||||
mode=cpp
|
||||
;;
|
||||
cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc)
|
||||
cc|c89|c99|gcc|clang|armclang|icc|pgcc|xlc|xlc_r|fcc)
|
||||
command="$SPACK_CC"
|
||||
language="C"
|
||||
comp="CC"
|
||||
lang_flags=C
|
||||
;;
|
||||
c++|CC|g++|clang++|armclang++|icpc|icpx|pgc++|nvc++|xlc++|xlc++_r|FCC)
|
||||
c++|CC|g++|clang++|armclang++|icpc|pgc++|xlc++|xlc++_r|FCC)
|
||||
command="$SPACK_CXX"
|
||||
language="C++"
|
||||
comp="CXX"
|
||||
lang_flags=CXX
|
||||
;;
|
||||
ftn|f90|fc|f95|gfortran|flang|armflang|ifort|ifx|pgfortran|nvfortran|xlf90|xlf90_r|nagfor|frt)
|
||||
ftn|f90|fc|f95|gfortran|flang|armflang|ifort|pgfortran|xlf90|xlf90_r|nagfor|frt)
|
||||
command="$SPACK_FC"
|
||||
language="Fortran 90"
|
||||
comp="FC"
|
||||
lang_flags=F
|
||||
;;
|
||||
f77|xlf|xlf_r|pgf77)
|
||||
f77|xlf|xlf_r|pgf77|frt)
|
||||
command="$SPACK_F77"
|
||||
language="Fortran 77"
|
||||
comp="F77"
|
||||
@@ -277,22 +277,14 @@ other_args=()
|
||||
isystem_system_includes=()
|
||||
isystem_includes=()
|
||||
|
||||
while [ $# -ne 0 ]; do
|
||||
|
||||
while [ -n "$1" ]; do
|
||||
# an RPATH to be added after the case statement.
|
||||
rp=""
|
||||
|
||||
# Multiple consecutive spaces in the command line can
|
||||
# result in blank arguments
|
||||
if [ -z "$1" ]; then
|
||||
shift
|
||||
continue
|
||||
fi
|
||||
|
||||
case "$1" in
|
||||
-isystem*)
|
||||
arg="${1#-isystem}"
|
||||
isystem_was_used=true
|
||||
isystem_was_used=true
|
||||
if [ -z "$arg" ]; then shift; arg="$1"; fi
|
||||
if system_dir "$arg"; then
|
||||
isystem_system_includes+=("$arg")
|
||||
@@ -319,16 +311,6 @@ while [ $# -ne 0 ]; do
|
||||
fi
|
||||
;;
|
||||
-l*)
|
||||
# -loopopt=0 is generated erroneously in autoconf <= 2.69,
|
||||
# and passed by ifx to the linker, which confuses it with a
|
||||
# library. Filter it out.
|
||||
# TODO: generalize filtering of args with an env var, so that
|
||||
# TODO: we do not have to special case this here.
|
||||
if { [ "$mode" = "ccld" ] || [ $mode = "ld" ]; } \
|
||||
&& [ "$1" != "${1#-loopopt}" ]; then
|
||||
shift
|
||||
continue
|
||||
fi
|
||||
arg="${1#-l}"
|
||||
if [ -z "$arg" ]; then shift; arg="$1"; fi
|
||||
other_args+=("-l$arg")
|
||||
@@ -338,13 +320,9 @@ while [ $# -ne 0 ]; do
|
||||
if [ -z "$arg" ]; then shift; arg="$1"; fi
|
||||
if [[ "$arg" = -rpath=* ]]; then
|
||||
rp="${arg#-rpath=}"
|
||||
elif [[ "$arg" = --rpath=* ]]; then
|
||||
rp="${arg#--rpath=}"
|
||||
elif [[ "$arg" = -rpath,* ]]; then
|
||||
rp="${arg#-rpath,}"
|
||||
elif [[ "$arg" = --rpath,* ]]; then
|
||||
rp="${arg#--rpath,}"
|
||||
elif [[ "$arg" =~ ^-?-rpath$ ]]; then
|
||||
elif [[ "$arg" = -rpath ]]; then
|
||||
shift; arg="$1"
|
||||
if [[ "$arg" != -Wl,* ]]; then
|
||||
die "-Wl,-rpath was not followed by -Wl,*"
|
||||
@@ -361,9 +339,7 @@ while [ $# -ne 0 ]; do
|
||||
if [ -z "$arg" ]; then shift; arg="$1"; fi
|
||||
if [[ "$arg" = -rpath=* ]]; then
|
||||
rp="${arg#-rpath=}"
|
||||
elif [[ "$arg" = --rpath=* ]]; then
|
||||
rp="${arg#--rpath=}"
|
||||
elif [[ "$arg" = -rpath ]] || [[ "$arg" = --rpath ]]; then
|
||||
elif [[ "$arg" = -rpath ]]; then
|
||||
shift; arg="$1"
|
||||
if [[ "$arg" != -Xlinker,* ]]; then
|
||||
die "-Xlinker,-rpath was not followed by -Xlinker,*"
|
||||
@@ -458,7 +434,7 @@ then
|
||||
ld)
|
||||
flags=("${flags[@]}" -headerpad_max_install_names) ;;
|
||||
ccld)
|
||||
flags=("${flags[@]}" "-Wl,-headerpad_max_install_names") ;;
|
||||
flags=("${flags[@]}" -Wl,-headerpad_max_install_names) ;;
|
||||
esac
|
||||
fi
|
||||
|
||||
@@ -515,19 +491,19 @@ args+=("${flags[@]}")
|
||||
# Insert include directories just prior to any system include directories
|
||||
|
||||
for dir in "${includes[@]}"; do args+=("-I$dir"); done
|
||||
for dir in "${isystem_includes[@]}"; do args+=("-isystem" "$dir"); done
|
||||
for dir in "${isystem_includes[@]}"; do args+=("-isystem$dir"); done
|
||||
|
||||
IFS=':' read -ra spack_include_dirs <<< "$SPACK_INCLUDE_DIRS"
|
||||
if [[ $mode == cpp || $mode == cc || $mode == as || $mode == ccld ]]; then
|
||||
if [[ "$isystem_was_used" == "true" ]] ; then
|
||||
for dir in "${spack_include_dirs[@]}"; do args+=("-isystem" "$dir"); done
|
||||
for dir in "${spack_include_dirs[@]}"; do args+=("-isystem$dir"); done
|
||||
else
|
||||
for dir in "${spack_include_dirs[@]}"; do args+=("-I$dir"); done
|
||||
for dir in "${spack_include_dirs[@]}"; do args+=("-I$dir"); done
|
||||
fi
|
||||
fi
|
||||
|
||||
for dir in "${system_includes[@]}"; do args+=("-I$dir"); done
|
||||
for dir in "${isystem_system_includes[@]}"; do args+=("-isystem" "$dir"); done
|
||||
for dir in "${isystem_system_includes[@]}"; do args+=("-isystem$dir"); done
|
||||
|
||||
# Library search paths
|
||||
for dir in "${libdirs[@]}"; do args+=("-L$dir"); done
|
||||
@@ -536,12 +512,12 @@ for dir in "${system_libdirs[@]}"; do args+=("-L$dir"); done
|
||||
# RPATHs arguments
|
||||
case "$mode" in
|
||||
ccld)
|
||||
if [ -n "$dtags_to_add" ] ; then args+=("$linker_arg$dtags_to_add") ; fi
|
||||
if [ ! -z "$dtags_to_add" ] ; then args+=("$linker_arg$dtags_to_add") ; fi
|
||||
for dir in "${rpaths[@]}"; do args+=("$rpath$dir"); done
|
||||
for dir in "${system_rpaths[@]}"; do args+=("$rpath$dir"); done
|
||||
;;
|
||||
ld)
|
||||
if [ -n "$dtags_to_add" ] ; then args+=("$dtags_to_add") ; fi
|
||||
if [ ! -z "$dtags_to_add" ] ; then args+=("$dtags_to_add") ; fi
|
||||
for dir in "${rpaths[@]}"; do args+=("-rpath" "$dir"); done
|
||||
for dir in "${system_rpaths[@]}"; do args+=("-rpath" "$dir"); done
|
||||
;;
|
||||
|
||||
1
lib/spack/env/nvhpc/nvc
vendored
@@ -1 +0,0 @@
|
||||
../cc
|
||||
1
lib/spack/env/nvhpc/nvc++
vendored
@@ -1 +0,0 @@
|
||||
../cc
|
||||
1
lib/spack/env/nvhpc/nvfortran
vendored
@@ -1 +0,0 @@
|
||||
../cc
|
||||
1
lib/spack/env/oneapi/icpx
vendored
@@ -1 +0,0 @@
|
||||
../cc
|
||||
1
lib/spack/env/oneapi/icx
vendored
@@ -1 +0,0 @@
|
||||
../cc
|
||||
1
lib/spack/env/oneapi/ifx
vendored
@@ -1 +0,0 @@
|
||||
../cc
|
||||
9
lib/spack/external/__init__.py
vendored
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
|
||||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
|
||||
# Spack Project Developers. See the top-level COPYRIGHT file for details.
|
||||
#
|
||||
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
@@ -6,13 +6,6 @@
|
||||
"""This module contains the following external, potentially separately
|
||||
licensed, packages that are included in Spack:
|
||||
|
||||
archspec
|
||||
--------
|
||||
|
||||
* Homepage: https://pypi.python.org/pypi/archspec
|
||||
* Usage: Labeling, comparison and detection of microarchitectures
|
||||
* Version: 0.1.2 (commit 26dec9d47e509daf8c970de4c89da200da52ad20)
|
||||
|
||||
argparse
|
||||
--------
|
||||
|
||||
|
||||
7
lib/spack/external/_pytest/assertion/util.py
vendored
@@ -5,12 +5,9 @@
|
||||
import _pytest._code
|
||||
import py
|
||||
try:
|
||||
from collections.abc import Sequence
|
||||
from collections import Sequence
|
||||
except ImportError:
|
||||
try:
|
||||
from collections import Sequence
|
||||
except ImportError:
|
||||
Sequence = list
|
||||
Sequence = list
|
||||
|
||||
|
||||
u = py.builtin._totext
|
||||
|
||||
7
lib/spack/external/_pytest/main.py
vendored
@@ -10,12 +10,9 @@
|
||||
import _pytest._code
|
||||
import py
|
||||
try:
|
||||
from collections.abc import MutableMapping as MappingMixin
|
||||
from collections import MutableMapping as MappingMixin
|
||||
except ImportError:
|
||||
try:
|
||||
from collections import MutableMapping as MappingMixin
|
||||
except ImportError:
|
||||
from UserDict import DictMixin as MappingMixin
|
||||
from UserDict import DictMixin as MappingMixin
|
||||
|
||||
from _pytest.config import directory_arg, UsageError, hookimpl
|
||||
from _pytest.outcomes import exit
|
||||
|
||||
5
lib/spack/external/_pytest/python_api.py
vendored
@@ -398,10 +398,7 @@ def approx(expected, rel=None, abs=None, nan_ok=False):
|
||||
__ https://docs.python.org/3/reference/datamodel.html#object.__ge__
|
||||
"""
|
||||
|
||||
if sys.version_info >= (3, 3):
|
||||
from collections.abc import Mapping, Sequence
|
||||
else:
|
||||
from collections import Mapping, Sequence
|
||||
from collections import Mapping, Sequence
|
||||
from _pytest.compat import STRING_TYPES as String
|
||||
|
||||
# Delegate the comparison to a class that knows how to deal with the type
|
||||
|
||||
22
lib/spack/external/archspec/COPYRIGHT
vendored
@@ -1,22 +0,0 @@
|
||||
Intellectual Property Notice
|
||||
------------------------------
|
||||
|
||||
Archspec is licensed under the Apache License, Version 2.0 (LICENSE-APACHE
|
||||
or http://www.apache.org/licenses/LICENSE-2.0) or the MIT license,
|
||||
(LICENSE-MIT or http://opensource.org/licenses/MIT), at your option.
|
||||
|
||||
Copyrights and patents in the Archspec project are retained by contributors.
|
||||
No copyright assignment is required to contribute to Archspec.
|
||||
|
||||
|
||||
SPDX usage
|
||||
------------
|
||||
|
||||
Individual files contain SPDX tags instead of the full license text.
|
||||
This enables machine processing of license information based on the SPDX
|
||||
License Identifiers that are available here: https://spdx.org/licenses/
|
||||
|
||||
Files that are dual-licensed as Apache-2.0 OR MIT contain the following
|
||||
text in the license header:
|
||||
|
||||
SPDX-License-Identifier: (Apache-2.0 OR MIT)
|
||||
202
lib/spack/external/archspec/LICENSE-APACHE
vendored
@@ -1,202 +0,0 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
lib/spack/external/archspec/LICENSE-MIT (vendored): 20 lines changed
@@ -1,20 +0,0 @@
Copyright 2019-2020 Lawrence Livermore National Security, LLC and other
Archspec Project Developers. See the top-level COPYRIGHT file for details.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
Some files were not shown because too many files have changed in this diff.