Compare commits


6 Commits

Author SHA1 Message Date
Todd Gamblin
d62dc366ef bugfix: bring in .travis.yml from develop 2019-01-13 15:28:07 -08:00
Gregory Becker
4a7e3dcedd version bump: v0.12.1 2019-01-13 14:25:15 -08:00
Gregory Becker
85c9819615 tutorial basics section: fix gcc install version 2019-01-13 14:25:15 -08:00
Gregory Becker
7c4b5b6779 bugfix: add back r's for invalid regexes 2019-01-13 14:25:14 -08:00
Gregory Becker
8554e933d2 Make Spack relocate text files in build caches with relative binaries 2018-11-13 11:04:47 -06:00
Ubuntu
03a53dca5f Revert "Binary caching: remove symlinks, copy files instead (#9747)" (this reverts commit 058cf81312.) 2018-11-13 11:04:47 -06:00
7410 changed files with 56605 additions and 302805 deletions


@@ -4,13 +4,33 @@ coverage:
  range: 60...90
  status:
    project:
      default:
        threshold: 0.2%
      default: true
      llnl:
        threshold: 0.5
        paths:
          - lib/spack/llnl
      commands:
        threshold: 0.5
        paths:
          - lib/spack/spack/cmd
      build_systems:
        threshold: 0.5
        paths:
          - lib/spack/spack/build_systems
      modules:
        threshold: 0.5
        paths:
          - lib/spack/spack/modules
      core:
        threshold: 0.5
        paths:
          - "!lib/spack/llnl"
          - "!lib/spack/spack/cmd"
ignore:
  - lib/spack/spack/test/.*
  - lib/spack/env/.*
  - lib/spack/docs/.*
  - lib/spack/external/.*
  - share/spack/qa/.*
comment: off


@@ -4,14 +4,12 @@
parallel = True
concurrency = multiprocessing
branch = True
source =
  bin
  lib
source = lib
omit =
  lib/spack/spack/test/*
  lib/spack/env/*
  lib/spack/docs/*
  lib/spack/external/*
  share/spack/qa/*
[report]
# Regexes for lines to exclude from consideration


@@ -1,11 +1,6 @@
.git/*
opt/spack/*
/etc/spack/*
!/etc/spack/defaults
share/spack/dotkit/*
share/spack/lmod/*
share/spack/modules/*
lib/spack/spack/test/*
.git
opt/spack
share/spack/docker/Dockerfile
share/spack/docker/build-image.sh
share/spack/docker/run-image.sh
share/spack/docker/push-image.sh

.flake8

@@ -1,73 +1,24 @@
# -*- conf -*-
# flake8 settings for Spack core files.
#
# These exceptions are for Spack core files. We're slightly more lenient
# with packages. See .flake8_packages for that.
#
# E1: Indentation
# Let people line things up nicely:
# - E129: visually indented line with same indent as next logical line
#
# E2: Whitespace
# - E221: multiple spaces before operator
# - E241: multiple spaces after ','
# - E272: multiple spaces before keyword
#
# E7: Statement
# - E731: do not assign a lambda expression, use a def
#
# W5: Line break warning
# - W503: line break before binary operator
# - W504: line break after binary operator
# Let people use terse Python features:
# - E731: lambda expressions
#
# These are required to get the package.py files to test clean:
# - F999: syntax error in doctest
#
# N8: PEP8-naming
# - N801: class names should use CapWords convention
# - N813: camelcase imported as lowercase
# - N814: camelcase imported as constant
#
# F4: pyflakes import checks, these are now checked by mypy more precisely
# - F403: from module import *
# - F405: undefined name or from *
#
# Black ignores, these are incompatible with black style and do not follow PEP-8
# - E203: white space around slice operators can be required, ignore : warn
# - W503: see above, already ignored for line-breaks
# Exempt to allow decorator classes to be lowercase, but follow otherwise:
# - N801: CapWords for class names.
#
[flake8]
ignore = E129,E221,E241,E272,E731,W503,W504,F999,N801,N813,N814,F403,F405
max-line-length = 88
# F4: Import
# - F405: `name` may be undefined, or undefined from star imports: `module`
#
# F8: Name
# - F821: undefined name `name`
#
per-file-ignores =
  var/spack/repos/*/package.py:F405,F821
# exclude things we usually do not want linting for.
# These still get linted when passed explicitly, as when spack flake8 passes
# them on the command line.
exclude =
  .git
  etc/
  opt/
  share/
  var/spack/cache/
  var/spack/gpg*/
  var/spack/junit-report/
  var/spack/mock-configs/
  lib/spack/external
  __pycache__
  var
format = spack
[flake8:local-plugins]
report =
  spack = flake8_formatter:SpackFormatter
paths =
  ./share/spack/qa/
ignore = E129,E221,E241,E272,E731,F999,N801,W503,W504
max-line-length = 79

.flake8_packages

@@ -0,0 +1,22 @@
# -*- conf -*-
# flake8 settings for Spack package files.
#
# This should include all the same exceptions that we use for core files.
#
# In Spack packages, we also allow the single `from spack import *`
# wildcard import and dependencies can set globals for their
# dependents. So we add exceptions for checks related to undefined names.
#
# Note that we also add *per-line* exemptions for certain patterns in the
# `spack flake8` command. This is where F403 for `from spack import *`
# is added (because we *only* allow that wildcard).
#
# See .flake8 for regular exceptions.
#
# Redefinition exceptions:
# - F405: `name` may be undefined, or undefined from star imports: `module`
# - F821: undefined name `name` (needed for cmake, configure, etc.)
#
[flake8]
ignore = E129,E221,E241,E272,E731,F999,F405,F821,W503,W504
max-line-length = 79
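
To make the rationale above concrete, here is a minimal sketch of a hypothetical `package.py` (the package name, URL, and checksum are invented for illustration). The `from spack import *` wildcard supplies directives like `depends_on`, and build-tool globals such as `configure` and `make` are injected by dependencies at build time, so flake8 would otherwise flag them F405/F821:

```python
# Hypothetical var/spack/repos/builtin/packages/example/package.py
from spack import *  # F403 is exempted per-line by `spack flake8`


class Example(AutotoolsPackage):  # F405: name comes from the star import
    """Minimal illustrative package."""

    homepage = "https://example.com"
    url = "https://example.com/example-1.0.tar.gz"

    version('1.0', sha256='0' * 64)  # placeholder checksum
    depends_on('zlib')

    def install(self, spec, prefix):
        # `configure` and `make` are globals set for dependents at build
        # time; without the F821 exemption flake8 reports them undefined.
        configure('--prefix={0}'.format(prefix))
        make()
        make('install')
```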

.gitattributes

@@ -1,3 +0,0 @@
*.py diff=python
*.lp linguist-language=Prolog
lib/spack/external/* linguist-vendored


@@ -1,11 +1,14 @@
---
name: "\U0001F41E Bug report"
about: Report a bug in the core of Spack (command not working as expected, etc.)
labels: "bug,triage"
name: Bug report
about: Report a bug in the core of Spack (command not working as expected, etc.)
---
<!-- Explain, in a clear and concise way, the command you ran and the result you were trying to achieve.
Example: "I ran `spack find` to list all the installed packages and ..." -->
*Explain, in a clear and concise way, the command you ran and the result you were trying to achieve.
Example: "I ran `spack find` to list all the installed packages and..."*
### Steps to reproduce the issue
@@ -17,26 +20,30 @@ $ spack <command2> <spec>
### Error Message
<!-- If Spack reported an error, provide the error message. If it did not report an error but the output appears incorrect, provide the incorrect output. If there was no error message and no output but the result is incorrect, describe how it does not match what you expect. -->
If Spack reported an error, provide the error message. If it did not report an error
but the output appears incorrect, provide the incorrect output. If there was no error
message and no output but the result is incorrect, describe how it does not match
what you expect. To provide more information you might re-run the commands with
the additional `-sd` flags:
```console
$ spack --debug --stacktrace <command>
$ spack -sd <command1> <spec>
$ spack -sd <command2> <spec>
...
```
which activate the full debug output.
### Information on your system
<!-- Please include the output of `spack debug report` -->
This includes:
<!-- If you have any relevant configuration detail (custom `packages.yaml` or `modules.yaml`, etc.) you can add that here as well. -->
1. which platform you are using
2. any relevant configuration detail (custom `packages.yaml` or `modules.yaml`, etc.)
### Additional information
-----
<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. -->
- [ ] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [ ] I have searched the issues of this repo and believe this is not a duplicate
- [ ] I have run the failing commands in debug mode and reported the output
<!-- We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively!
We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively!
If you want to ask a question about the tool (how to use it, what it can currently do, etc.), try the `#general` channel on our Slack first. We have a welcoming community and chances are you'll get your reply faster and without opening an issue.
Other than that, thanks for taking the time to contribute to Spack! -->
Other than that, thanks for taking the time to contribute to Spack!


@@ -1,43 +1,78 @@
---
name: "\U0001F4A5 Build error"
about: Some package in Spack didn't build correctly
title: "Installation issue: "
labels: "build-error"
name: Build error
about: Some package in Spack didn't build correctly
---
<!-- Thanks for taking the time to report this build failure. To proceed with the report please:
*Thanks for taking the time to report this build failure. To proceed with the
report please:*
1. Title the issue "Installation issue: <name-of-the-package>".
2. Provide the information required below.
3. Remove the template instructions before posting the issue.
We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively! -->
We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively!
---
### Steps to reproduce the issue
<!-- Fill in the exact spec you are trying to build and the relevant part of the error message -->
```console
$ spack install <spec>
...
$ spack install <spec> # Fill in the exact spec you are using
... # and the relevant part of the error message
```
### Information on your system
### Platform and user environment
<!-- Please include the output of `spack debug report` -->
Please report your OS here:
```commandline
$ uname -a
Linux nuvolari 4.15.0-29-generic #31-Ubuntu SMP Tue Jul 17 15:39:52 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
$ lsb_release -d
Description: Ubuntu 18.04.1 LTS
```
and, if relevant, post or attach:
<!-- If you have any relevant configuration detail (custom `packages.yaml` or `modules.yaml`, etc.) you can add that here as well. -->
- `packages.yaml`
- `compilers.yaml`
to the issue
### Additional information
<!-- Please upload the following files. They should be present in the stage directory of the failing build. Also upload any config.log or similar file if one exists. -->
* [spack-build-out.txt]()
* [spack-build-env.txt]()
Sometimes the issue benefits from additional details. In these cases there are
a few things we can suggest doing. First of all, you can post the full output of:
```console
$ spack spec --install-status <spec>
...
```
to show people whether Spack installed faulty software or was not able to
build it at all.
<!-- Some packages have maintainers who have volunteered to debug build failures. Run `spack maintainers <name-of-the-package>` and @mention them here if they exist. -->
If your build didn't make it past the configure stage, Spack also has commands to parse
logs and report error and warning messages:
```console
$ spack log-parse --show=errors,warnings <file-to-parse>
```
You might want to run this command on the `config.log` or any other similar file
found in the stage directory:
```console
$ spack location -s <spec>
```
If `config.log` contains other settings that you think might be the cause
of the build failure, consider attaching the file to this issue.
### General information
Rebuilding the package with the following options:
```console
$ spack -d install -j 1 <spec>
...
```
will provide additional debug information. After the failure you will find two files in the current directory:
<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. -->
- [ ] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [ ] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers
- [ ] I have uploaded the build log and environment files
- [ ] I have searched the issues of this repo and believe this is not a duplicate
1. `spack-cc-<spec>.in`, which contains details on the command given as input
to Spack's compiler wrapper
1. `spack-cc-<spec>.out`, which contains the command used to compile / link the
failed object after Spack's compiler wrapper did its processing
You can post or attach those files to provide maintainers with more information on what
is causing the failure.


@@ -1,33 +1,27 @@
---
name: "\U0001F38A Feature request"
name: Feature request
about: Suggest adding a feature that is not yet in Spack
labels: feature
---
<!--*Please add a concise summary of your suggestion here.*-->
*Please add a concise summary of your suggestion here.*
### Rationale
<!--*Is your feature request related to a problem? Please describe it!*-->
*Is your feature request related to a problem? Please describe it!*
### Description
<!--*Describe the solution you'd like and the alternatives you have considered.*-->
*Describe the solution you'd like and the alternatives you have considered.*
### Additional information
<!--*Add any other context about the feature request here.*-->
*Add any other context about the feature request here.*
### General information
- [ ] I have run `spack --version` and reported the version of Spack
- [ ] I have searched the issues of this repo and believe this is not a duplicate
-----
If you want to ask a question about the tool (how to use it, what it can currently do, etc.), try the `#general` channel on our Slack first. We have a welcoming community and chances are you'll get your reply faster and without opening an issue.
<!--If you want to ask a question about the tool (how to use it, what it can currently do, etc.), try the `#general` channel on our Slack first. We have a welcoming community and chances are you'll get your reply faster and without opening an issue.
Other than that, thanks for taking the time to contribute to Spack!
-->
Other than that, thanks for taking the time to contribute to Spack!


@@ -1,6 +0,0 @@
FROM python:3.7-alpine
RUN pip install pygithub
ADD entrypoint.py /entrypoint.py
ENTRYPOINT ["/entrypoint.py"]


@@ -1,85 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Maintainer review action.

This action checks which packages have changed in a PR, and adds their
maintainers to the pull request for review.
"""
import json
import os
import re
import subprocess

from github import Github


def spack(*args):
    """Run the spack executable with arguments, and return the output split.

    This does just enough to run `spack pkg` and `spack maintainers`, the
    two commands used by this action.
    """
    github_workspace = os.environ['GITHUB_WORKSPACE']
    spack = os.path.join(github_workspace, 'bin', 'spack')
    output = subprocess.check_output([spack] + list(args))
    split = re.split(r'\s*', output.decode('utf-8').strip())
    return [s for s in split if s]


def main():
    # get these first so that we'll fail early
    token = os.environ['GITHUB_TOKEN']
    event_path = os.environ['GITHUB_EVENT_PATH']

    with open(event_path) as file:
        data = json.load(file)

    # make sure it's a pull_request event
    assert 'pull_request' in data

    # only request reviews on open, edit, or reopen
    action = data['action']
    if action not in ('opened', 'edited', 'reopened'):
        return

    # get data from the event payload
    pr_data = data['pull_request']
    base_branch_name = pr_data['base']['ref']
    full_repo_name = pr_data['base']['repo']['full_name']
    pr_number = pr_data['number']
    requested_reviewers = pr_data['requested_reviewers']
    author = pr_data['user']['login']

    # get a list of packages that this PR modified
    changed_pkgs = spack(
        'pkg', 'changed', '--type', 'ac', '%s...' % base_branch_name)

    # get maintainers for all modified packages
    maintainers = set()
    for pkg in changed_pkgs:
        pkg_maintainers = set(spack('maintainers', pkg))
        maintainers |= pkg_maintainers

    # remove any maintainers who are already on the PR, and the author,
    # as you can't review your own PR
    maintainers -= set(requested_reviewers)
    maintainers -= set([author])

    if not maintainers:
        return

    # request reviews from each maintainer
    gh = Github(token)
    repo = gh.get_repo(full_repo_name)
    pr = repo.get_pull(pr_number)
    pr.create_review_request(list(maintainers))


if __name__ == "__main__":
    main()


@@ -1,7 +0,0 @@
version: 2
updates:
  # Maintain dependencies for GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"


@@ -1,20 +0,0 @@
#!/usr/bin/env sh
. share/spack/setup-env.sh
echo -e "config:\n build_jobs: 2" > etc/spack/config.yaml
spack config add "packages:all:target:[x86_64]"
# TODO: remove this explicit setting once apple-clang detection is fixed
cat <<EOF > etc/spack/compilers.yaml
compilers:
- compiler:
    spec: apple-clang@11.0.3
    paths:
      cc: /usr/bin/clang
      cxx: /usr/bin/clang++
      f77: /usr/local/bin/gfortran-9
      fc: /usr/local/bin/gfortran-9
    modules: []
    operating_system: catalina
    target: x86_64
EOF
spack compiler info apple-clang
spack debug report


@@ -1,77 +0,0 @@
name: linux builds

on:
  push:
    branches:
      - develop
      - releases/**
    paths-ignore:
      # Don't run if we only modified packages in the built-in repository
      - 'var/spack/repos/builtin/**'
      - '!var/spack/repos/builtin/packages/lz4/**'
      - '!var/spack/repos/builtin/packages/mpich/**'
      - '!var/spack/repos/builtin/packages/tut/**'
      - '!var/spack/repos/builtin/packages/py-setuptools/**'
      - '!var/spack/repos/builtin/packages/openjpeg/**'
      - '!var/spack/repos/builtin/packages/r-rcpp/**'
      - '!var/spack/repos/builtin/packages/ruby-rake/**'
      # Don't run if we only modified documentation
      - 'lib/spack/docs/**'
  pull_request:
    branches:
      - develop
      - releases/**
    paths-ignore:
      # Don't run if we only modified packages in the built-in repository
      - 'var/spack/repos/builtin/**'
      - '!var/spack/repos/builtin/packages/lz4/**'
      - '!var/spack/repos/builtin/packages/mpich/**'
      - '!var/spack/repos/builtin/packages/tut/**'
      - '!var/spack/repos/builtin/packages/py-setuptools/**'
      - '!var/spack/repos/builtin/packages/openjpeg/**'
      - '!var/spack/repos/builtin/packages/r-rcpp/**'
      - '!var/spack/repos/builtin/packages/ruby-rake/**'
      # Don't run if we only modified documentation
      - 'lib/spack/docs/**'

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        package:
          - lz4            # MakefilePackage
          - mpich~fortran  # AutotoolsPackage
          - tut            # WafPackage
          - py-setuptools  # PythonPackage
          - openjpeg       # CMakePackage
          - r-rcpp         # RPackage
          - ruby-rake      # RubyPackage
    steps:
      - uses: actions/checkout@v2
      - uses: actions/cache@v2.1.4
        with:
          path: ~/.ccache
          key: ccache-build-${{ matrix.package }}
          restore-keys: |
            ccache-build-${{ matrix.package }}
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install System Packages
        run: |
          sudo apt-get update
          sudo apt-get -yqq install ccache gfortran perl perl-base r-base r-base-core r-base-dev ruby findutils openssl libssl-dev libpciaccess-dev
          R --version
          perl --version
          ruby --version
      - name: Copy Configuration
        run: |
          ccache -M 300M && ccache -z
          # Set up external deps for build tests, b/c they take too long to compile
          cp share/spack/qa/configuration/*.yaml etc/spack/
      - name: Run the build test
        run: |
          . share/spack/setup-env.sh
          SPEC=${{ matrix.package }} share/spack/qa/run-build-tests
          ccache -s


@@ -1,78 +0,0 @@
# These are nightly package tests for macOS
# focus areas:
# - initial user experience
# - scientific python stack

name: macOS builds nightly

on:
  schedule:
    # nightly at 1 AM
    - cron: '0 1 * * *'
  pull_request:
    branches:
      - develop
    paths:
      # Run if we modify this yaml file
      - '.github/workflows/macos_python.yml'
      # TODO: run if we touch any of the recipes involved in this

# GitHub Action Limits
# https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions

jobs:
  install_gcc:
    name: gcc with clang
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: spack install
        run: |
          . .github/workflows/install_spack.sh
          # 9.2.0 is the latest version on which we apply homebrew patch
          spack install -v --fail-fast gcc@9.2.0 %apple-clang

  install_jupyter_clang:
    name: jupyter
    runs-on: macos-latest
    timeout-minutes: 700
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: spack install
        run: |
          . .github/workflows/install_spack.sh
          spack install -v --fail-fast py-jupyterlab %apple-clang

  install_scipy_clang:
    name: scipy, mpl, pd
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: spack install
        run: |
          . .github/workflows/install_spack.sh
          spack install -v --fail-fast py-scipy %apple-clang
          spack install -v --fail-fast py-matplotlib %apple-clang
          spack install -v --fail-fast py-pandas %apple-clang

  install_mpi4py_clang:
    name: mpi4py, petsc4py
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: spack install
        run: |
          . .github/workflows/install_spack.sh
          spack install -v --fail-fast py-mpi4py %apple-clang
          spack install -v --fail-fast py-petsc4py %apple-clang


@@ -1,9 +0,0 @@
#!/usr/bin/env sh
git config --global user.email "spack@example.com"
git config --global user.name "Test User"
# With fetch-depth: 0 we have a remote develop
# but not a local branch. Don't do this on develop
if [ "$(git branch --show-current)" != "develop" ]
then
git branch develop origin/develop
fi


@@ -1,360 +0,0 @@
name: linux tests

on:
  push:
    branches:
      - develop
      - releases/**
  pull_request:
    branches:
      - develop
      - releases/**

jobs:
  # Validate that the code can be run on all the Python versions
  # supported by Spack
  validate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install Python Packages
        run: |
          pip install --upgrade pip
          pip install --upgrade vermin
      - name: vermin (Spack's Core)
        run: vermin --backport argparse --backport typing -t=2.6- -t=3.5- -v lib/spack/spack/ lib/spack/llnl/ bin/
      - name: vermin (Repositories)
        run: vermin --backport argparse --backport typing -t=2.6- -t=3.5- -v var/spack/repos

  # Run style checks on the files that have been changed
  style:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install Python packages
        run: |
          pip install --upgrade pip six setuptools flake8 mypy>=0.800 black
      - name: Setup git configuration
        run: |
          # Need this for the git tests to succeed.
          git --version
          . .github/workflows/setup_git.sh
      - name: Run style tests
        run: |
          share/spack/qa/run-style-tests

  # Build the documentation
  documentation:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install System packages
        run: |
          sudo apt-get -y update
          sudo apt-get install -y coreutils ninja-build graphviz
      - name: Install Python packages
        run: |
          pip install --upgrade pip six setuptools
          pip install --upgrade -r lib/spack/docs/requirements.txt
      - name: Build documentation
        run: |
          share/spack/qa/run-doc-tests

  # Check which files have been updated by the PR
  changes:
    runs-on: ubuntu-latest
    # Set job outputs to values from filter step
    outputs:
      core: ${{ steps.filter.outputs.core }}
      packages: ${{ steps.filter.outputs.packages }}
      with_coverage: ${{ steps.coverage.outputs.with_coverage }}
    steps:
      - uses: actions/checkout@v2
        if: ${{ github.event_name == 'push' }}
        with:
          fetch-depth: 0
      # For pull requests it's not necessary to checkout the code
      - uses: dorny/paths-filter@v2
        id: filter
        with:
          # See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
          filters: |
            core:
              - './!(var/**)/**'
            packages:
              - 'var/**'
      # Some links for easier reference:
      #
      # "github" context: https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
      # job outputs: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idoutputs
      # setting environment variables from earlier steps: https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-environment-variable
      #
      - id: coverage
        # Run the subsequent jobs with coverage if core has been modified,
        # regardless of whether this is a pull request or a push to a branch
        run: |
          echo Core changes: ${{ steps.filter.outputs.core }}
          echo Event name: ${{ github.event_name }}
          if [ "${{ steps.filter.outputs.core }}" == "true" ]
          then
            echo "::set-output name=with_coverage::true"
          else
            echo "::set-output name=with_coverage::false"
          fi

  # Run unit tests with different configurations on linux
  unittests:
    needs: [ validate, style, documentation, changes ]
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: [2.7, 3.5, 3.6, 3.7, 3.8, 3.9]
        concretizer: ['original', 'clingo']
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install System packages
        run: |
          sudo apt-get -y update
          # Needed for unit tests
          sudo apt-get install -y coreutils gfortran graphviz gnupg2 mercurial
          sudo apt-get install -y ninja-build patchelf
          # Needed for kcov
          sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
          sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
      - name: Install Python packages
        run: |
          pip install --upgrade pip six setuptools codecov coverage
      - name: Setup git configuration
        run: |
          # Need this for the git tests to succeed.
          git --version
          . .github/workflows/setup_git.sh
      - name: Install kcov for bash script coverage
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        env:
          KCOV_VERSION: 34
        run: |
          KCOV_ROOT=$(mktemp -d)
          wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
          tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
          mkdir -p ${KCOV_ROOT}/build
          cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
          make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
      - name: Bootstrap clingo from sources
        if: ${{ matrix.concretizer == 'clingo' }}
        run: |
          . share/spack/setup-env.sh
          spack external find --not-buildable cmake bison
          spack -v solve zlib
      - name: Run unit tests (full suite with coverage)
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        env:
          COVERAGE: true
          SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
        run: |
          share/spack/qa/run-unit-tests
          coverage combine
          coverage xml
      - name: Run unit tests (reduced suite without coverage)
        if: ${{ needs.changes.outputs.with_coverage == 'false' }}
        env:
          ONLY_PACKAGES: true
          SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
        run: |
          share/spack/qa/run-unit-tests
      - uses: codecov/codecov-action@v1
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        with:
          flags: unittests,linux,${{ matrix.concretizer }}

  # Test shell integration
  shell:
    needs: [ validate, style, documentation, changes ]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install System packages
        run: |
          sudo apt-get -y update
          # Needed for shell tests
          sudo apt-get install -y coreutils csh zsh tcsh fish dash bash
          # Needed for kcov
          sudo apt-get -y install cmake binutils-dev libcurl4-openssl-dev
          sudo apt-get -y install zlib1g-dev libdw-dev libiberty-dev
      - name: Install Python packages
        run: |
          pip install --upgrade pip six setuptools codecov coverage
      - name: Setup git configuration
        run: |
          # Need this for the git tests to succeed.
          git --version
          . .github/workflows/setup_git.sh
      - name: Install kcov for bash script coverage
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        env:
          KCOV_VERSION: 38
        run: |
          KCOV_ROOT=$(mktemp -d)
          wget --output-document=${KCOV_ROOT}/${KCOV_VERSION}.tar.gz https://github.com/SimonKagstrom/kcov/archive/v${KCOV_VERSION}.tar.gz
          tar -C ${KCOV_ROOT} -xzvf ${KCOV_ROOT}/${KCOV_VERSION}.tar.gz
          mkdir -p ${KCOV_ROOT}/build
          cd ${KCOV_ROOT}/build && cmake -Wno-dev ${KCOV_ROOT}/kcov-${KCOV_VERSION} && cd -
          make -C ${KCOV_ROOT}/build && sudo make -C ${KCOV_ROOT}/build install
      - name: Run shell tests (without coverage)
        if: ${{ needs.changes.outputs.with_coverage == 'false' }}
        run: |
          share/spack/qa/run-shell-tests
      - name: Run shell tests (with coverage)
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        env:
          COVERAGE: true
        run: |
          share/spack/qa/run-shell-tests
      - uses: codecov/codecov-action@v1
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        with:
          flags: shelltests,linux

  # Test for Python2.6 run on Centos 6
  centos6:
    needs: [ validate, style, documentation, changes ]
    runs-on: ubuntu-latest
    container: spack/github-actions:centos6
    steps:
      - name: Run unit tests (full test-suite)
        # The CentOS 6 container doesn't run with coverage, but
        # under the same conditions it runs the full test suite
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        env:
          HOME: /home/spack-test
        run: |
          whoami && echo $HOME && cd $HOME
          git clone https://github.com/spack/spack.git && cd spack
          git fetch origin ${{ github.ref }}:test-branch
          git checkout test-branch
          share/spack/qa/run-unit-tests
      - name: Run unit tests (only package tests)
        if: ${{ needs.changes.outputs.with_coverage == 'false' }}
        env:
          HOME: /home/spack-test
          ONLY_PACKAGES: true
        run: |
          whoami && echo $HOME && cd $HOME
          git clone https://github.com/spack/spack.git && cd spack
          git fetch origin ${{ github.ref }}:test-branch
          git checkout test-branch
          share/spack/qa/run-unit-tests

  # Test RHEL8 UBI with platform Python. This job is run
  # only on PRs modifying core Spack
  rhel8-platform-python:
    needs: [ validate, style, documentation, changes ]
    runs-on: ubuntu-latest
    if: ${{ needs.changes.outputs.with_coverage == 'true' }}
    container: registry.access.redhat.com/ubi8/ubi
    steps:
      - name: Install dependencies
        run: |
          dnf install -y \
              bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
              make patch tcl unzip which xz
      - uses: actions/checkout@v2
      - name: Setup repo and non-root user
        run: |
          git --version
          git fetch --unshallow
          . .github/workflows/setup_git.sh
          useradd spack-test
          chown -R spack-test .
      - name: Run unit tests
        env:
          SPACK_PYTHON: /usr/libexec/platform-python
        shell: runuser -u spack-test -- bash {0}
        run: |
          source share/spack/setup-env.sh
          spack unit-test -k 'not svn and not hg' -x --verbose

  # Test for the clingo based solver (using clingo-cffi)
  clingo-cffi:
    needs: [ validate, style, documentation, changes ]
    runs-on: ubuntu-latest
    container: spack/github-actions:clingo-cffi
    steps:
      - name: Run unit tests
        run: |
          whoami && echo PWD=$PWD && echo HOME=$HOME && echo SPACK_TEST_SOLVER=$SPACK_TEST_SOLVER
          python3 -c "import clingo; print(hasattr(clingo.Symbol, '_rep'), clingo.__version__)"
          git clone https://github.com/spack/spack.git && cd spack
          git fetch origin ${{ github.ref }}:test-branch
          git checkout test-branch
          . share/spack/setup-env.sh
          spack compiler find
          spack solve mpileaks%gcc
          if [ "${{ needs.changes.outputs.with_coverage }}" == "true" ]
          then
            coverage run $(which spack) unit-test -v -x
            coverage combine
            coverage xml
          else
            $(which spack) unit-test -m "not maybeslow" -k "package_sanity"
          fi
      - uses: codecov/codecov-action@v1
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        with:
          flags: unittests,linux,clingo

  # Run unit tests on MacOS
  build:
    needs: [ validate, style, documentation, changes ]
    runs-on: macos-latest
    strategy:
      matrix:
        python-version: [3.8]
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install Python packages
        run: |
          pip install --upgrade pip six setuptools
          pip install --upgrade codecov coverage
          pip install --upgrade flake8 pep8-naming mypy
      - name: Setup Homebrew packages
        run: |
          brew install dash fish gcc gnupg2 kcov
      - name: Run unit tests
        run: |
          git --version
          . .github/workflows/setup_git.sh
          . share/spack/setup-env.sh
          if [ "${{ needs.changes.outputs.with_coverage }}" == "true" ]
          then
            coverage run $(which spack) unit-test -x
            coverage combine
            coverage xml
          else
            echo "ONLY PACKAGE RECIPES CHANGED [skipping coverage]"
            $(which spack) unit-test -x -m "not maybeslow" -k "package_sanity"
          fi
      - uses: codecov/codecov-action@v1
        if: ${{ needs.changes.outputs.with_coverage == 'true' }}
        with:
          file: ./coverage.xml
          flags: unittests,macos

.gitignore

@@ -1,511 +1,28 @@
##########################
# Spack-specific ignores #
##########################
/db
/var/spack/stage
/var/spack/cache
/var/spack/environments
/var/spack/repos/*/index.yaml
/var/spack/repos/*/lock
*.pyc
/opt
*~
.DS_Store
.idea
# Ignore everything in /etc/spack except /etc/spack/defaults
/etc/spack/*
!/etc/spack/defaults
/etc/spackconfig
/share/spack/dotkit
/share/spack/modules
/share/spack/lmod
# Debug logs
spack-db.*
/TAGS
*.swp
/htmlcov
.coverage
#*
.#*
lib/spack/spack/test/.cache
/bin/spackc
*.in.log
*.out.log
###########################
# Python-specific ignores #
###########################
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
#lib/
#lib64/
parts/
sdist/
#var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
########################
# Vim-specific ignores #
########################
# Swap
[._]*.s[a-v][a-z]
!*.svg # comment out if you don't need vector files
[._]*.sw[a-p]
[._]s[a-rt-v][a-z]
[._]ss[a-gi-z]
[._]sw[a-p]
# Session
Session.vim
Sessionx.vim
# Temporary
.netrwhist
*~
# Auto-generated tag files
tags
# Persistent undo
[._]*.un~
##########################
# Emacs-specific ignores #
##########################
*~
\#*\#
/.emacs.desktop
/.emacs.desktop.lock
*.elc
auto-save-list
tramp
.\#*
# Org-mode
.org-id-locations
*_archive
# flymake-mode
*_flymake.*
# eshell files
/eshell/history
/eshell/lastdir
# elpa packages
/elpa/
# reftex files
*.rel
# AUCTeX auto folder
/auto/
# cask packages
.cask/
dist/
# Flycheck
flycheck_*.el
# server auth directory
/server/
# projectiles files
.projectile
# directory configuration
.dir-locals.el
# network security
/network-security.data
############################
# Eclipse-specific ignores #
############################
.metadata
#bin/
tmp/
*.tmp
*.bak
*.swp
*~.nib
local.properties
.settings/
.loadpath
.recommenders
# External tool builders
.externalToolBuilders/
# Locally stored "Eclipse launch configurations"
*.launch
# PyDev specific (Python IDE for Eclipse)
*.pydevproject
# CDT-specific (C/C++ Development Tooling)
.cproject
# CDT- autotools
.autotools
# Java annotation processor (APT)
.factorypath
# PDT-specific (PHP Development Tools)
.buildpath
# sbteclipse plugin
.target
# Tern plugin
.tern-project
# TeXlipse plugin
.texlipse
# STS (Spring Tool Suite)
.springBeans
# Code Recommenders
.recommenders/
# Annotation Processing
.apt_generated/
.apt_generated_test/
# Scala IDE specific (Scala & Java development for Eclipse)
.cache-main
.scala_dependencies
.worksheet
# Uncomment this line if you wish to ignore the project description file.
# Typically, this file would be tracked if it contains build/dependency configurations:
#.project
##################################
# Visual Studio-specific ignores #
##################################
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
*.code-workspace
# Local History for Visual Studio Code
.history/
#################################
# Sublime Text-specific ignores #
#################################
# Cache files for Sublime Text
*.tmlanguage.cache
*.tmPreferences.cache
*.stTheme.cache
# Workspace files are user-specific
*.sublime-workspace
# Project files should be checked into the repository, unless a significant
# proportion of contributors will probably not be using Sublime Text
# *.sublime-project
# SFTP configuration file
sftp-config.json
sftp-config-alt*.json
# Package control specific files
Package Control.last-run
Package Control.ca-list
Package Control.ca-bundle
Package Control.system-ca-bundle
Package Control.cache/
Package Control.ca-certs/
Package Control.merged-ca-bundle
Package Control.user-ca-bundle
oscrypto-ca-bundle.crt
bh_unicode_properties.cache
# Sublime-github package stores a github token in this file
# https://packagecontrol.io/packages/sublime-github
GitHub.sublime-settings
##############################
# JetBrains-specific ignores #
##############################
# Ignore the entire folder since it may contain more files than
# just the ones listed below
.idea/
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
##########################
# macOS-specific ignores #
##########################
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
##########################
# Linux-specific ignores #
##########################
*~
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
############################
# Windows-specific ignores #
############################
# Windows thumbnail cache files
Thumbs.db
Thumbs.db:encryptable
ehthumbs.db
ehthumbs_vista.db
# Dump file
*.stackdump
# Folder config file
[Dd]esktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msix
*.msm
*.msp
# Windows shortcuts
*.lnk


@@ -20,8 +20,8 @@ Geoffrey Oxberry <oxberry1@llnl.gov> Geoffrey Oxberry
Glenn Johnson <glenn-johnson@uiowa.edu> Glenn Johnson <gjohnson@argon-ohpc.hpc.uiowa.edu>
Glenn Johnson <glenn-johnson@uiowa.edu> Glenn Johnson <glennpj@gmail.com>
Gregory Becker <becker33@llnl.gov> Gregory Becker <becker33.llnl.gov>
Gregory Becker <becker33@llnl.gov> Gregory Becker <becker33.llnl.gov>
Gregory Becker <becker33@llnl.gov> Gregory Becker <becker33@llnl.gov>
Gregory Becker <becker33@llnl.gov> becker33 <becker33.llnl.gov>
Gregory Becker <becker33@llnl.gov> becker33 <becker33@llnl.gov>
Gregory L. Lee <lee218@llnl.gov> Greg Lee <lee218@llnl.gov>
Gregory L. Lee <lee218@llnl.gov> Gregory L. Lee <lee218@cab687.llnl.gov>
Gregory L. Lee <lee218@llnl.gov> Gregory L. Lee <lee218@cab690.llnl.gov>


@@ -1,35 +0,0 @@
[mypy]
python_version = 3.7
files=lib/spack/llnl/**/*.py,lib/spack/spack/**/*.py
mypy_path=bin,lib/spack,lib/spack/external,var/spack/repos/builtin
# This and a generated import file allows supporting packages
namespace_packages=True
# To avoid re-factoring all the externals, ignore errors and missing imports
# globally, then turn back on in spack and spack submodules
ignore_errors=True
ignore_missing_imports=True
[mypy-spack.*]
ignore_errors=False
ignore_missing_imports=False
[mypy-packages.*]
ignore_errors=False
ignore_missing_imports=False
[mypy-llnl.*]
ignore_errors=False
ignore_missing_imports=False
[mypy-spack.test.packages]
ignore_errors=True
# ignore errors in fake import path for packages
[mypy-spack.pkg.*]
ignore_errors=True
ignore_missing_imports=True
# jinja has syntax in it that requires python3 and causes a parse error
# skip importing it
[mypy-jinja2]
follow_imports=skip


@@ -1,9 +0,0 @@
version: 2

sphinx:
  configuration: lib/spack/docs/conf.py

python:
  version: 3.7
  install:
    - requirements: lib/spack/docs/requirements.txt

.travis.yml

@@ -0,0 +1,218 @@
#=============================================================================
# Project settings
#=============================================================================
# Only build master and develop on push; do not build every branch.
branches:
  only:
    - master
    - develop
    - /^releases\/.*$/

#=============================================================================
# Build matrix
#=============================================================================
dist: xenial

jobs:
  fast_finish: true
  include:
    - stage: 'style checks'
      python: '2.7'
      sudo: required
      os: linux
      language: python
      env: TEST_SUITE=flake8
    - stage: 'unit tests + documentation'
      python: '2.6'
      dist: trusty
      sudo: required
      os: linux
      language: python
      env: TEST_SUITE=unit
    - python: '2.7'
      sudo: required
      os: linux
      language: python
      env: TEST_SUITE=unit
    - python: '3.4'
      sudo: required
      os: linux
      language: python
      env: TEST_SUITE=unit
    - python: '3.5'
      sudo: required
      os: linux
      language: python
      env: TEST_SUITE=unit
    - python: '3.6'
      sudo: required
      os: linux
      language: python
      env: TEST_SUITE=unit
    - python: '3.7'
      sudo: required
      os: linux
      language: python
      env: TEST_SUITE=unit
    - python: '3.6'
      sudo: required
      os: linux
      language: python
      env: TEST_SUITE=doc
    - os: osx
      language: generic
      env: [ TEST_SUITE=unit, PYTHON_VERSION=2.7 ]
    # mpich (AutotoolsPackage)
    - stage: 'build tests'
      python: '2.7'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=mpich' ]
    # astyle (MakefilePackage)
    - python: '2.7'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=astyle' ]
    # tut (WafPackage)
    - python: '2.7'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=tut' ]
    # py-setuptools (PythonPackage)
    - python: '2.7'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=py-setuptools' ]
    # perl-dbi (PerlPackage)
    # - python: '2.7'
    #   os: linux
    #   language: python
    #   env: [ TEST_SUITE=build, 'SPEC=perl-dbi' ]
    # openjpeg (CMakePackage + external cmake)
    - python: '2.7'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=openjpeg' ]
    # r-rcpp (RPackage + external R)
    - python: '2.7'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=r-rcpp' ]
    # mpich (AutotoolsPackage)
    - python: '3.6'
      os: linux
      language: python
      env: [ TEST_SUITE=build, 'SPEC=mpich' ]
  allow_failures:
    - env: TEST_SUITE=docker
    # temporary Python 2.6 exception while we figure out why Travis is hanging
    - python: '2.6'

stages:
  - 'style checks'
  - 'unit tests + documentation'
  - 'build tests'

#=============================================================================
# Environment
#=============================================================================
# Use new Travis infrastructure (Docker can't sudo yet)
sudo: false

# Docs need graphviz to build
addons:
  # for Linux builds, we use APT
  apt:
    packages:
      - ccache
      - cmake
      - gfortran
      - graphviz
      - gnupg2
      - kcov
      - mercurial
      - ninja-build
      - perl
      - perl-base
      - realpath
      - r-base
      - r-base-core
      - r-base-dev
  # for Mac builds, we use Homebrew
  homebrew:
    packages:
      - python@2
      - gcc
      - gnupg2
      - ccache

# ~/.ccache needs to be cached directly as Travis is not taking care of it
# (possibly because we use 'language: python' and not 'language: c')
cache:
  pip: true
  ccache: true
  directories:
    - ~/.mirror
    - ~/.ccache

# Work around Travis's lack of support for Python on OSX
before_install:
  - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
      pip2 install --upgrade pip;
      pip2 install virtualenv;
      virtualenv venv;
      source venv/bin/activate;
    fi
  - ccache -M 2G && ccache -z

# Install various dependencies
install:
  - pip install --upgrade pip
  - pip install --upgrade six
  - pip install --upgrade setuptools
  - pip install --upgrade codecov
  - pip install --upgrade flake8
  - pip install --upgrade pep8-naming
  - if [[ "$TEST_SUITE" == "doc" ]]; then
      pip install --upgrade -r lib/spack/docs/requirements.txt;
    fi

before_script:
  # Need this for the git tests to succeed.
  - git config --global user.email "spack@example.com"
  - git config --global user.name "Test User"
  # Need this to be able to compute the list of changed files
  - git fetch origin develop:develop
  # Set up external deps for build tests, b/c they take too long to compile
  - if [[ "$TEST_SUITE" == "build" ]]; then
      cp share/spack/qa/configuration/*.yaml etc/spack/;
    fi

#=============================================================================
# Building
#=============================================================================
services:
  - docker

script:
  - share/spack/qa/run-$TEST_SUITE-tests

after_success:
  - ccache -s
  - if [[ "$TEST_SUITE" == "unit" || "$TEST_SUITE" == "build" ]]; then
      codecov --env PYTHON_VERSION
              --required
              --flags "${TEST_SUITE}${TRAVIS_OS_NAME}";
    fi

#=============================================================================
# Notifications
#=============================================================================
notifications:
  email:
    recipients: tgamblin@llnl.gov
    on_success: change
    on_failure: always


@@ -1,748 +0,0 @@
# v0.16.1 (2021-02-22)
This minor release includes a new feature and associated fixes:
* intel-oneapi support through new packages (#20411, #20686, #20693, #20717,
#20732, #20808, #21377, #21448)
This release also contains bug fixes/enhancements for:
* HIP/ROCm support (#19715, #20095)
* concretization (#19988, #20020, #20082, #20086, #20099, #20102, #20128,
#20182, #20193, #20194, #20196, #20203, #20247, #20259, #20307, #20362,
#20383, #20423, #20473, #20506, #20507, #20604, #20638, #20649, #20677,
#20680, #20790)
* environment install reporting fix (#20004)
* avoid import in ABI compatibility info (#20236)
* restore ability of dev-build to skip patches (#20351)
* spack find -d spec grouping (#20028)
* spack smoke test support (#19987, #20298)
* macOS fixes (#20038, #21662)
* abstract spec comparisons (#20341)
* continuous integration (#17563)
* performance improvements for binary relocation (#19690, #20768)
* additional sanity checks for variants in builtin packages (#20373)
* do not pollute auto-generated configuration files with empty lists or
dicts (#20526)
plus assorted documentation (#20021, #20174) and package bug fixes/enhancements
(#19617, #19933, #19986, #20006, #20097, #20198, #20794, #20906, #21411).
# v0.16.0 (2020-11-18)
`v0.16.0` is a major feature release.
## Major features in this release
1. **New concretizer (experimental)** Our new backtracking concretizer is
now in Spack as an experimental feature. You will need to install
`clingo@master+python` and set `concretizer: clingo` in `config.yaml`
to use it. The original concretizer is not exhaustive and is not
guaranteed to find a solution if one exists. We encourage you to use
the new concretizer and to report any bugs you find with it. We
anticipate making the new concretizer the default and including all
required dependencies for it in Spack `v0.17`. For more details, see
#19501.
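
Enabling it is a one-line configuration change; a minimal sketch of the `config.yaml` entry described above:

```yaml
# any Spack config scope, e.g. ~/.spack/config.yaml
config:
  concretizer: clingo   # the original concretizer remains the default
```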
2. **spack test (experimental)** Users can add `test()` methods to their
packages to run smoke tests on installations with the new `spack test`
command (the old `spack test` is now `spack unit-test`). `spack test`
is environment-aware, so you can `spack install` an environment and
`spack test run` smoke tests on all of its packages. Historical test
logs can be perused with `spack test results`. There are generic smoke tests for
MPI implementations, C, C++, and Fortran compilers as well as specific
smoke tests for 18 packages. This is marked experimental because the
test API (`self.run_test()`) is likely to change, but we encourage
users to upstream tests, and we will maintain and refactor any that
are added to mainline packages (#15702).
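
As a rough sketch of the experimental API mentioned above (the executable and expected output are invented, and `self.run_test()` is, as noted, likely to change):

```python
# Hypothetical test method inside a package.py
def test(self):
    """Smoke test run by `spack test run` after installation."""
    self.run_test('example',              # installed executable to run
                  options=['--version'],  # arguments to pass
                  expected=['1.0'],       # strings expected in the output
                  purpose='check that the binary reports its version')
```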
3. **spack develop** New `spack develop` command allows you to develop
several packages at once within a Spack environment. Running
`spack develop foo@v1` and `spack develop bar@v2` will check
out specific versions of `foo` and `bar` into subdirectories, which you
can then build incrementally with `spack install` (#15256).
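
A hypothetical session, using the `foo` and `bar` placeholders from the description above:

```console
$ spack env activate myenv   # some existing environment
$ spack develop foo@v1       # checks foo@v1 out into a subdirectory
$ spack develop bar@v2
$ spack install              # rebuilds the dev checkouts incrementally
```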
4. **More parallelism** Spack previously installed the dependencies of a
_single_ spec in parallel. Entire environments can now be installed in
parallel, greatly accelerating builds of large environments. Previously,
parallelism came only from within individual specs; Spack now parallelizes
entire environment builds (#18131).
5. **Customizable base images for spack containerize**
`spack containerize` previously only output a `Dockerfile` based
on `ubuntu`. You may now specify any base image of your choosing (#15028).
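
As a sketch of what this looks like (the `container` schema keys below are an assumption based on documentation of this era, not taken from this changelog):

```yaml
# Hypothetical spack.yaml for `spack containerize`
spack:
  specs:
    - zlib
  container:
    format: docker
    base:                # assumed schema: override the ubuntu default
      image: "centos:7"
      spack: develop
```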
6. **more external finding** `spack external find` was added in `v0.15`,
but only `cmake` had support. `spack external find` can now find
`bison`, `cuda`, `findutils`, `flex`, `git`, `lustre`, `m4`, `mpich`,
`mvapich2`, `ncurses`, `openmpi`, `perl`, `spectrum-mpi`, `tar`, and
`texinfo` on your system and add them automatically to
`packages.yaml`.
7. **Support aocc, nvhpc, and oneapi compilers** We are aggressively
pursuing support for the newest vendor compilers, especially those for
the U.S. exascale and pre-exascale systems. Compiler classes and
auto-detection for `aocc`, `nvhpc`, `oneapi` are now in Spack (#19345,
#19294, #19330).
## Additional new features of note
* New `spack mark` command can be used to designate packages as explicitly
installed, so that `spack gc` will not garbage-collect them (#16662); see
the console sketch after this list.
* `install_tree` can be customized with Spack's projection format (#18341)
* `sbang` now lives in the `install_tree` so that all users can access it (#11598)
* `csh` and `tcsh` users no longer need to set `SPACK_ROOT` before
sourcing `setup-env.csh` (#18225)
* Spec syntax now supports `variant=*` syntax for finding any package
that has a particular variant (#19381); see the console sketch after this list.
* Spack respects `SPACK_GNUPGHOME` variable for custom GPG directories (#17139)
* Spack now recognizes Graviton chips
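
A brief console sketch of two of the features above (the spec and variant name are illustrative):

```console
$ spack mark --implicit libelf   # now eligible for `spack gc`
$ spack mark --explicit libelf   # protected from `spack gc` again

$ spack find 'threads=*'         # any installed package with a `threads` variant
```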
## Major refactors
* Use spawn instead of fork on Python >= 3.8 on macOS (#18205)
* Use indexes for public build caches (#19101, #19117, #19132, #19141, #19209)
* `sbang` is an external package now (https://github.com/spack/sbang, #19582)
* `archspec` is an external package now (https://github.com/archspec/archspec, #19600)
## Deprecations and Removals
* `spack bootstrap` was deprecated in v0.14.0, and has now been removed.
* `spack setup` is deprecated as of v0.16.0.
* What was `spack test` is now called `spack unit-test`. `spack test` is
now the smoke testing feature in (2) above.
## Bugfixes
Some of the most notable bugfixes in this release include:
* Better warning messages for deprecated syntax in `packages.yaml` (#18013)
* `buildcache list --allarch` now works properly (#17827)
* Many fixes and tests for buildcaches and binary relocation (#15687,
#17455, #17418, #17455, #15687, #18110)
## Package Improvements
Spack now has 5050 total packages, 720 of which were added since `v0.15`.
* ROCm packages (`hip`, `aomp`, more) added by AMD (#19957, #19832, others)
* Many improvements for ARM support
* `llvm-flang`, `flang`, and `f18` removed, as `llvm` has real `flang`
support since Flang was merged to LLVM mainline
* Emerging support for `spack external find` and `spack test` in packages.
## Infrastructure
* Major infrastructure improvements to pipelines on `gitlab.spack.io`
* Support for testing PRs from forks (#19248) is being enabled for all
forks to enable rolling, up-to-date binary builds on `develop`
# v0.15.4 (2020-08-12)
This release contains one feature addition:
* Users can set `SPACK_GNUPGHOME` to override Spack's GPG path (#17139)
Several bugfixes for CUDA, binary packaging, and `spack -V`:
* CUDA package's `.libs` method searches for `libcudart` instead of `libcuda` (#18000)
* Don't set `CUDAHOSTCXX` in environments that contain CUDA (#17826)
* `buildcache create`: `NoOverwriteException` is a warning, not an error (#17832)
* Fix `spack buildcache list --allarch` (#17884)
* `spack -V` works with `releases/latest` tag and shallow clones (#17884)
And fixes for GitHub Actions and tests to ensure that CI passes on the
release branch (#15687, #17279, #17328, #17377, #17732).
# v0.15.3 (2020-07-28)
This release contains the following bugfixes:
* Fix handling of relative view paths (#17721)
* Fixes for binary relocation (#17418, #17455)
* Fix redundant printing of error messages in build environment (#17709)
It also adds a support script for Spack tutorials:
* Add a tutorial setup script to share/spack (#17705, #17722)
# v0.15.2 (2020-07-23)
This minor release includes two new features:
* Spack install verbosity is decreased, and more debug levels are added (#17546)
* The $spack/share/spack/keys directory contains public keys that may be optionally trusted for public binary mirrors (#17684)
This release also includes several important fixes:
* MPICC and related variables are now cleaned in the build environment (#17450)
* LLVM flang only builds CUDA offload components when +cuda (#17466)
* CI pipelines no longer upload user environments that can contain secrets to the internet (#17545)
* CI pipelines add bootstrapped compilers to the compiler config (#17536)
* `spack buildcache list` does not exit on first failure and lists later mirrors (#17565)
* Apple's "gcc" executable that is an apple-clang compiler does not generate a gcc compiler config (#17589)
* Mixed compiler toolchains are merged more naturally across different compiler suffixes (#17590)
* Cray Shasta platforms detect the OS properly (#17467)
* Additional minor fixes.
# v0.15.1 (2020-07-10)
This minor release includes several important fixes:
* Fix shell support on Cray (#17386)
* Fix use of externals installed with other Spack instances (#16954)
* Fix gcc+binutils build (#9024)
* Fixes for usage of intel-mpi (#17378 and #17382)
* Fixes to Autotools config.guess detection (#17333 and #17356)
* Update `spack install` message to prompt user when an environment is not
explicitly activated (#17454)
This release also adds a mirror for all sources that are
fetched in Spack (#17077). It is expected to be useful when the
official website for a Spack package is unavailable.
# v0.15.0 (2020-06-28)
`v0.15.0` is a major feature release.
## Major Features in this release
1. **Cray support** Spack will now work properly on Cray "Cluster"
systems (non-XC systems) and after a `module purge` command on Cray
systems. See #12989
2. **Virtual package configuration** Virtual packages are allowed in
   packages.yaml configuration. This allows users to specify a virtual
   package as non-buildable without needing to specify it for each
   implementation (see the sketch after this list). See #14934
3. **New config subcommands** This release adds `spack config add` and
`spack config remove` commands to add to and remove from yaml
configuration files from the CLI. See #13920
4. **Environment activation** Anonymous environments are **no longer**
automatically activated in the current working directory. To activate
an environment from a `spack.yaml` file in the current directory, use
the `spack env activate .` command. This removes a concern that users
were too easily polluting their anonymous environments with accidental
installations. See #17258
5. **Apple clang compiler** The clang compiler and the apple-clang
compiler are now separate compilers in Spack. This allows Spack to
improve support for the apple-clang compiler. See #17110
6. **Finding external packages** Spack packages can now support an API
for finding external installations. This allows the `spack external
find` command to automatically add installations of those packages to
the user's configuration. See #15158
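As a sketch of feature 2 above: a virtual package such as `mpi` can now be
marked non-buildable directly, instead of marking every implementation
individually. A minimal, illustrative `packages.yaml` fragment:

```yaml
packages:
  mpi:
    buildable: false  # no MPI implementation may be built from source;
                      # providers must come from external installations
```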
## Additional new features of note
* support for using Spack with the fish shell (#9279)
* `spack load --first` option to load first match (instead of prompting user) (#15622)
* support the Cray cce compiler both new and classic versions (#17256, #12989)
* `spack dev-build` command:
* supports stopping before a specified phase (#14699)
* supports automatically launching a shell in the build environment (#14887)
* `spack install --fail-fast` allows builds to fail at the first error (rather than best-effort) (#15295)
* environments: SpecList references can be dereferenced as compiler or dependency constraints (#15245)
* `spack view` command: new support for a copy/relocate view type (#16480)
* ci pipelines: see documentation for several improvements
* `spack mirror -a` command now supports excluding packages (#14154)
* `spack buildcache create` is now environment-aware (#16580)
* module generation: more flexible format for specifying naming schemes (#16629)
* lmod module generation: packages can be configured as core specs for lmod hierarchy (#16517)
## Deprecations and Removals
The following commands were deprecated in v0.13.0, and have now been removed:
* `spack configure`
* `spack build`
* `spack diy`
The following commands were deprecated in v0.14.0, and will be removed in the next major release:
* `spack bootstrap`
## Bugfixes
Some of the most notable bugfixes in this release include:
* Spack environments can now contain the string `-h` (#15429)
* `spack install` gracefully handles being backgrounded (#15723, #14682)
* Spack uses `-isystem` instead of `-I` in cases that the underlying build system does as well (#16077)
* Spack no longer prints any specs that cannot be safely copied into a Spack command (#16462)
* Incomplete Spack environments containing python no longer cause problems (#16473)
* Several improvements to binary package relocation
## Package Improvements
The Spack project is constantly engaged in routine maintenance,
bugfixes, and improvements for the package ecosystem. Of particular
note in this release are the following:
* Spack now contains 4339 packages. There are 430 newly supported packages in v0.15.0
* GCC now builds properly on ARM architectures (#17280)
* Python: patched to support compiling mixed C/C++ python modules through distutils (#16856)
* improvements to pytorch and py-tensorflow packages
* improvements to major MPI implementations: mvapich2, mpich, openmpi, and others
## Spack Project Management:
* Much of the Spack CI infrastructure has moved from Travis to GitHub Actions (#16610, #14220, #16345)
* All merges to the `develop` branch run E4S CI pipeline (#16338)
* New `spack debug report` command makes reporting bugs easier (#15834)
# v0.14.2 (2020-04-15)
This is a minor release on the `0.14` series. It includes performance
improvements and bug fixes:
* Improvements to how `spack install` handles foreground/background (#15723)
* Major performance improvements for reading the package DB (#14693, #15777)
* No longer check for the old `index.yaml` database file (#15298)
* Properly activate environments with '-h' in the name (#15429)
* External packages have correct `.prefix` in environments/views (#15475)
* Improvements to computing env modifications from sourcing files (#15791)
* Bugfix on Cray machines when getting `TERM` env variable (#15630)
* Avoid adding spurious `LMOD` env vars to Intel modules (#15778)
* Don't output [+] for mock installs run during tests (#15609)
# v0.14.1 (2020-03-20)
This is a bugfix release on top of `v0.14.0`. Specific fixes include:
* several bugfixes for parallel installation (#15339, #15341, #15220, #15197)
* `spack load` now works with packages that have been renamed (#14348)
* bugfix for `suite-sparse` installation (#15326)
* deduplicate identical suffixes added to module names (#14920)
* fix issues with `configure_args` during module refresh (#11084)
* increased test coverage and test fixes (#15237, #15354, #15346)
* remove some unused code (#15431)
# v0.14.0 (2020-02-23)
`v0.14.0` is a major feature release, with 3 highlighted features:
1. **Distributed builds.** Multiple Spack instances will now coordinate
properly with each other through locks. This works on a single node
(where you've called `spack` several times) or across multiple nodes
with a shared filesystem. For example, with SLURM, you could build
`trilinos` and its dependencies on 2 24-core nodes, with 3 Spack
instances per node and 8 build jobs per instance, with `srun -N 2 -n 6
spack install -j 8 trilinos`. This requires a filesystem with locking
enabled, but not MPI or any other library for parallelism.
2. **Build pipelines.** You can also build in parallel through Gitlab
CI. Simply create a Spack environment and push it to Gitlab to build
on Gitlab runners. Pipeline support is now integrated into a single
`spack ci` command, so setting it up is easier than ever. See the
[Pipelines section](https://spack.readthedocs.io/en/v0.14.0/pipelines.html)
in the docs.
3. **Container builds.** The new `spack containerize` command allows you
to create a Docker or Singularity recipe from any Spack environment.
There are options to customize the build if you need them. See the
[Container Images section](https://spack.readthedocs.io/en/latest/containers.html)
in the docs.
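   As a sketch of feature 3 above: a `container` section in `spack.yaml`
   controls how the recipe is generated (the spec list and format shown
   here are illustrative):

   ```yaml
   # spack.yaml
   spack:
     specs:
     - zlib
     container:
       format: docker  # or "singularity"
   ```

   Running `spack containerize` in the directory containing this file
   prints the generated recipe, e.g. `spack containerize > Dockerfile`.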
In addition, there are several other new commands, many bugfixes and
improvements, and `spack load` no longer requires modules, so you can use
it the same way on your laptop or on your supercomputer.
Spack grew by over 300 packages since our last release in November 2019,
and the project grew to over 500 contributors. Thanks to all of you for
making yet another great release possible. Detailed notes below.
## Major new core features
* Distributed builds: spack instances coordinate and build in parallel (#13100)
* New `spack ci` command to manage CI pipelines (#12854)
* Generate container recipes from environments: `spack containerize` (#14202)
* `spack load` now works without using modules (#14062, #14628)
* Garbage collect old/unused installations with `spack gc` (#13534)
* Configuration files all set environment modifications the same way (#14372,
  [docs](https://spack.readthedocs.io/en/v0.14.0/configuration.html#environment-modifications)); see the sketch after this list
* `spack commands --format=bash` auto-generates completion (#14393, #14607)
* Packages can specify alternate fetch URLs in case one fails (#13881)
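As a sketch of the unified environment-modification syntax mentioned above,
the same `set`/`unset`/`prepend_path`-style keys now work across
configuration files. An illustrative `modules.yaml` fragment (the variable
names and values are made up):

```yaml
modules:
  tcl:
    all:
      environment:
        set:
          EXAMPLE_VAR: example-value    # set when the module is loaded
        prepend_path:
          EXAMPLE_PATH: /illustrative/path
```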
## Improvements
* Improved locking for concurrency with environments (#14676, #14621, #14692)
* `spack test` sends args to `pytest`, supports better listing (#14319)
* Better support for aarch64 and cascadelake microarch (#13825, #13780, #13820)
* Archspec is now a separate library (see https://github.com/archspec/archspec)
* Many improvements to the `spack buildcache` command (#14237, #14346,
#14466, #14467, #14639, #14642, #14659, #14696, #14698, #14714, #14732,
#14929, #15003, #15086, #15134)
## Selected Bugfixes
* Compilers now require an exact match on version (#8735, #14730, #14752)
* Bugfix for patches that specified specific versions (#13989)
* `spack find -p` now works in environments (#10019, #13972)
* Dependency queries work correctly in `spack find` (#14757)
* Bugfixes for locking upstream Spack instances chains (#13364)
* Fixes for PowerPC clang optimization flags (#14196)
* Fix for issue with compilers and specific microarchitectures (#13733, #14798)
## New commands and options
* `spack ci` (#12854)
* `spack containerize` (#14202)
* `spack gc` (#13534)
* `spack load` accepts `--only package`, `--only dependencies` (#14062, #14628)
* `spack commands --format=bash` (#14393)
* `spack commands --update-completion` (#14607)
* `spack install --with-cache` has new option: `--no-check-signature` (#11107)
* `spack test` now has `--list`, `--list-long`, and `--list-names` (#14319)
* `spack install --help-cdash` moves CDash help out of the main help (#13704)
## Deprecations
* `spack release-jobs` has been rolled into `spack ci`
* `spack bootstrap` will be removed in a future version, as it is no longer
needed to set up modules (see `spack load` improvements above)
## Documentation
* New section on building container images with Spack (see
[docs](https://spack.readthedocs.io/en/latest/containers.html))
* New section on using `spack ci` command to build pipelines (see
[docs](https://spack.readthedocs.io/en/latest/pipelines.html))
* Document how to add conditional dependencies (#14694)
* Document how to use Spack to replace Homebrew/Conda (#13083, see
[docs](https://spack.readthedocs.io/en/latest/workflows.html#using-spack-to-replace-homebrew-conda))
## Important package changes
* 3,908 total packages (345 added since 0.13.0)
* Added first cut at a TensorFlow package (#13112)
* We now build R without "recommended" packages, manage them w/Spack (#12015)
* Elpa and OpenBLAS now leverage microarchitecture support (#13655, #14380)
* Fix `octave` compiler wrapper usage (#14726)
* Enforce that packages in `builtin` aren't missing dependencies (#13949)
# v0.13.4 (2020-02-07)
This release contains several bugfixes:
* bugfixes for invoking python in various environments (#14349, #14496, #14569)
* brought tab completion up to date (#14392)
* bugfix for removing extensions from views in order (#12961)
* bugfix for nondeterministic hashing for specs with externals (#14390)
# v0.13.3 (2019-12-23)
This release contains more major performance improvements for Spack
environments, as well as bugfixes for mirrors and a `python` issue with
RHEL8.
* mirror bugfixes: symlinks, duplicate patches, and exception handling (#13789)
* don't try to fetch `BundlePackages` (#13908)
* avoid re-fetching patches already added to a mirror (#13908)
* allow repeated invocations of `spack mirror create` on the same dir (#13908)
* bugfix for RHEL8 when `python` is unavailable (#14252)
* improve concretization performance in environments (#14190)
* improve installation performance in environments (#14263)
# v0.13.2 (2019-12-04)
This release contains major performance improvements for Spack environments, as
well as some bugfixes and minor changes.
* allow missing modules if they are blacklisted (#13540)
* speed up environment activation (#13557)
* mirror path works for unknown versions (#13626)
* environments: don't try to modify run-env if a spec is not installed (#13589)
* use semicolons instead of newlines in module/python command (#13904)
* verify.py: os.path.exists exception handling (#13656)
* Document use of the maintainers field (#13479)
* bugfix with config caching (#13755)
* hwloc: added 'master' version pointing at the HEAD of the master branch (#13734)
* config option to allow gpg warning suppression (#13744)
* fix for relative symlinks when relocating binary packages (#13727)
* allow binary relocation of strings in relative binaries (#13724)
# v0.13.1 (2019-11-05)
This is a bugfix release on top of `v0.13.0`. Specific fixes include:
* `spack find` now displays variants and other spec constraints
* bugfix: uninstall should find concrete specs by DAG hash (#13598)
* environments: make shell modifications partially unconditional (#13523)
* binary distribution: relocate text files properly in relative binaries (#13578)
* bugfix: fetch prefers to fetch local mirrors over remote resources (#13545)
* environments: only write when necessary (#13546)
* bugfix: spack.util.url.join() now handles absolute paths correctly (#13488)
* sbang: use utf-8 for encoding when patching (#13490)
* Specs with quoted flags containing spaces are parsed correctly (#13521)
* targets: print a warning message before downgrading (#13513)
* Travis CI: Test Python 3.8 (#13347)
* Documentation: Database.query methods share docstrings (#13515)
* cuda: fix conflict statements for x86-64 targets (#13472)
* cpu: fix clang flags for generic x86_64 (#13491)
* syaml_int type should use int.__repr__ rather than str.__repr__ (#13487)
* elpa: prefer 2016.05.004 until sse/avx/avx2 issues are resolved (#13530)
* trilinos: temporarily constrain netcdf@:4.7.1 (#13526)
# v0.13.0 (2019-10-25)
`v0.13.0` is our biggest Spack release yet, with *many* new major features.
From facility deployment to improved environments, microarchitecture
support, and auto-generated build farms, this release has features for all of
our users.
Spack grew by over 700 packages in the past year, and the project now has
over 450 contributors. Thanks to all of you for making this release possible.
## Major new core features
- Chaining: use dependencies from external "upstream" Spack instances
- Environments now behave more like virtualenv/conda
- Each env has a *view*: a directory with all packages symlinked in
- Activating an environment sets `PATH`, `LD_LIBRARY_PATH`, `CPATH`,
`CMAKE_PREFIX_PATH`, `PKG_CONFIG_PATH`, etc. to point to this view.
- Spack detects and builds specifically for your microarchitecture
- named, understandable targets like `skylake`, `broadwell`, `power9`, `zen2`
- Spack knows which compilers can build for which architectures
- Packages can easily query support for features like `avx512` and `sse3`
- You can pick a target with, e.g. `spack install foo target=icelake`
- Spack stacks: combinatorial environments for facility deployment
  - Environments can now build Cartesian products of specs (with `matrix:`; see the sketch after this list)
- Conditional syntax support to exclude certain builds from the stack
  - Projections: ability to build easily navigable symlink trees for environments
- Support no-source packages (BundlePackage) to aggregate related packages
- Extensions: users can write custom commands that live outside of Spack repo
- Support ARM and Fujitsu compilers
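A sketch of the stack syntax mentioned above: `matrix:` expands to the
Cartesian product of the referenced lists (the package and compiler names
here are illustrative):

```yaml
spack:
  definitions:
  - packages: [hdf5, zlib]
  - compilers: ['%gcc@8.3.0', '%clang@9.0.0']
  specs:
  - matrix:
    - [$packages]
    - [$compilers]   # expands to 4 specs: each package with each compiler
```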
## CI/build farm support
- `spack release-jobs` can detect `package.py` changes and generate
`.gitlab-ci.yml` to create binaries for an environment or stack
in parallel (initial support -- will change in future release).
- Results of build pipelines can be uploaded to a CDash server.
- Spack can now upload/fetch from package mirrors in Amazon S3
## New commands/options
- `spack mirror create --all` downloads *all* package sources/resources/patches
- `spack dev-build` runs phases of the install pipeline on the working directory
- `spack deprecate` permanently symlinks an old, unwanted package to a new one
- `spack verify` checks that packages' files match what was originally installed
- `spack find --json` prints `JSON` that is easy to parse with, e.g. `jq`
- `spack find --format FORMAT` allows you to flexibly print package metadata
- `spack spec --json` prints JSON version of `spec.yaml`
## Selected improvements
- Auto-build requested compilers if they do not exist
- Spack automatically adds `RPATHs` needed to make executables find compiler
runtime libraries (e.g., path to newer `libstdc++` in `icpc` or `g++`)
- setup-env.sh is now compatible with Bash, Dash, and Zsh
- Spack now caps build jobs at min(16, ncores) by default
- `spack compiler find` now also throttles number of spawned processes
- Spack now writes stage directories directly to `$TMPDIR` instead of
symlinking stages within `$spack/var/spack/cache`.
- Improved and more powerful `spec` format strings
- You can pass a `spec.yaml` file anywhere on the CLI where you can type a spec.
- Many improvements to binary caching
- Gradually supporting new features from Environment Modules v4
- `spack edit` respects `VISUAL` environment variable
- Simplified package syntax for specifying build/run environment modifications
- Numerous improvements to support for environments across Spack commands
- Concretization improvements
## Documentation
- Multi-lingual documentation (Started a Japanese translation)
- Tutorial now has its own site at spack-tutorial.readthedocs.io
- This enables us to keep multiple versions of the tutorial around
## Deprecations
- Spack no longer supports dotkit (LLNL's homegrown, now deprecated module tool)
- `spack build`, `spack configure`, `spack diy` deprecated in favor of
`spack dev-build` and `spack install`
## Important package changes
- 3,563 total packages (718 added since 0.12.1)
- Spack now defaults to Python 3 (previously preferred 2.7 by default)
- Much improved ARM support thanks to Fugaku (RIKEN) and SNL teams
- Support new special versions: master, trunk, and head (in addition to develop)
- Better finding logic for libraries and headers
# v0.12.1 (2018-11-13)
This is a minor bugfix release, with a minor fix in the tutorial and a `flake8` fix.
Bugfixes
* Add `r` back to regex strings in binary distribution
* Fix gcc install version in the tutorial
# v0.12.0 (2018-11-13)
## Major new features
- Spack environments
  - `spack.yaml` and `spack.lock` files for tracking dependencies (see the sketch after this list)
- Custom configurations via command line
- Better support for linking Python packages into view directories
- Packages have more control over compiler flags via flag handlers
- Better support for module file generation
- Better support for Intel compilers, Intel MPI, etc.
- Many performance improvements, improved startup time
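A minimal `spack.yaml` of the kind introduced here (the specs are
illustrative); the companion `spack.lock` records the fully concretized
result so an environment can be reproduced exactly:

```yaml
spack:
  specs:
  - hdf5 +mpi
  - zlib
```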
## License
- As of this release, all of Spack is permissively licensed under Apache-2.0 or MIT, at the user's option.
- Consents from over 300 contributors were obtained to make this relicense possible.
- Previous versions were distributed under the LGPL license, version 2.1.
## New packages
Over 2,900 packages (800 added since last year)
Spack would not be possible without our community. Thanks to all of our
[contributors](https://github.com/spack/spack/graphs/contributors) for the
new features and packages in this release!
# v0.11.2 (2018-02-07)
This release contains the following fixes:
* Fixes for `gfortran` 7 compiler detection (#7017)
* Fixes for exceptions thrown during module generation (#7173)
# v0.11.1 (2018-01-19)
This release contains bugfixes for compiler flag handling. There were issues in `v0.11.0` that caused some packages to be built without proper optimization.
Fixes:
* Issue #6999: FFTW installed with Spack 0.11.0 gets built without optimisations
Includes:
* PR #6415: Fixes for flag handling behavior
* PR #6960: Fix type issues with setting flag handlers
* 880e319: Upstream fixes to `list_url` in various R packages
# v0.11.0 (2018-01-17)
Spack v0.11.0 contains many improvements since v0.10.0.
Below is a summary of the major features, broken down by category.
## New packages
- Spack now has 2,178 packages (from 1,114 in v0.10.0)
- Many more Python packages (356) and R packages (471)
- 48 Exascale Proxy Apps (try `spack list -t proxy-app`)
## Core features for users
- Relocatable binary packages (`spack buildcache`, #4854)
- Spack now fully supports Python 3 (#3395)
- Packages can be tagged and searched by tags (#4786)
- Custom module file templates using Jinja (#3183)
- `spack bootstrap` command now sets up a basic module environment (#3057)
- Simplified and better organized help output (#3033)
- Improved, less redundant `spack install` output (#5714, #5950)
- Reworked `spack dependents` and `spack dependencies` commands (#4478)
## Major new features for packagers
- Multi-valued variants (#2386)
- New `conflicts()` directive (#3125)
- New dependency type: `test` dependencies (#5132)
- Packages can require their own patches on dependencies (#5476)
- `depends_on(..., patches=<patch list>)`
- Build interface for passing linker information through Specs (#1875)
- Major packages that use blas/lapack now use this interface
- Flag handlers allow packages more control over compiler flags (#6415)
- Package subclasses support many more build systems:
- autotools, perl, qmake, scons, cmake, makefile, python, R, WAF
- package-level support for installing Intel HPC products (#4300)
- `spack blame` command shows contributors to packages (#5522)
- `spack create` now guesses many more build systems (#2707)
- Better URL parsing to guess package version URLs (#2972)
- Much improved `PythonPackage` support (#3367)
## Core
- Much faster concretization (#5716, #5783)
- Improved output redirection (redirecting build output works properly #5084)
- Numerous improvements to internal structure and APIs
## Tutorials & Documentation
- Many updates to documentation
- [New tutorial material from SC17](https://spack.readthedocs.io/en/latest/tutorial.html)
- configuration
- build systems
- build interface
- working with module generation
- Documentation on docker workflows and best practices
## Selected improvements and bug fixes
- No longer build Python eggs -- installations are plain directories (#3587)
- Improved filtering of system paths from build PATHs and RPATHs (#2083, #3910)
- Git submodules are properly handled on fetch (#3956)
- Can now set default number of parallel build jobs in `config.yaml`
- Improvements to `setup-env.csh` (#4044)
- Better default compiler discovery on Mac OS X (#3427)
- clang will automatically mix with gfortran
- Improved compiler detection on Cray machines (#3075)
- Better support for IBM XL compilers
- Better tab completion
- Resume gracefully after prematurely terminated partial installs (#4331)
- Better mesa support (#5170)
Spack would not be possible without our community. Thanks to all of our
[contributors](https://github.com/spack/spack/graphs/contributors) for the
new features and packages in this release!
# v0.10.0 (2017-01-17)
This is Spack `v0.10.0`. With this release, we will start to push Spack
releases more regularly. This is the last Spack release without
automated package testing. With the next release, we will begin to run
package tests in addition to unit tests.
Spack has grown rapidly from 422 to
[1,114 packages](https://spack.readthedocs.io/en/v0.10.0/package_list.html),
thanks to the hard work of over 100 contributors. Below is a condensed
version of all the changes since `v0.9.1`.
### Packages
- Grew from 422 to 1,114 packages
- Includes major updates like X11, Qt
- Expanded HPC, R, and Python ecosystems
### Core
- Major speed improvements for `spack find` and concretization
- Completely reworked architecture support
- Platforms can have front-end and back-end OS/target combinations
- Much better support for Cray and BG/Q cross-compiled environments
- Downloads are now cached locally
- Support installations in deeply nested directories: patch long shebangs using `sbang`
### Basic usage
- Easier global configuration via config.yaml
- customize install, stage, and cache locations
- Hierarchical configuration scopes: default, site, user
- Platform-specific scopes allow better per-platform defaults
- Ability to set `cflags`, `cxxflags`, `fflags` on the command line
- YAML-configurable support for both Lmod and tcl modules in mainline
- `spack install` supports --dirty option for emergencies
### For developers
- Support multiple dependency types: `build`, `link`, and `run`
- Added `Package` base classes for custom build systems
- `AutotoolsPackage`, `CMakePackage`, `PythonPackage`, etc.
- `spack create` now guesses many more build systems
- Development environment integration with `spack setup`
- New interface to pass linking information via `spec` objects
- Currently used for `BLAS`/`LAPACK`/`SCALAPACK` libraries
- Polymorphic virtual dependency attributes: `spec['blas'].blas_libs`
### Testing & Documentation
- Unit tests run continuously on Travis CI for Mac and Linux
- Switched from `nose` to `pytest` for unit tests.
- Unit tests take 1 minute now instead of 8
- Massively expanded documentation
- Docs are now hosted on [spack.readthedocs.io](https://spack.readthedocs.io)

View File

@@ -1,4 +1,4 @@
# Spack Community Code of Conduct
# Contributor Covenant Code of Conduct
## Our Pledge
@@ -30,7 +30,7 @@ Project maintainers have the right and responsibility to remove, edit, or reject
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the Spack project or its community. Examples of representing the project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of the project may be further defined and clarified by Spack maintainers.
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement

View File

@@ -28,11 +28,9 @@ text in the license header:
External Packages
-------------------
Spack bundles most external dependencies in lib/spack/external. It also
includes the sbang tool directly in bin/sbang. These packages are covered
by various permissive licenses. A summary listing follows. See the
license included with each package for full details.
Spack bundles its external dependencies in lib/spack/external. These
packages are covered by various permissive licenses. A summary listing
follows. See the license included with each package for full details.
PackageName: argparse
PackageHomePage: https://pypi.python.org/pypi/argparse
@@ -70,6 +68,10 @@ PackageName: py
PackageHomePage: https://pypi.python.org/pypi/py
PackageLicenseDeclared: MIT
PackageName: pyqver
PackageHomePage: https://github.com/ghewgill/pyqver
PackageLicenseDeclared: BSD-3-Clause
PackageName: pytest
PackageHomePage: https://pypi.python.org/pypi/pytest
PackageLicenseDeclared: MIT
@@ -78,18 +80,6 @@ PackageName: ruamel.yaml
PackageHomePage: https://yaml.readthedocs.io/
PackageLicenseDeclared: MIT
PackageName: sbang
PackageHomePage: https://github.com/spack/sbang
PackageLicenseDeclared: Apache-2.0 OR MIT
PackageName: six
PackageHomePage: https://pypi.python.org/pypi/six
PackageLicenseDeclared: MIT
PackageName: macholib
PackageHomePage: https://macholib.readthedocs.io/en/latest/index.html
PackageLicenseDeclared: MIT
PackageName: altgraph
PackageHomePage: https://altgraph.readthedocs.io/en/latest/index.html
PackageLicenseDeclared: MIT

View File

@@ -1,21 +1,20 @@
MIT License
Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
Copyright (c) 2013-2020 LLNS, LLC and other Spack Project Developers.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

111
README.md
View File

@@ -1,8 +1,6 @@
# <img src="https://cdn.rawgit.com/spack/spack/develop/share/spack/logo/spack-logo.svg" width="64" valign="middle" alt="Spack"/> Spack
[![Unit Tests](https://github.com/spack/spack/workflows/linux%20tests/badge.svg)](https://github.com/spack/spack/actions)
[![Linux Builds](https://github.com/spack/spack/workflows/linux%20builds/badge.svg)](https://github.com/spack/spack/actions)
[![macOS Builds (nightly)](https://github.com/spack/spack/workflows/macOS%20builds%20nightly/badge.svg?branch=develop)](https://github.com/spack/spack/actions?query=workflow%3A%22macOS+builds+nightly%22)
[![Build Status](https://travis-ci.org/spack/spack.svg?branch=develop)](https://travis-ci.org/spack/spack)
[![codecov](https://codecov.io/gh/spack/spack/branch/develop/graph/badge.svg)](https://codecov.io/gh/spack/spack)
[![Read the Docs](https://readthedocs.org/projects/spack/badge/?version=latest)](https://spack.readthedocs.io)
[![Slack](https://spackpm.herokuapp.com/badge.svg)](https://spackpm.herokuapp.com)
@@ -20,7 +18,7 @@ builds of the same package. With Spack, you can build your software
*all* the ways you want to.
See the
[Feature Overview](https://spack.readthedocs.io/en/latest/features.html)
[Feature Overview](http://spack.readthedocs.io/en/latest/features.html)
for examples and highlights.
To install spack and your first package, make sure you have Python.
@@ -28,43 +26,55 @@ Then:
$ git clone https://github.com/spack/spack.git
$ cd spack/bin
$ ./spack install zlib
$ ./spack install libelf
Documentation
----------------
[**Full documentation**](https://spack.readthedocs.io/) is available, or
run `spack help` or `spack help --all`.
[**Full documentation**](http://spack.readthedocs.io/) for Spack is
the first place to look.
Tutorial
----------------
Try the
[**Spack Tutorial**](http://spack.readthedocs.io/en/latest/tutorial.html),
to learn how to use spack, write packages, or deploy packages for users
at your site.
We maintain a
[**hands-on tutorial**](https://spack.readthedocs.io/en/latest/tutorial.html).
It covers basic to advanced usage, packaging, developer features, and large HPC
deployments. You can do all of the exercises on your own laptop using a
Docker container.
See also:
* [Technical paper](http://www.computer.org/csdl/proceedings/sc/2015/3723/00/2807623.pdf) and
[slides](https://tgamblin.github.io/files/Gamblin-Spack-SC15-Talk.pdf) on Spack's design and implementation.
* [Short presentation](https://tgamblin.github.io/files/Gamblin-Spack-Lightning-Talk-BOF-SC15.pdf) from the *Getting Scientific Software Installed* BOF session at Supercomputing 2015.
Feel free to use these materials to teach users at your organization
about Spack.
Community
Get Involved!
------------------------
Spack is an open source project. Questions, discussion, and
contributions are welcome. Contributions can be anything from new
packages to bugfixes, documentation, or even new core features.
packages to bugfixes, or even new core features.
Resources:
### Mailing list
* **Slack workspace**: [spackpm.slack.com](https://spackpm.slack.com).
To get an invitation, [**click here**](https://spackpm.herokuapp.com).
* **Mailing list**: [groups.google.com/d/forum/spack](https://groups.google.com/d/forum/spack)
* **Twitter**: [@spackpm](https://twitter.com/spackpm). Be sure to
`@mention` us!
If you are interested in contributing to spack, join the mailing list.
We're using Google Groups for this:
* [Spack Google Group](https://groups.google.com/d/forum/spack)
### Slack channel
Spack has a Slack channel where you can chat about all things Spack:
* [Spack on Slack](https://spackpm.slack.com)
[Sign up here](https://spackpm.herokuapp.com) to get an invitation mailed
to you.
### Twitter
You can follow [@spackpm](https://twitter.com/spackpm) on Twitter for
updates. Also, feel free to `@mention` us in questions or comments
about your own experience with Spack.
### Contributions
Contributing
------------------------
Contributing to Spack is relatively easy. Just send us a
[pull request](https://help.github.com/articles/using-pull-requests/).
When you send your request, make ``develop`` the destination branch on the
@@ -72,40 +82,15 @@ When you send your request, make ``develop`` the destination branch on the
Your PR must pass Spack's unit tests and documentation tests, and must be
[PEP 8](https://www.python.org/dev/peps/pep-0008/) compliant. We enforce
these guidelines with our CI process. To run these tests locally, and for
helpful tips on git, see our
[Contribution Guide](https://spack.readthedocs.io/en/latest/contribution_guide.html).
these guidelines with [Travis CI](https://travis-ci.org/spack/spack). To
run these tests locally, and for helpful tips on git, see our
[Contribution Guide](http://spack.readthedocs.io/en/latest/contribution_guide.html).
Spack's `develop` branch has the latest contributions. Pull requests
should target `develop`, and users who want the latest package versions,
features, etc. can use `develop`.
Releases
--------
For multi-user site deployments or other use cases that need very stable
software installations, we recommend using Spack's
[stable releases](https://github.com/spack/spack/releases).
Each Spack release series also has a corresponding branch, e.g.
`releases/v0.14` has `0.14.x` versions of Spack, and `releases/v0.13` has
`0.13.x` versions. We backport important bug fixes to these branches but
we do not advance the package versions or make other changes that would
change the way Spack concretizes dependencies within a release branch.
So, you can base your Spack deployment on a release branch and `git pull`
to get fixes, without the package churn that comes with `develop`.
The latest release is always available with the `releases/latest` tag.
See the [docs on releases](https://spack.readthedocs.io/en/latest/developer_guide.html#releases)
for more details.
Code of Conduct
------------------------
Please note that Spack has a
[**Code of Conduct**](.github/CODE_OF_CONDUCT.md). By participating in
the Spack community, you agree to abide by its rules.
Spack uses a rough approximation of the
[Git Flow](http://nvie.com/posts/a-successful-git-branching-model/)
branching model. The ``develop`` branch contains the latest
contributions, and ``master`` is always tagged and points to the latest
stable release.
Authors
----------------
@@ -119,7 +104,7 @@ If you are referencing Spack in a publication, please cite the following paper:
* Todd Gamblin, Matthew P. LeGendre, Michael R. Collette, Gregory L. Lee,
Adam Moody, Bronis R. de Supinski, and W. Scott Futral.
[**The Spack Package Manager: Bringing Order to HPC Software Chaos**](https://www.computer.org/csdl/proceedings/sc/2015/3723/00/2807623.pdf).
[**The Spack Package Manager: Bringing Order to HPC Software Chaos**](http://www.computer.org/csdl/proceedings/sc/2015/3723/00/2807623.pdf).
In *Supercomputing 2015 (SC15)*, Austin, Texas, November 15-20 2015. LLNL-CONF-669890.
License
@@ -137,6 +122,6 @@ See [LICENSE-MIT](https://github.com/spack/spack/blob/develop/LICENSE-MIT),
[COPYRIGHT](https://github.com/spack/spack/blob/develop/COPYRIGHT), and
[NOTICE](https://github.com/spack/spack/blob/develop/NOTICE) for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
`SPDX-License-Identifier: (Apache-2.0 OR MIT)`
LLNL-CODE-811652
``LLNL-CODE-647188``

165
bin/sbang
View File

@@ -1,103 +1,114 @@
#!/bin/sh
#!/bin/bash
#
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# sbang project developers. See the top-level COPYRIGHT file for details.
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
# `sbang`: Run scripts with long shebang lines.
#
# Many operating systems limit the length and number of possible
# arguments in shebang lines, making it hard to use interpreters that are
# deep in the directory hierarchy or require special arguments.
# Many operating systems limit the length of shebang lines, making it
# hard to use interpreters that are deep in the directory hierarchy.
# `sbang` can run such scripts, either as a shebang interpreter, or
# directly on the command line.
#
# To use, put the long shebang on the second line of your script, and
# make sbang the interpreter, like this:
# Usage
# -----------------------------
# Suppose you have a script, long-shebang.sh, like this:
#
# #!/bin/sh /path/to/sbang
# #!/long/path/to/real/interpreter with arguments
# 1 #!/very/long/path/to/some/interpreter
# 2
# 3 echo "success!"
#
# `sbang` will run the real interpreter with the script as its argument.
# Invoking this script will result in an error on some OS's. On
# Linux, you get this:
#
# See https://github.com/spack/sbang for more details.
# $ ./long-shebang.sh
# -bash: ./long: /very/long/path/to/some/interp: bad interpreter:
# No such file or directory
#
# On Mac OS X, the system simply assumes the interpreter is the shell
# and tries to run with it, which is likely not what you want.
#
#
# `sbang` on the command line
# -----------------------------
# You can use `sbang` in two ways. The first is to use it directly,
# from the command line, like this:
#
# $ sbang ./long-shebang.sh
# success!
#
#
# `sbang` as the interpreter
# -----------------------------
# You can also use `sbang` *as* the interpreter for your script. Put
# `#!/bin/bash /path/to/sbang` on line 1, and move the original
# shebang to line 2 of the script:
#
# 1 #!/bin/bash /path/to/sbang
# 2 #!/long/path/to/real/interpreter with arguments
# 3
# 4 echo "success!"
#
# $ ./long-shebang.sh
# success!
#
# On Linux, you could shorten line 1 to `#!/path/to/sbang`, but other
# operating systems like Mac OS X require the interpreter to be a
# binary, so it's best to use `sbang` as a `bash` argument.
# Obviously, for this to work, `sbang` needs to have a short enough
# path that *it* will run without hitting OS limits.
#
# For Lua scripts, the second line can't start with #!, as # is not
# the comment character in lua (even though lua ignores #! on the
# *first* line of a script). So, instrument a lua script like this,
# using -- instead of # on the second line:
#
# 1 #!/bin/bash /path/to/sbang
# 2 --!/long/path/to/lua with arguments
# 3
# 4 print "success!"
#
# How it works
# -----------------------------
# `sbang` is a very simple bash script. It looks at the first two
# lines of a script argument and runs the last line starting with
# `#!`, with the script as an argument. It also forwards arguments.
#
# Generic error handling
die() {
echo "$@" 1>&2;
exit 1
}
# set SBANG_DEBUG to make the script print what would normally be executed.
exec="exec"
if [ -n "${SBANG_DEBUG}" ]; then
exec="echo "
fi
# First argument is the script we want to actually run.
script="$1"
# ensure that the script actually exists
if [ -z "$script" ]; then
die "error: sbang requires exactly one argument"
elif [ ! -f "$script" ]; then
die "$script: no such file or directory"
fi
# Search the first two lines of script for interpreters.
lines=0
while read -r line && [ $lines -ne 2 ]; do
if [ "${line#\#!}" != "$line" ]; then
shebang_line="${line#\#!}"
elif [ "${line#//!}" != "$line" ]; then # // comments
shebang_line="${line#//!}"
elif [ "${line#--!}" != "$line" ]; then # -- lua comments
shebang_line="${line#--!}"
elif [ "${line#<?php\ }" != "$line" ]; then # php comments
shebang_line="${line#<?php\ \#!}"
shebang_line="${shebang_line%\ ?>}"
while read line && ((lines < 2)) ; do
if [[ "$line" = '#!'* ]]; then
interpreter="${line#\#!}"
elif [[ "$line" = '//!'*node* ]]; then
interpreter="${line#//!}"
elif [[ "$line" = '--!'*lua* ]]; then
interpreter="${line#--!}"
fi
lines=$((lines+1))
done < "$script"
# this is needed for scripts with an sbang parameter
# like ones in intltool
# #!/<spack-long-path>/perl -w
# this is the interpreter line with all the parameters as a vector
interpreter_v=(${interpreter})
# this is the single interpreter path
interpreter_f="${interpreter_v[0]}"
# error if we did not find any interpreter
if [ -z "$shebang_line" ]; then
die "error: sbang found no interpreter in $script"
fi
# parse out the interpreter and first argument
IFS=' ' read -r interpreter arg1 rest <<EOF
$shebang_line
EOF
# Determine if the interpreter is a particular program, accounting for the
# '#!/usr/bin/env PROGRAM' convention. So:
#
# interpreter_is perl
#
# will be true for '#!/usr/bin/perl' and '#!/usr/bin/env perl'
interpreter_is() {
if [ "${interpreter##*/}" = "$1" ]; then
return 0
elif [ "$interpreter" = "/usr/bin/env" ] && [ "$arg1" = "$1" ]; then
return 0
# Invoke any interpreter found, or raise an error if none was found.
if [[ -n "$interpreter_f" ]]; then
if [[ "${interpreter_f##*/}" = "perl" ]]; then
exec $interpreter_v -x "$@"
else
return 1
exec $interpreter_v "$@"
fi
}
if interpreter_is "sbang"; then
die "error: refusing to re-execute sbang to avoid infinite loop."
fi
# Finally invoke the real shebang line
# ruby and perl need -x to ignore the first line of input (the sbang line)
#
if interpreter_is perl || interpreter_is ruby; then
# shellcheck disable=SC2086
$exec $shebang_line -x "$@"
else
# shellcheck disable=SC2086
$exec $shebang_line "$@"
echo "error: sbang found no interpreter in $script"
exit 1
fi

View File

@@ -1,43 +1,19 @@
#!/bin/sh
# -*- python -*-
#!/usr/bin/env python
#
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# This file is bilingual. The following shell code finds our preferred python.
# Following line is a shell no-op, and starts a multi-line Python comment.
# See https://stackoverflow.com/a/47886254
""":"
# prefer SPACK_PYTHON environment variable, python3, python, then python2
for cmd in "${SPACK_PYTHON:-}" python3 python python2; do
if command -v > /dev/null "$cmd"; then
export SPACK_PYTHON="$(command -v "$cmd")"
exec "${SPACK_PYTHON}" "$0" "$@"
fi
done
echo "==> Error: spack could not find a python interpreter!" >&2
exit 1
":"""
# Line above is a shell no-op, and ends a python multi-line comment.
# The code above runs this file with our preferred python interpreter.
from __future__ import print_function
import os
import sys
min_python3 = (3, 5)
if sys.version_info[:2] < (2, 6) or (
sys.version_info[:2] >= (3, 0) and sys.version_info[:2] < min_python3
):
if sys.version_info[:2] < (2, 6):
v_info = sys.version_info[:3]
msg = "Spack requires Python 2.6, 2.7 or %d.%d or higher " % min_python3
msg += "You are running spack with Python %d.%d.%d." % v_info
sys.exit(msg)
sys.exit("Spack requires Python 2.6 or higher."
"This is Python %d.%d.%d." % v_info)
# Find spack's location and its prefix.
spack_file = os.path.realpath(os.path.expanduser(__file__))
@@ -50,10 +26,8 @@ sys.path.insert(0, spack_lib_path)
# Add external libs
spack_external_libs = os.path.join(spack_lib_path, "external")
if sys.version_info[:2] <= (2, 7):
sys.path.insert(0, os.path.join(spack_external_libs, "py2"))
if sys.version_info[:2] == (2, 6):
sys.path.insert(0, os.path.join(spack_external_libs, "py26"))
sys.path.insert(0, os.path.join(spack_external_libs, 'py26'))
sys.path.insert(0, spack_external_libs)
@@ -61,16 +35,14 @@ sys.path.insert(0, spack_external_libs)
# (see #9206 for a broader description of the issue).
#
# Briefly: ruamel.yaml produces a .pth file when installed with pip that
# makes the site installed package the preferred one, even though sys.path
# makes the site installed package the preferred one, even tough sys.path
# is modified to point to another version of ruamel.yaml.
if "ruamel.yaml" in sys.modules:
del sys.modules["ruamel.yaml"]
if 'ruamel.yaml' in sys.modules:
del sys.modules['ruamel.yaml']
if "ruamel" in sys.modules:
del sys.modules["ruamel"]
import spack.main # noqa
if 'ruamel' in sys.modules:
del sys.modules['ruamel']
# Once we've set up the system path, run the spack main method
if __name__ == "__main__":
sys.exit(spack.main.main())
import spack.main # noqa
sys.exit(spack.main.main())

View File

@@ -1,6 +1,6 @@
#!/bin/sh
#
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -22,4 +22,4 @@
#
# This is compatible across platforms.
#
exec /usr/bin/env spack python "$@"
/usr/bin/env spack python "$@"

View File

@@ -16,17 +16,7 @@
config:
# This is the path to the root of the Spack install tree.
# You can use $spack here to refer to the root of the spack instance.
install_tree:
root: $spack/opt/spack
projections:
all: "${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}"
# install_tree can include an optional padded length (int or boolean)
# default is False (do not pad)
# if padded_length is True, Spack will pad as close to the system max path
# length as possible
# if padded_length is an integer, Spack will pad to that many characters,
# assuming it is higher than the length of the install_tree root.
# padded_length: 128
install_tree: $spack/opt/spack
# Locations where templates should be found
@@ -34,43 +24,33 @@ config:
- $spack/share/spack/templates
# Default directory layout
install_path_scheme: "${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}"
# Locations where different types of modules should be installed.
module_roots:
tcl: $spack/share/spack/modules
lmod: $spack/share/spack/lmod
dotkit: $spack/share/spack/dotkit
# `build_stage` determines where Spack builds packages.
# Temporary locations Spack can try to use for builds.
#
# The default build location is `$tempdir/$user/spack-stage/$instance`.
# `$tempdir` indicates that we should build in a temporary directory
# (i.e., `$TMP` or `$TMPDIR`). On most systems (especially HPC
# machines), building in a temporary directory is significantly faster
# than other locations. `$user` ensures that the directory is unique by
# user, so different users do not fight over Spack's build location.
# Finally, `$instance` is an 8-digit hash that is unique per instance
# of Spack. This ensures that different Spack instances do not fight
# over build locations.
# Spack will use the first one it finds that exists and is writable.
# You can use $tempdir to refer to the system default temp directory
# (as returned by tempfile.gettempdir()).
#
# The second choice, if Spack cannot create the first one for some
# reason, is `~/.spack/stage/$instance`. This is unique to each user's
# home directory, and it is also unique to each Spack instance.
# A value of $spack/var/spack/stage indicates that Spack should run
# builds directly inside its install directory without staging them in
# temporary space.
#
# These choices both have the username in the path. If the username is
# NOT in your chosen `build_stage` location, Spack will append it
# anyway, to avoid conflicts among users in shared temporary spaces.
#
# The build stage can be purged with `spack clean`, so it is important
# to choose a directory that is ONLY used by Spack so that you do not
# accidentally wipe out files that have nothing to do with Spack.
# The build stage can be purged with `spack clean --stage`.
build_stage:
- $tempdir/$user/spack-stage/$instance
- ~/.spack/stage/$instance
- $tempdir
- /nfs/tmp2/$user
- $spack/var/spack/stage
# Directory in which to run tests and store test results.
# Tests will be stored in directories named by date/time and package
# name/hash.
test_stage: ~/.spack/test
# Cache directory for already downloaded source tarballs and archived
# repositories. This can be purged with `spack clean --downloads`.
@@ -82,41 +62,16 @@ config:
misc_cache: ~/.spack/cache
# Timeout in seconds used for downloading sources etc. This only applies
# to the connection phase and can be increased for slow connections or
# servers. 0 means no timeout.
connect_timeout: 10
# If this is false, tools like curl that use SSL will not verify
# certificates (e.g., curl will use the -k option)
verify_ssl: true
# Suppress gpg warnings from binary package verification
# Only suppresses warnings, gpg failure will still fail the install
# Potential rationale to set True: users have already explicitly trusted the
# gpg key they are using, and may not want to see repeated warnings that it
# is self-signed or something of the sort.
suppress_gpg_warnings: false
# If set to true, Spack will attempt to build any compiler on the spec
# that is not already available. If set to False, Spack will only use
# compilers already configured in compilers.yaml
install_missing_compilers: false
# If set to true, Spack will always check checksums after downloading
# archives. If false, Spack skips the checksum step.
checksum: true
# If set to true, Spack will fetch deprecated versions without warning.
# If false, Spack will raise an error when trying to install a deprecated version.
deprecated: false
# If set to true, `spack install` and friends will NOT clean
# potentially harmful variables from the build environment. Use wisely.
dirty: false
@@ -139,37 +94,21 @@ config:
locks: true
# The maximum number of jobs to use when running `make` in parallel,
# always limited by the number of cores available. For instance:
# - If set to 16 on a 4-core machine, `spack install` will run `make -j4`
# - If set to 16 on an 18-core machine, `spack install` will run `make -j16`
# If not set, Spack will use all available cores up to 16.
# build_jobs: 16
# The default number of jobs to use when running `make` in parallel.
# If set to 4, for example, `spack install` will run `make -j4`.
# If not set, all available cores are used by default.
# build_jobs: 4
# If set to true, Spack will use ccache to cache C compiles.
ccache: false
# The concretization algorithm to use in Spack. Options are:
#
# 'original': Spack's original greedy, fixed-point concretizer. This
# algorithm can make decisions too early and will not backtrack
# sufficiently for many specs.
#
# 'clingo': Uses a logic solver under the hood to solve DAGs with full
# backtracking and optimization for user preferences.
#
# 'clingo' currently requires the clingo ASP solver to be installed and
# built with python bindings. 'original' is built in.
concretizer: original
# How long to wait to lock the Spack installation database. This lock is used
# when Spack needs to manage its own package metadata and all operations are
# expected to complete within the default time limit. The timeout should
# therefore generally be left untouched.
db_lock_timeout: 3
db_lock_timeout: 120
# How long to wait when attempting to modify a package (e.g. to install it).
@@ -178,14 +117,3 @@ config:
# anticipates that a significant delay indicates that the lock attempt will
# never succeed.
package_lock_timeout: null
# Control whether Spack embeds RPATH or RUNPATH attributes in ELF binaries.
# Has no effect on macOS. DO NOT MIX these within the same install tree.
# See the Spack documentation for details.
shared_linking: 'rpath'
# Set to 'false' to allow installation on filesystems that don't allow setgid bit
# manipulation by unprivileged users (e.g. AFS)
allow_sgid: true

View File

@@ -16,6 +16,8 @@
modules:
prefix_inspections:
lib:
- DYLD_LIBRARY_PATH
- DYLD_FALLBACK_LIBRARY_PATH
lib64:
- DYLD_LIBRARY_PATH
- DYLD_FALLBACK_LIBRARY_PATH

View File

@@ -15,29 +15,13 @@
# -------------------------------------------------------------------------
packages:
all:
compiler:
- apple-clang
- clang
- gcc
- intel
compiler: [clang, gcc, intel]
providers:
elf:
- libelf
unwind:
- apple-libunwind
uuid:
- apple-libuuid
elf: [libelf]
unwind: [apple-libunwind]
apple-libunwind:
buildable: false
externals:
paths:
# Apple bundles libunwind version 35.3 with macOS 10.9 and later,
# although the version number used here isn't critical
- spec: apple-libunwind@35.3
prefix: /usr
apple-libuuid:
buildable: false
externals:
# Apple bundles libuuid in libsystem_c version 1353.100.2,
# although the version number used here isn't critical
- spec: apple-libuuid@1353.100.2
prefix: /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk
apple-libunwind@35.3: /usr
buildable: False

View File

@@ -1,2 +0,0 @@
mirrors:
spack-public: https://spack-llnl-mirror.s3-us-west-2.amazonaws.com/

View File

@@ -16,6 +16,7 @@
modules:
enable:
- tcl
- dotkit
prefix_inspections:
bin:
- PATH
@@ -25,12 +26,16 @@ modules:
- MANPATH
share/aclocal:
- ACLOCAL_PATH
lib:
- LIBRARY_PATH
lib64:
- LIBRARY_PATH
include:
- CPATH
lib/pkgconfig:
- PKG_CONFIG_PATH
lib64/pkgconfig:
- PKG_CONFIG_PATH
share/pkgconfig:
- PKG_CONFIG_PATH
'':
- CMAKE_PREFIX_PATH

View File

@@ -15,42 +15,32 @@
# -------------------------------------------------------------------------
packages:
all:
compiler: [gcc, intel, pgi, clang, xl, nag, fj, aocc]
compiler: [gcc, intel, pgi, clang, xl, nag]
providers:
D: [ldc]
awk: [gawk]
blas: [openblas, amdblis]
blas: [openblas]
daal: [intel-daal]
elf: [elfutils]
fftw-api: [fftw, amdfftw]
gl: [mesa+opengl, mesa18, opengl]
glx: [mesa+glx, mesa18+glx, opengl]
fftw-api: [fftw]
gl: [mesa, opengl]
glu: [mesa-glu, openglu]
golang: [gcc]
iconv: [libiconv]
ipp: [intel-ipp]
java: [openjdk, jdk, ibm-java]
java: [jdk]
jpeg: [libjpeg-turbo, libjpeg]
lapack: [openblas, amdlibflame]
mariadb-client: [mariadb-c-client, mariadb]
lapack: [openblas]
mkl: [intel-mkl]
mpe: [mpe2]
mpi: [openmpi, mpich]
mysql-client: [mysql, mariadb-c-client]
opencl: [pocl]
osmesa: [mesa+osmesa, mesa18+osmesa]
openfoam: [openfoam-com, openfoam-org, foam-extend]
pil: [py-pillow]
pkgconfig: [pkgconf, pkg-config]
rpc: [libtirpc]
scalapack: [netlib-scalapack, amdscalapack]
sycl: [hipsycl]
scalapack: [netlib-scalapack]
szip: [libszip, libaec]
tbb: [intel-tbb]
unwind: [libunwind]
yacc: [bison, byacc]
flame: [libflame, amdlibflame]
uuid: [util-linux-uuid, libuuid]
ziglang: [zig]
permissions:
read: world
write: user

View File

@@ -3,5 +3,3 @@ command_index.rst
spack*.rst
llnl*.rst
_build
.spack-env
spack.lock

View File

@@ -2,7 +2,7 @@
#
# You can set these variables from the command line.
SPHINXOPTS = -W
SPHINXOPTS = -E
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build

View File

@@ -1,10 +1,10 @@
<html>
<head>
<meta http-equiv="refresh" content="0; url=https://spack.readthedocs.io/" />
<meta http-equiv="refresh" content="0; url=http://spack.readthedocs.io/" />
</head>
<body>
<p>
This page has moved to <a href="https://spack.readthedocs.io/">https://spack.readthedocs.io/</a>
This page has moved to <a href="http://spack.readthedocs.io/">http://spack.readthedocs.io/</a>
</p>
</body>
</html>

View File

@@ -1 +0,0 @@
../../..

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -25,14 +25,6 @@ It is recommended that the following be put in your ``.bashrc`` file:
alias less='less -R'
If you do not see colorized output when using ``less -R`` it is because color
is being disabled in the piped output. In this case, tell spack to force
colorized output.
.. code-block:: console
$ spack --color always | less -R
--------------------------
Listing available packages
--------------------------
@@ -47,15 +39,12 @@ available. You can see a list of available package names at the
``spack list``
^^^^^^^^^^^^^^
The ``spack list`` command prints out a list of all of the packages Spack
can install:
The ``spack list`` command prints out a list of all of the packages
Spack can install:
.. command-output:: spack list
:ellipsis: 10
There are thousands of them, so we've truncated the output above, but you
can find a :ref:`full list here <package-list>`.
Packages are listed by name in alphabetical order.
The packages are listed by name in alphabetical order.
A pattern to match with no wildcards, ``*`` or ``?``,
will be treated as though it started and ended with
``*``, so ``util`` is equivalent to ``*util*``. All patterns will be treated
@@ -132,27 +121,32 @@ If ``mpileaks`` depends on other packages, Spack will install the
dependencies first. It then fetches the ``mpileaks`` tarball, expands
it, verifies that it was downloaded without errors, builds it, and
installs it in its own directory under ``$SPACK_ROOT/opt``. You'll see
a number of messages from Spack, a lot of build output, and a message
that the package is installed.
a number of messages from spack, a lot of build output, and a message
that the packages is installed:
.. code-block:: console
$ spack install mpileaks
... dependency build output ...
==> Installing mpileaks-1.0-ph7pbnhl334wuhogmugriohcwempqry2
==> No binary for mpileaks-1.0-ph7pbnhl334wuhogmugriohcwempqry2 found: installing from source
==> mpileaks: Executing phase: 'autoreconf'
==> mpileaks: Executing phase: 'configure'
==> mpileaks: Executing phase: 'build'
==> mpileaks: Executing phase: 'install'
[+] ~/spack/opt/linux-rhel7-broadwell/gcc-8.1.0/mpileaks-1.0-ph7pbnhl334wuhogmugriohcwempqry2
==> Installing mpileaks
==> mpich is already installed in ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4.
==> callpath is already installed in ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/callpath@1.0.2-5dce4318.
==> adept-utils is already installed in ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/adept-utils@1.0-5adef8da.
==> Trying to fetch from https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz
######################################################################## 100.0%
==> Staging archive: ~/spack/var/spack/stage/mpileaks@1.0%gcc@4.4.7 arch=linux-debian7-x86_64-59f6ad23/mpileaks-1.0.tar.gz
==> Created stage in ~/spack/var/spack/stage/mpileaks@1.0%gcc@4.4.7 arch=linux-debian7-x86_64-59f6ad23.
==> No patches needed for mpileaks.
==> Building mpileaks.
... build output ...
==> Successfully installed mpileaks.
Fetch: 2.16s. Build: 9.82s. Total: 11.98s.
[+] ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpileaks@1.0-59f6ad23
The last line, with the ``[+]``, indicates where the package is
installed.
Add the Spack debug option (one or more times) -- ``spack -d install
mpileaks`` -- to get additional (and even more verbose) output.
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Building a specific version
^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -235,146 +229,6 @@ remove dependent packages *before* removing their dependencies or use the
.. _nondownloadable:
^^^^^^^^^^^^^^^^^^
Garbage collection
^^^^^^^^^^^^^^^^^^
When Spack builds software from sources, it often installs tools that are needed
just to build or test other software. These are not necessary at runtime.
To support cases where removing these tools can be a benefit, Spack provides
the ``spack gc`` ("garbage collector") command, which will uninstall all unneeded packages:
.. code-block:: console
$ spack find
==> 24 installed packages
-- linux-ubuntu18.04-broadwell / gcc@9.0.1 ----------------------
autoconf@2.69 findutils@4.6.0 libiconv@1.16 libszip@2.1.1 m4@1.4.18 openjpeg@2.3.1 pkgconf@1.6.3 util-macros@1.19.1
automake@1.16.1 gdbm@1.18.1 libpciaccess@0.13.5 libtool@2.4.6 mpich@3.3.2 openssl@1.1.1d readline@8.0 xz@5.2.4
cmake@3.16.1 hdf5@1.10.5 libsigsegv@2.12 libxml2@2.9.9 ncurses@6.1 perl@5.30.0 texinfo@6.5 zlib@1.2.11
$ spack gc
==> The following packages will be uninstalled:
-- linux-ubuntu18.04-broadwell / gcc@9.0.1 ----------------------
vn47edz autoconf@2.69 6m3f2qn findutils@4.6.0 ubl6bgk libtool@2.4.6 pksawhz openssl@1.1.1d urdw22a readline@8.0
ki6nfw5 automake@1.16.1 fklde6b gdbm@1.18.1 b6pswuo m4@1.4.18 k3s2csy perl@5.30.0 lp5ya3t texinfo@6.5
ylvgsov cmake@3.16.1 5omotir libsigsegv@2.12 leuzbbh ncurses@6.1 5vmfbrq pkgconf@1.6.3 5bmv4tg util-macros@1.19.1
==> Do you want to proceed? [y/N] y
[ ... ]
$ spack find
==> 9 installed packages
-- linux-ubuntu18.04-broadwell / gcc@9.0.1 ----------------------
hdf5@1.10.5 libiconv@1.16 libpciaccess@0.13.5 libszip@2.1.1 libxml2@2.9.9 mpich@3.3.2 openjpeg@2.3.1 xz@5.2.4 zlib@1.2.11
In the example above, Spack went through all the packages in the package database
and removed everything that is not either:
1. A package installed upon explicit request of the user
2. A ``link`` or ``run`` dependency, even transitive, of one of the packages at point 1.
You can check :ref:`cmd-spack-find-metadata` to see how to query for explicitly installed packages
or :ref:`dependency-types` for a more thorough treatment of dependency types.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Marking packages explicit or implicit
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
By default, Spack will mark packages a user installs as explicitly installed,
while all of its dependencies will be marked as implicitly installed. Packages
can be marked manually as explicitly or implicitly installed by using
``spack mark``. This can be used in combination with ``spack gc`` to clean up
packages that are no longer required.
.. code-block:: console
$ spack install m4
==> 29005: Installing libsigsegv
[...]
==> 29005: Installing m4
[...]
$ spack install m4 ^libsigsegv@2.11
==> 39798: Installing libsigsegv
[...]
==> 39798: Installing m4
[...]
$ spack find -d
==> 4 installed packages
-- linux-fedora32-haswell / gcc@10.1.1 --------------------------
libsigsegv@2.11
libsigsegv@2.12
m4@1.4.18
libsigsegv@2.12
m4@1.4.18
libsigsegv@2.11
$ spack gc
==> There are no unused specs. Spack's store is clean.
$ spack mark -i m4 ^libsigsegv@2.11
==> m4@1.4.18 : marking the package implicit
$ spack gc
==> The following packages will be uninstalled:
-- linux-fedora32-haswell / gcc@10.1.1 --------------------------
5fj7p2o libsigsegv@2.11 c6ensc6 m4@1.4.18
==> Do you want to proceed? [y/N]
In the example above, we ended up with two versions of ``m4`` since they depend
on different versions of ``libsigsegv``. ``spack gc`` will not remove any of
the packages since both versions of ``m4`` have been installed explicitly
and both versions of ``libsigsegv`` are required by the ``m4`` packages.
``spack mark`` can also be used to implement upgrade workflows. The following
example demonstrates how the ``spack mark`` and ``spack gc`` can be used to
only keep the current version of a package installed.
When updating Spack via ``git pull``, new versions for either ``libsigsegv``
or ``m4`` might be introduced. This will cause Spack to install duplicates.
Since we only want to keep one version, we mark everything as implicitly
installed before updating Spack. If there is no new version for either of the
packages, ``spack install`` will simply mark them as explicitly installed and
``spack gc`` will not remove them.
.. code-block:: console
$ spack install m4
==> 62843: Installing libsigsegv
[...]
==> 62843: Installing m4
[...]
$ spack mark -i -a
==> m4@1.4.18 : marking the package implicit
$ git pull
[...]
$ spack install m4
[...]
==> m4@1.4.18 : marking the package explicit
[...]
$ spack gc
==> There are no unused specs. Spack's store is clean.
When using this workflow for installations that contain more packages, take
care to either mark only the selected packages or to issue ``spack install``
for all packages that should be kept.
You can check :ref:`cmd-spack-find-metadata` to see how to query for explicitly
or implicitly installed packages.
^^^^^^^^^^^^^^^^^^^^^^^^^
Non-Downloadable Tarballs
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -420,7 +274,6 @@ the tarballs in question to it (see :ref:`mirrors`):
$ spack install galahad
-------------------------
Seeing installed packages
-------------------------
@@ -478,19 +331,11 @@ Packages are divided into groups according to their architecture and
compiler. Within each group, Spack tries to keep the view simple, and
only shows the version of installed packages.
.. _cmd-spack-find-metadata:
""""""""""""""""""""""""""""""""
Viewing more metadata
""""""""""""""""""""""""""""""""
``spack find`` can filter the package list based on the package name,
spec, or a number of properties of their installation status. For
example, missing dependencies of a spec can be shown with
``--missing``, deprecated packages can be included with
``--deprecated``, packages which were explicitly installed with
``spack install <package>`` can be singled out with ``--explicit`` and
those which have been pulled in only as dependencies with
``spack find`` can filter the package list based on the package name, spec, or
a number of properties of their installation status. For example, missing
dependencies of a spec can be shown with ``--missing``, packages which were
explicitly installed with ``spack install <package>`` can be singled out with
``--explicit`` and those which have been pulled in only as dependencies with
``--implicit``.
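For instance, a quick illustrative sketch (the output below is hypothetical, not from a real session):
.. code-block:: console
$ spack find --explicit
==> 2 installed packages
-- linux-debian7-x86_64 / gcc@4.4.7 -----------------------------
mpileaks@1.0  zlib@1.2.11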
In some cases, there may be different configurations of the *same*
@@ -543,8 +388,8 @@ use ``spack find --paths``:
callpath@1.0.2 ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/callpath@1.0.2-5dce4318
...
You can restrict your search to a particular package by supplying its
name:
And, finally, you can restrict your search to a particular package
by supplying its name:
.. code-block:: console
@@ -554,10 +399,6 @@ name:
libelf@0.8.12 ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/libelf@0.8.12
libelf@0.8.13 ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/libelf@0.8.13
""""""""""""""""""""""""""""""""
Spec queries
""""""""""""""""""""""""""""""""
``spack find`` actually does a lot more than this. You can use
*specs* to query for specific configurations and builds of each
package. If you want to find only libelf versions greater than version
@@ -585,198 +426,6 @@ with the 'debug' compile-time option enabled.
The full spec syntax is discussed in detail in :ref:`sec-specs`.
""""""""""""""""""""""""""""""""
Machine-readable output
""""""""""""""""""""""""""""""""
If you only want to see very specific things about installed packages,
Spack has some options for you. ``spack find --format`` can be used to
output only specific fields:
.. code-block:: console
$ spack find --format "{name}-{version}-{hash}"
autoconf-2.69-icynozk7ti6h4ezzgonqe6jgw5f3ulx4
automake-1.16.1-o5v3tc77kesgonxjbmeqlwfmb5qzj7zy
bzip2-1.0.6-syohzw57v2jfag5du2x4bowziw3m5p67
bzip2-1.0.8-zjny4jwfyvzbx6vii3uuekoxmtu6eyuj
cmake-3.15.1-7cf6onn52gywnddbmgp7qkil4hdoxpcb
...
or:
.. code-block:: console
$ spack find --format "{hash:7}"
icynozk
o5v3tc7
syohzw5
zjny4jw
7cf6onn
...
This uses the same syntax as described in documentation for
:meth:`~spack.spec.Spec.format` -- you can use any of the options there.
This is useful for passing metadata about packages to other command-line
tools.
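As a small sketch of that idea (assuming a single matching ``libelf`` installation; the command is illustrative):
.. code-block:: console
$ du -sh $(spack find --format "{prefix}" libelf)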
Alternately, if you want something even more machine readable, you can
output each spec as JSON records using ``spack find --json``. This will
output metadata on specs and all dependencies as json:
.. code-block:: console
$ spack find --json sqlite@3.28.0
[
{
"name": "sqlite",
"hash": "3ws7bsihwbn44ghf6ep4s6h4y2o6eznv",
"version": "3.28.0",
"arch": {
"platform": "darwin",
"platform_os": "mojave",
"target": "x86_64"
},
"compiler": {
"name": "apple-clang",
"version": "10.0.0"
},
"namespace": "builtin",
"parameters": {
"fts": true,
"functions": false,
"cflags": [],
"cppflags": [],
"cxxflags": [],
"fflags": [],
"ldflags": [],
"ldlibs": []
},
"dependencies": {
"readline": {
"hash": "722dzmgymxyxd6ovjvh4742kcetkqtfs",
"type": [
"build",
"link"
]
}
}
},
...
]
You can use this with tools like `jq <https://stedolan.github.io/jq/>`_ to quickly create JSON records
structured the way you want:
.. code-block:: console
$ spack find --json sqlite@3.28.0 | jq -C '.[] | { name, version, hash }'
{
"name": "sqlite",
"version": "3.28.0",
"hash": "3ws7bsihwbn44ghf6ep4s6h4y2o6eznv"
}
{
"name": "readline",
"version": "7.0",
"hash": "722dzmgymxyxd6ovjvh4742kcetkqtfs"
}
{
"name": "ncurses",
"version": "6.1",
"hash": "zvaa4lhlhilypw5quj3akyd3apbq5gap"
}
------------------------
Using installed packages
------------------------
There are several different ways to use Spack packages once you have
installed them. As you've seen, spack packages are installed into long
paths with hashes, and you need a way to get them into your path. The
easiest way is to use :ref:`spack load <cmd-spack-load>`, which is
described in the next section.
Some more advanced ways to use Spack packages include:
* :ref:`environments <environments>`, which you can use to bundle a
number of related packages to "activate" all at once, and
* :ref:`environment modules <modules>`, which are commonly used on
supercomputing clusters. Spack generates module files for every
installation automatically, and you can customize how this is done.
.. _cmd-spack-load:
^^^^^^^^^^^^^^^^^^^^^^^
``spack load / unload``
^^^^^^^^^^^^^^^^^^^^^^^
If you have :ref:`shell support <shell-support>` enabled you can use the
``spack load`` command to quickly get a package on your ``PATH``.
For example this will add the ``mpich`` package built with ``gcc`` to
your path:
.. code-block:: console
$ spack install mpich %gcc@4.4.7
# ... wait for install ...
$ spack load mpich %gcc@4.4.7
$ which mpicc
~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4/bin/mpicc
These commands will add appropriate directories to your ``PATH``,
``MANPATH``, ``CPATH``, and ``LD_LIBRARY_PATH`` according to the
:ref:`prefix inspections <customize-env-modifications>` defined in your
modules configuration. When you no longer want to use a package, you
can type unload or unuse similarly:
.. code-block:: console
$ spack unload mpich %gcc@4.4.7
"""""""""""""""
Ambiguous specs
"""""""""""""""
If a spec used with load/unload is ambiguous (i.e. more than one
installed package matches it), then Spack will warn you:
.. code-block:: console
$ spack load libelf
==> Error: libelf matches multiple packages.
Matching packages:
qmm4kso libelf@0.8.13%gcc@4.4.7 arch=linux-debian7-x86_64
cd2u6jt libelf@0.8.13%intel@15.0.0 arch=linux-debian7-x86_64
Use a more specific spec
You can either type the ``spack load`` command again with a fully
qualified argument, or you can add just enough extra constraints to
identify one package. For example, above, the key differentiator is
that one ``libelf`` is built with the Intel compiler, while the other
used ``gcc``. You could therefore just type:
.. code-block:: console
$ spack load libelf %intel
To identify just the one built with the Intel compiler. If you want to be
*very* specific, you can load it by its hash. For example, to load the
first ``libelf`` above, you would run:
.. code-block:: console
$ spack load /qmm4kso
We'll learn more about Spack's spec syntax in the next section.
.. _sec-specs:
--------------------
@@ -796,11 +445,11 @@ Here is an example of a much longer spec than we've seen thus far:
.. code-block:: none
mpileaks @1.2:1.4 %gcc@4.7.5 +debug -qt target=x86_64 ^callpath @1.1 %gcc@4.7.2
mpileaks @1.2:1.4 %gcc@4.7.5 +debug -qt arch=bgq_os ^callpath @1.1 %gcc@4.7.2
If provided to ``spack install``, this will install the ``mpileaks``
library at some version between ``1.2`` and ``1.4`` (inclusive),
built using ``gcc`` at version 4.7.5 for a generic ``x86_64`` architecture,
built using ``gcc`` at version 4.7.5 for the Blue Gene/Q architecture,
with debug options enabled, and without Qt support. Additionally, it
says to link it with the ``callpath`` library (which it depends on),
and to build callpath with ``gcc`` 4.7.2. Most specs will not be as
@@ -963,7 +612,7 @@ Variants are named options associated with a particular package. They are
optional, as each package must provide default values for each variant it
makes available. Variants can be specified using
a flexible parameter syntax ``name=<value>``. For example,
``spack install libelf debug=True`` will install libelf built with debug
``spack install libelf debug=True`` will install libelf build with debug
flags. The names of particular variants available for a package depend on
what was provided by the package author. ``spack info <package>`` will
provide information on what build variants are available.
@@ -1024,10 +673,10 @@ compile line.
Notice that the value of the compiler flags must be quoted if it
contains any spaces. Any of ``cppflags=-O3``, ``cppflags="-O3"``,
``cppflags='-O3'``, and ``cppflags="-O3 -fPIC"`` are acceptable, but
``cppflags=-O3 -fPIC`` is not. Additionally, if the value of the
``cppflags=-O3 -fPIC`` is not. Additionally, if they value of the
compiler flags is not the last thing on the line, it must be followed
by a space. The command ``spack install libelf cppflags="-O3"%intel``
will be interpreted as an attempt to set ``cppflags="-O3%intel"``.
by a space. The commmand ``spack install libelf cppflags="-O3"%intel``
will be interpreted as an attempt to set `cppflags="-O3%intel"``.
The six compiler flags are injected in the order of implicit make commands
in GNU Autotools. If all flags are set, the order is
@@ -1038,13 +687,11 @@ in GNU Autotools. If all flags are set, the order is
Compiler environment variables and additional RPATHs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Sometimes compilers require setting special environment variables to
operate correctly. Spack handles these cases by allowing custom environment
modifications in the ``environment`` attribute of the compiler configuration
section. See also the :ref:`configuration_environment_variables` section
of the configuration files docs for more information.
It is also possible to specify additional ``RPATHs`` that the
In the exceptional case a compiler requires setting special environment
variables, like an explicit library load path. These can bet set in an
extra section in the compiler configuration (the supported environment
modification commands are: ``set``, ``unset``, ``append-path``, and
``prepend-path``). The user can also specify additional ``RPATHs`` that the
compiler will add to all executables generated by that compiler. This is
useful for forcing certain compilers to RPATH their own runtime libraries, so
that executables will run without the need to set ``LD_LIBRARY_PATH``.
@@ -1061,130 +708,44 @@ that executables will run without the need to set ``LD_LIBRARY_PATH``.
fc: /opt/gcc/bin/gfortran
environment:
unset:
- BAD_VARIABLE
BAD_VARIABLE: # The colon is required but the value must be empty
set:
GOOD_VARIABLE_NUM: 1
GOOD_VARIABLE_STR: good
prepend_path:
prepend-path:
PATH: /path/to/binutils
append_path:
append-path:
LD_LIBRARY_PATH: /opt/gcc/lib
extra_rpaths:
- /path/to/some/compiler/runtime/directory
- /path/to/some/other/compiler/runtime/directory
.. note::
The section `environment` is interpreted as an ordered dictionary, which
means two things. First, environment modifications are applied in the order
they are specified in the configuration file. Second, you cannot express
environment modifications that require mixing different commands, i.e. you
cannot `set` one variable, then `prepend-path` to another one, and then
again `set` a third one.
^^^^^^^^^^^^^^^^^^^^^^^
Architecture specifiers
^^^^^^^^^^^^^^^^^^^^^^^
Each node in the dependency graph of a spec has an architecture attribute.
This attribute is a triplet of platform, operating system and processor.
You can specify the elements either separately, by using
the reserved keywords ``platform``, ``os`` and ``target``:
.. code-block:: console
$ spack install libelf platform=linux
$ spack install libelf os=ubuntu18.04
$ spack install libelf target=broadwell
or together by using the reserved keyword ``arch``:
The architecture can be specified by using the reserved
words ``target`` and/or ``os`` (``target=x86-64 os=debian7``). You can also
use the triplet form of platform, operating system and processor.
.. code-block:: console
$ spack install libelf arch=cray-CNL10-haswell
Normally users don't have to bother specifying the architecture if they
are installing software for their current host, as in that case the
values will be detected automatically. If you need fine-grained control
over which packages use which targets (or over *all* packages' default
target), see :ref:`concretization-preferences`.
.. admonition:: Cray machines
The situation is a little bit different for Cray machines and a detailed
explanation on how the architecture can be set on them can be found at :ref:`cray-support`
.. _support-for-microarchitectures:
"""""""""""""""""""""""""""""""""""""""
Support for specific microarchitectures
"""""""""""""""""""""""""""""""""""""""
Spack knows how to detect and optimize for many specific microarchitectures
(including recent Intel, AMD and IBM chips) and encodes this information in
the ``target`` portion of the architecture specification. A complete list of
the microarchitectures known to Spack can be obtained in the following way:
.. command-output:: spack arch --known-targets
When a spec is installed Spack matches the compiler being used with the
microarchitecture being targeted to inject appropriate optimization flags
at compile time. Giving a command such as the following:
.. code-block:: console
$ spack install zlib%gcc@9.0.1 target=icelake
will produce compilation lines similar to:
.. code-block:: console
$ /usr/bin/gcc-9 -march=icelake-client -mtune=icelake-client -c ztest10532.c
$ /usr/bin/gcc-9 -march=icelake-client -mtune=icelake-client -c -fPIC -O2 ztest10532.
...
where the flags ``-march=icelake-client -mtune=icelake-client`` are injected
by Spack based on the requested target and compiler.
If Spack knows that the requested compiler can't optimize for the current target
or can't build binaries for that target at all, it will exit with a meaningful error message:
.. code-block:: console
$ spack install zlib%gcc@5.5.0 target=icelake
==> Error: cannot produce optimized binary for micro-architecture "icelake" with gcc@5.5.0 [supported compiler versions are 8:]
When instead an old compiler is selected on a recent enough microarchitecture but there is
no explicit ``target`` specification, Spack will optimize for the best match it can find instead
of failing:
.. code-block:: console
$ spack arch
linux-ubuntu18.04-broadwell
$ spack spec zlib%gcc@4.8
Input spec
--------------------------------
zlib%gcc@4.8
Concretized
--------------------------------
zlib@1.2.11%gcc@4.8+optimize+pic+shared arch=linux-ubuntu18.04-haswell
$ spack spec zlib%gcc@9.0.1
Input spec
--------------------------------
zlib%gcc@9.0.1
Concretized
--------------------------------
zlib@1.2.11%gcc@9.0.1+optimize+pic+shared arch=linux-ubuntu18.04-broadwell
In the snippet above, for instance, the microarchitecture was demoted to ``haswell`` when
compiling with ``gcc@4.8`` since support to optimize for ``broadwell`` starts from ``gcc@4.9:``.
Finally, if Spack has no information to match compiler and target, it will
proceed with the installation but avoid injecting any microarchitecture
specific flags.
.. warning::
Currently, Spack doesn't print any warning to the user if it has no information
on which optimization flags should be used for a given compiler. This behavior
might change in the future.
Users on non-Cray systems won't have to worry about specifying the architecture.
Spack will autodetect what kind of operating system is on your machine as well
as the processor. For more information on how the architecture can be
used on Cray machines, see :ref:`cray-support`
.. _sec-virtual-dependencies:
@@ -1192,7 +753,7 @@ specific flags.
Virtual dependencies
--------------------
The dependency graph for ``mpileaks`` we saw above wasn't *quite*
The dependence graph for ``mpileaks`` we saw above wasn't *quite*
accurate. ``mpileaks`` uses MPI, which is an interface that has many
different implementations. Above, we showed ``mpileaks`` and
``callpath`` depending on ``mpich``, which is one *particular*
@@ -1335,90 +896,6 @@ add a version specifier to the spec:
Notice that the package versions that provide insufficient MPI
versions are now filtered out.
-----------------------------
Deprecating insecure packages
-----------------------------
``spack deprecate`` allows for the removal of insecure packages with
minimal impact to their dependents.
.. warning::
The ``spack deprecate`` command is designed for use only in
extraordinary circumstances. This is a VERY big hammer to be used
with care.
The ``spack deprecate`` command will remove one package and replace it
with another by replacing the deprecated package's prefix with a link
to the deprecator package's prefix.
.. warning::
The ``spack deprecate`` command makes no promises about binary
compatibility. It is up to the user to ensure the deprecator is
suitable for the deprecated package.
Spack tracks concrete deprecated specs and ensures that no future packages
concretize to a deprecated spec.
The first spec given to the ``spack deprecate`` command is the package
to deprecate. It is an abstract spec that must describe a single
installed package. The second spec argument is the deprecator
spec. By default it must be an abstract spec that describes a single
installed package, but with the ``-i/--install-deprecator`` it can be
any abstract spec that Spack will install and then use as the
deprecator. The ``-I/--no-install-deprecator`` option will ensure
the default behavior.
By default, ``spack deprecate`` will deprecate all dependencies of the
deprecated spec, replacing each by the dependency of the same name in
the deprecator spec. The ``-d/--dependencies`` option will ensure the
default, while the ``-D/--no-dependencies`` option will deprecate only
the root of the deprecated spec in favor of the root of the deprecator
spec.
``spack deprecate`` can use symbolic links or hard links. The default
behavior is symbolic links, but the ``-l/--link-type`` flag can take
options ``hard`` or ``soft``.
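For illustration, a hedged sketch of a typical invocation (the ``openssl`` versions are hypothetical):
.. code-block:: console
$ # replace an installed openssl with an already-installed newer one
$ spack deprecate openssl@1.0.2u openssl@1.1.1g
$ # or let Spack install the deprecator first
$ spack deprecate -i openssl@1.0.2u openssl@1.1.1g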
-----------------------
Verifying installations
-----------------------
The ``spack verify`` command can be used to verify the validity of
Spack-installed packages any time after installation.
At installation time, Spack creates a manifest of every file in the
installation prefix. For links, Spack tracks the mode, ownership, and
destination. For directories, Spack tracks the mode and
ownership. For files, Spack tracks the mode, ownership, modification
time, hash, and size. The ``spack verify`` command will check, for every
file in each package, whether any of those attributes have changed. It
will also check for newly added or deleted files in the
installation prefix. Spack can either check all installed packages
using the ``-a,--all`` option, or accept specs listed on the command line
to verify.
The ``spack verify`` command can also verify for individual files that
they haven't been altered since installation time. If the given file
is not in a Spack installation prefix, Spack will report that it is
not owned by any package. To check individual files instead of specs,
use the ``-f,--files`` option.
Spack installation manifests are part of the tarball signed by Spack
for binary package distribution. When installed from a binary package,
Spack uses the packaged installation manifest instead of creating one
at install time.
The ``spack verify`` command also accepts the ``-l,--local`` option to
check only local packages (as opposed to those used transparently from
``upstream`` spack instances) and the ``-j,--json`` option to output
machine-readable json data for any errors.
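A brief illustrative session (the file path is a stand-in; output appears only when discrepancies are found):
.. code-block:: console
$ # check every installed package against its manifest
$ spack verify -a
$ # check a single file instead of whole specs
$ spack verify -f /path/to/an/installed/file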
.. _extensions:
---------------------------
Extensions & Python support
---------------------------
@@ -1426,7 +903,8 @@ Extensions & Python support
Spack's installation model assumes that each package will live in its
own install prefix. However, certain packages are typically installed
*within* the directory hierarchy of other packages. For example,
`Python <https://www.python.org>`_ packages are typically installed in the
modules in interpreted languages like `Python
<https://www.python.org>`_ are typically installed in the
``$prefix/lib/python-2.7/site-packages`` directory.
Spack has support for this type of installation as well. In Spack,
@@ -1502,41 +980,33 @@ directly when you run ``python``:
Using Extensions
^^^^^^^^^^^^^^^^
There are four ways to get ``numpy`` working in Python. The first is
to use :ref:`shell-support`. You can simply ``load`` the extension,
and it will be added to the ``PYTHONPATH`` in your current shell:
There are three ways to get ``numpy`` working in Python. The first is
to use :ref:`shell-support`. You can simply ``use`` or ``load`` the
module for the extension, and it will be added to the ``PYTHONPATH``
in your current shell.
For tcl modules:
.. code-block:: console
$ spack load python
$ spack load py-numpy
Now ``import numpy`` will succeed for as long as you keep your current
session open.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Loading Extensions via Modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Instead of using Spack's environment modification capabilities through
the ``spack load`` command, you can load numpy through your
environment modules (using ``environment-modules`` or ``lmod``). This
will also add the extension to the ``PYTHONPATH`` in your current
shell.
or, for dotkit:
.. code-block:: console
$ module load <name of numpy module>
$ spack use python
$ spack use py-numpy
If you do not know the name of the specific numpy module you wish to
load, you can use the ``spack module tcl|lmod loads`` command to get
the name of the module from the Spack spec.
Now ``import numpy`` will succeed for as long as you keep your current
session open.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Activating Extensions in a View
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Another way to use extensions is to create a view, which merges the
The second way to use extensions is to create a view, which merges the
python installation along with the extensions into a single prefix.
See :ref:`filesystem-views` for a more in-depth description of views and
:ref:`cmd-spack-view` for usage of the ``spack view`` command.
@@ -1602,12 +1072,12 @@ packages listed as activated:
py-nose@1.3.4 py-numpy@1.9.1 py-setuptools@11.3.1
Now, when a user runs python, ``numpy`` will be available for import
*without* the user having to explicitly load it. ``python@2.7.8`` now
*without* the user having to explicitly loaded. ``python@2.7.8`` now
acts like a system Python installation with ``numpy`` installed inside
of it.
Spack accomplishes this by symbolically linking the *entire* prefix of
the ``py-numpy`` package into the prefix of the ``python`` package. To the
the ``py-numpy`` into the prefix of the ``python`` package. To the
python interpreter, it looks like ``numpy`` is installed in the
``site-packages`` directory.

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -57,12 +57,6 @@ Build caches are installed via:
$ spack buildcache install
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
List of popular build caches
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* `Extreme-scale Scientific Software Stack (E4S) <https://e4s-project.github.io/>`_: `build cache <https://oaciss.uoregon.edu/e4s/inventory.html>`_
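As a sketch (the mirror name and URL are placeholders, not a real endpoint), pointing Spack at a build cache and browsing its contents might look like:
.. code-block:: console
$ spack mirror add mymirror https://example.com/build_cache
$ spack buildcache list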
----------
Relocation

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -57,37 +57,22 @@ directory. Here's an example of an external configuration:
packages:
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
paths:
openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7: /opt/openmpi-1.4.3
openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7+debug: /opt/openmpi-1.4.3-debug
openmpi@1.6.5%intel@10.1 arch=linux-x86_64-debian7: /opt/openmpi-1.6.5-intel
This example lists three installations of OpenMPI, one built with GCC,
one built with GCC and debug information, and another built with Intel.
If Spack is asked to build a package that uses one of these MPIs as a
dependency, it will use the pre-installed OpenMPI in
the given directory. Note that the specified path is the top-level
install prefix, not the ``bin`` subdirectory.
the given directory. ``packages.yaml`` can also be used to specify modules
to load instead of the installation prefixes.
``packages.yaml`` can also be used to specify modules to load instead
of the installation prefixes. The following example says that module
``CMake/3.7.2`` provides cmake version 3.7.2.
.. code-block:: yaml
cmake:
externals:
- spec: cmake@3.7.2
modules:
- CMake/3.7.2
Each ``packages.yaml`` begins with a ``packages:`` attribute, followed
by a list of package names. To specify externals, add an ``externals:``
attribute under the package name, which lists externals.
Each external should specify a ``spec:`` string that should be as
Each ``packages.yaml`` begins with a ``packages:`` token, followed
by a list of package names. To specify externals, add a ``paths`` or ``modules``
token under the package name, which lists externals in a
``spec: /path`` or ``spec: module-name`` format. Each spec should be as
well-defined as reasonably possible. If a
package lacks a spec component, such as missing a compiler or
package version, then Spack will guess the missing component based
@@ -111,13 +96,10 @@ be:
packages:
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
paths:
openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7: /opt/openmpi-1.4.3
openmpi@1.4.3%gcc@4.4.7 arch=linux-x86_64-debian7+debug: /opt/openmpi-1.4.3-debug
openmpi@1.6.5%intel@10.1 arch=linux-x86_64-debian7: /opt/openmpi-1.6.5-intel
buildable: False
The addition of the ``buildable`` flag tells Spack that it should never build
@@ -132,82 +114,6 @@ The ``buildable`` does not need to be paired with external packages.
It could also be used alone to forbid packages that may be
buggy or otherwise undesirable.
Virtual packages in Spack can also be specified as not buildable, and
external implementations can be provided. In the example above,
OpenMPI is configured as not buildable, but Spack will often prefer
other MPI implementations over the externally available OpenMPI. Spack
can be configured with every MPI provider not buildable individually,
but more conveniently:
.. code-block:: yaml
packages:
mpi:
buildable: False
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
Implementations can also be listed immediately under the virtual they provide:
.. code-block:: yaml
packages:
mpi:
buildable: False
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64: /opt/openmpi-1.4.3
openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug: /opt/openmpi-1.4.3-debug
openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64: /opt/openmpi-1.6.5-intel
mpich@3.3 %clang@9.0.0 arch=linux-debian7-x86_64: /opt/mpich-3.3-intel
Spack can then use any of the listed external implementations of MPI
to satisfy a dependency, and will choose depending on the compiler and
architecture.
.. _cmd-spack-external-find:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Automatically Find External Packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can run the :ref:`spack external find <spack-external-find>` command
to search for system-provided packages and add them to ``packages.yaml``.
After running this command your ``packages.yaml`` may include new entries:
.. code-block:: yaml
packages:
cmake:
externals:
- spec: cmake@3.17.2
prefix: /usr
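For reference, a hedged sketch of the detection step itself (the reported path and version are illustrative):
.. code-block:: console
$ spack external find cmake
==> The following specs have been detected on this system and added to /home/user/.spack/packages.yaml
cmake@3.17.2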
Generally this is useful for detecting a small set of commonly-used packages;
for now this is generally limited to finding build-only dependencies.
Specific limitations include:
* Packages are not discoverable by default: For a package to be
discoverable with ``spack external find``, it needs to add special
logic. See :ref:`here <make-package-findable>` for more details.
* The current implementation only collects and examines executable files,
so it is typically only useful for build/run dependencies (in some cases
if a library package also provides an executable, it may be possible to
extract a meaningful Spec by running the executable - for example the
compiler wrappers in MPI implementations).
* The logic does not search through module files; it can only detect
packages with executables defined in ``PATH``; you can help Spack locate
externals which use module files by loading any associated modules for
packages that you want Spack to know about before running
``spack external find``.
* Spack does not overwrite existing entries in the package configuration:
If there is an external defined for a spec at any configuration scope,
then Spack will not add a new external entry (``spack config blame packages``
can help locate all external entries).
.. _concretization-preferences:
@@ -232,8 +138,7 @@ Here's an example ``packages.yaml`` file that sets preferred packages:
gperftools:
version: [2.2, 2.4, 2.3]
all:
compiler: [gcc@4.4.7, 'gcc@4.6:', intel, clang, pgi]
target: [sandybridge]
compiler: [gcc@4.4.7, gcc@4.6:, intel, clang, pgi]
providers:
mpi: [mvapich2, mpich, openmpi]
@@ -247,11 +152,11 @@ on the command line if explicitly requested.
Each ``packages.yaml`` file begins with the string ``packages:`` and
package names are specified on the next level. The special string ``all``
applies settings to *all* packages. Underneath each package name is one
or more components: ``compiler``, ``variants``, ``version``,
``providers``, and ``target``. Each component has an ordered list of
spec ``constraints``, with earlier entries in the list being preferred
over later entries.
applies settings to each package. Underneath each package name is
one or more components: ``compiler``, ``variants``, ``version``,
or ``providers``. Each component has an ordered list of spec
``constraints``, with earlier entries in the list being preferred over
later entries.
Sometimes a package installation may have constraints that forbid
the first concretization rule, in which case Spack will use the first

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -29,7 +29,6 @@ on these ideas for each distinct build system that Spack supports:
:maxdepth: 1
:caption: Make-incompatible
build_systems/mavenpackage
build_systems/sconspackage
build_systems/wafpackage
@@ -41,7 +40,6 @@ on these ideas for each distinct build system that Spack supports:
build_systems/cmakepackage
build_systems/mesonpackage
build_systems/qmakepackage
build_systems/sippackage
.. toctree::
:maxdepth: 1
@@ -57,11 +55,8 @@ on these ideas for each distinct build system that Spack supports:
:maxdepth: 1
:caption: Other
build_systems/bundlepackage
build_systems/cudapackage
build_systems/inteloneapipackage
build_systems/intelpackage
build_systems/rocmpackage
build_systems/custompackage
For reference, the :py:mod:`Build System API docs <spack.build_systems>`

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -233,124 +233,7 @@ You may have noticed that most of the Autotools flags are of the form
``--without-baz``. Since these flags are so common, Spack provides a
couple of helper functions to make your life easier.
"""""""""""""""""
enable_or_disable
"""""""""""""""""
Autotools flags for simple boolean variants can be automatically
generated by calling the ``enable_or_disable`` method. This is
typically used to enable or disable some feature within the package.
.. code-block:: python
variant(
'memchecker',
default=False,
description='Memchecker support for debugging [degrades performance]'
)
config_args.extend(self.enable_or_disable('memchecker'))
In this example, specifying the variant ``+memchecker`` will generate
the following configuration options:
.. code-block:: console
--enable-memchecker
"""""""""""""""
with_or_without
"""""""""""""""
Autotools flags for more complex variants, including boolean variants
and multi-valued variants, can be automatically generated by calling
the ``with_or_without`` method.
.. code-block:: python
variant(
'schedulers',
values=disjoint_sets(
('auto',), ('alps', 'lsf', 'tm', 'slurm', 'sge', 'loadleveler')
).with_non_feature_values('auto', 'none'),
description="List of schedulers for which support is enabled; "
"'auto' lets openmpi determine",
)
if 'schedulers=auto' not in spec:
config_args.extend(self.with_or_without('schedulers'))
In this example, specifying the variant ``schedulers=slurm,sge`` will
generate the following configuration options:
.. code-block:: console
--with-slurm --with-sge
``enable_or_disable`` is actually functionally equivalent to
``with_or_without``, and accepts the same arguments and variant types;
but idiomatic autotools packages often follow these naming
conventions.
""""""""""""""""
activation_value
""""""""""""""""
Autotools parameters that require an option can still be automatically
generated, using the ``activation_value`` argument to
``with_or_without`` (or, rarely, ``enable_or_disable``).
.. code-block:: python
variant(
'fabrics',
values=disjoint_sets(
('auto',), ('psm', 'psm2', 'verbs', 'mxm', 'ucx', 'libfabric')
).with_non_feature_values('auto', 'none'),
description="List of fabrics that are enabled; "
"'auto' lets openmpi determine",
)
if 'fabrics=auto' not in spec:
config_args.extend(self.with_or_without('fabrics',
activation_value='prefix'))
``activation_value`` accepts a callable that generates the configure
parameter value given the variant value; but the special value
``prefix`` tells Spack to automatically use the dependency's
installation prefix, which is the most common use for such
parameters. In this example, specifying the variant
``fabrics=libfabric`` will generate the following configuration
options:
.. code-block:: console
--with-libfabric=</path/to/libfabric>
""""""""""""""""""""
activation overrides
""""""""""""""""""""
Finally, the behavior of either ``with_or_without`` or
``enable_or_disable`` can be overridden for specific variant
values. This is most useful for multi-valued variants where some of
the variant values require atypical behavior.
.. code-block:: python
def with_or_without_verbs(self, activated):
# Up through version 1.6, this option was named --with-openib.
# In version 1.7, it was renamed to be --with-verbs.
opt = 'verbs' if self.spec.satisfies('@1.7:') else 'openib'
if not activated:
return '--without-{0}'.format(opt)
return '--with-{0}={1}'.format(opt, self.spec['rdma-core'].prefix)
Defining ``with_or_without_verbs`` overrides the behavior of a
``fabrics=verbs`` variant, changing the configure-time option to
``--with-openib`` for older versions of the package and specifying an
alternative dependency name:
.. code-block::
--with-openib=</path/to/rdma-core>
TODO: document ``with_or_without`` and ``enable_or_disable``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configure script in a sub-directory

View File

@@ -1,52 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _bundlepackage:
-------------
BundlePackage
-------------
``BundlePackage`` represents a set of packages that are expected to work well
together, such as a collection of commonly used software libraries. The
associated software is specified as bundle dependencies.
^^^^^^^^
Creation
^^^^^^^^
Be sure to specify the ``bundle`` template if you are using ``spack create``
to generate a package from the template. For example, use the following
command to create a bundle package whose class name will be ``Mybundle``:
.. code-block:: console
$ spack create --template bundle --name mybundle
^^^^^^
Phases
^^^^^^
The ``BundlePackage`` base class does not provide any phases by default
since the bundle does not represent a build system.
^^^
URL
^^^
The ``url`` property does not have meaning since there is no package-specific
code to fetch.
^^^^^^^
Version
^^^^^^^
At least one ``version`` must be specified in order for the package to
build.

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -128,20 +128,17 @@ Adding flags to cmake
^^^^^^^^^^^^^^^^^^^^^
To add additional flags to the ``cmake`` call, simply override the
``cmake_args`` function. The following example defines values for the flags
``WHATEVER``, ``ENABLE_BROKEN_FEATURE``, ``DETECT_HDF5``, and ``THREADS`` with
and without the :py:meth:`~.CMakePackage.define` and
:py:meth:`~.CMakePackage.define_from_variant` helper functions:
``cmake_args`` function:
.. code-block:: python
def cmake_args(self):
args = [
'-DWHATEVER:STRING=somevalue',
self.define('ENABLE_BROKEN_FEATURE', False),
self.define_from_variant('DETECT_HDF5', 'hdf5'),
self.define_from_variant('THREADS'), # True if +threads
]
args = []
if '+hdf5' in self.spec:
args.append('-DDETECT_HDF5=ON')
else:
args.append('-DDETECT_HDF5=OFF')
return args

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -9,120 +9,35 @@
CudaPackage
-----------
Different from other packages, ``CudaPackage`` does not represent a build system.
Instead its goal is to simplify and unify usage of ``CUDA`` in other packages by providing a `mixin-class <https://en.wikipedia.org/wiki/Mixin>`__.
Different from other packages, ``CudaPackage`` does not represent a build
system. Instead its goal is to simplify and unify usage of ``CUDA`` in other
packages.
You can find source for the package at
`<https://github.com/spack/spack/blob/develop/lib/spack/spack/build_systems/cuda.py>`__.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Provided variants and dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^
Variants
^^^^^^^^
This package provides the following variants:
* **cuda**
This variant is used to enable/disable building with ``CUDA``. The default
is disabled (or ``False``).
* **cuda_arch**
This variant supports the optional specification of the architecture.
Valid values are maintained in the ``cuda_arch_values`` property and
are the numeric character equivalent of the compute capability version
(e.g., '10' for version 1.0). Each provided value affects associated
``CUDA`` dependencies and compiler conflicts.
GPUs and their compute capability versions are listed at
https://developer.nvidia.com/cuda-gpus .
^^^^^^^^^
Conflicts
^^^^^^^^^
Conflicts are used to prevent builds with known bugs or issues. While
base ``CUDA`` conflicts have been included with this package, you may
want to add more for your software.
For example, if your package requires ``cuda_arch`` to be specified when
``cuda`` is enabled, you can add the following conflict to your package
to terminate such build attempts with a suitable message:
.. code-block:: python
conflicts('cuda_arch=none', when='+cuda',
msg='CUDA architecture is required')
Similarly, if your software does not support all versions of the property,
you could add ``conflicts`` to your package for those versions. For example,
suppose your software does not work with CUDA compute capability versions
prior to SM 5.0 (``50``). You can add the following code to display a
custom message should a user attempt such a build:
.. code-block:: python
unsupported_cuda_archs = [
'10', '11', '12', '13',
'20', '21',
'30', '32', '35', '37'
]
for value in unsupported_cuda_archs:
conflicts('cuda_arch={0}'.format(value), when='+cuda',
msg='CUDA architecture {0} is not supported'.format(value))
^^^^^^^
Methods
^^^^^^^
This package provides one custom helper method, which is used to build
standard CUDA compiler flags.
**cuda_flags**
This built-in static method returns a list of command line flags
for the chosen ``cuda_arch`` value(s). The flags are intended to
be passed to the CUDA compiler driver (i.e., ``nvcc``).
This method must be explicitly called when you are creating the
arguments for your build in order to use the values.
``CudaPackage`` provides ``cuda`` variant (default to ``off``) to enable/disable
``CUDA``, and ``cuda_arch`` variant to optionally specify the architecture.
It also declares dependencies on the ``CUDA`` package ``depends_on('cuda@...')``
based on the architecture as well as specifies conflicts for certain compiler versions.
^^^^^
Usage
^^^^^
This helper package can be added to your package by adding it as a base
class of your package. For example, you can add it to your
:ref:`CMakePackage <cmakepackage>`-based package as follows:
In order to use it, just add another base class to your package, for example:
.. code-block:: python
:emphasize-lines: 1,7-16
class MyCudaPackage(CMakePackage, CudaPackage):
class MyPackage(CMakePackage, CudaPackage):
...
def cmake_args(self):
spec = self.spec
args = []
...
if '+cuda' in spec:
# Set up the cuda macros needed by the build
args.append('-DWITH_CUDA=ON')
cuda_arch_list = spec.variants['cuda_arch'].value
cuda_arch = cuda_arch_list[0]
if cuda_arch != 'none':
args.append('-DCUDA_FLAGS=-arch=sm_{0}'.format(cuda_arch))
options.append('-DWITH_CUDA=ON')
cuda_arch = spec.variants['cuda_arch'].value
if cuda_arch is not None:
options.append('-DCUDA_FLAGS=-arch=sm_{0}'.format(cuda_arch[0]))
else:
# Ensure build with cuda is disabled
args.append('-DWITH_CUDA=OFF')
...
return args
assuming only the ``WITH_CUDA`` and ``CUDA_FLAGS`` flags are required.
You will need to customize options as needed for your build.
This example also illustrates how to check for the ``cuda`` variant using
``self.spec`` and how to retrieve the ``cuda_arch`` variant's value, which
is a list, using ``self.spec.variants['cuda_arch'].value``.
With over 70 packages using ``CudaPackage`` as of January 2021, there are
lots of examples to choose from to get more ideas for using this package.
options.append('-DWITH_CUDA=OFF')

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,137 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _inteloneapipackage:
====================
IntelOneapiPackage
====================
.. contents::
oneAPI packages in Spack
========================
Spack can install and use the Intel oneAPI products. You may either
use spack to install the oneAPI tools or use the `Intel
installers`_. After installation, you may use the tools directly, or
use Spack to build packages with the tools.
The Spack Python class ``IntelOneapiPackage`` is a base class that is
used by ``IntelOneapiCompilers``, ``IntelOneapiMkl``,
``IntelOneapiTbb`` and other classes to implement the oneAPI
packages. See the :ref:`package-list` for the full list of available
oneAPI packages or use::
spack list -d oneAPI
For more information on a specific package, do::
spack info <package-name>
Intel no longer releases new versions of Parallel Studio, which can be
used in Spack via the :ref:`intelpackage`. All of its components can
now be found in oneAPI.
Example
=======
We start with a simple example that will be sufficient for most
users. Install the oneAPI compilers::
spack install intel-oneapi-compilers
Add the oneAPI compilers to the set of compilers that Spack can use::
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin/intel64
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin
This adds the compilers to your ``compilers.yaml``. Verify that the
compilers are available::
spack compiler list
The ``intel-oneapi-compilers`` package includes 2 families of
compilers:
* ``intel``: ``icc``, ``icpc``, ``ifort``. Intel's *classic*
compilers.
* ``oneapi``: ``icx``, ``icpx``, ``ifx``. Intel's new generation of
compilers based on LLVM.
To build the ``patchelf`` Spack package with ``icc``, do::
spack install patchelf%intel
To build with ``icx``, do::
spack install patchelf%oneapi
In addition to compilers, oneAPI contains many libraries. The ``hdf5``
package works with any compatible MPI implementation. To build
``hdf5`` with Intel oneAPI MPI do::
spack install hdf5 +mpi ^intel-oneapi-mpi
Using an Externally Installed oneAPI
====================================
Spack can also use oneAPI tools that are manually installed with
`Intel Installers`_. The procedures for configuring Spack to use
external compilers and libraries are different.
Compilers
---------
To use the compilers, add some information about the installation to
``compilers.yaml``. For most users, it is sufficient to do::
spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin/intel64
spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin
Adapt the paths above if you did not install the tools in the default
location. After adding the compilers, using them in Spack will be
exactly the same as if you had installed the
``intel-oneapi-compilers`` package. Another option is to manually add
the configuration to ``compilers.yaml`` as described in :ref:`Compiler
configuration <compiler-config>`.
Using oneAPI Tools Installed by Spack
=====================================
Spack can be a convenient way to install and configure compilers and
libraries, even if you do not intend to build a Spack package. If you
want to build a Makefile project using Spack-installed oneAPI compilers,
then use spack to configure your environment::
spack load intel-oneapi-compilers
And then you can build with::
CXX=icpx make
You can also use Spack-installed libraries. For example::
spack load intel-oneapi-mkl
Will update your environment CPATH, LIBRARY_PATH, and other
environment variables for building an application with MKL.
More information
================
This section describes basic use of oneAPI, especially if it has
changed compared to Parallel Studio. See :ref:`intelpackage` for more
information on :ref:`intel-virtual-packages`,
:ref:`intel-unrelated-packages`,
:ref:`intel-integrating-external-libraries`, and
:ref:`using-mkl-tips`.
.. _`Intel installers`: https://software.intel.com/content/www/us/en/develop/documentation/installation-guide-for-intel-oneapi-toolkits-linux/top.html

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -120,7 +120,7 @@ version numbers seen with most other Spack packages. For example, we have:
...
Preferred version:
professional.2018.3 http:...
Safe versions:
professional.2018.3 http:...
...
@@ -137,7 +137,6 @@ If you need to save disk space or installation time, you could install the
``intel`` compilers-only subset (0.6 GB) and just the library packages you
need, for example ``intel-mpi`` (0.5 GB) and ``intel-mkl`` (2.5 GB).
.. _intel-unrelated-packages:
""""""""""""""""""""
Unrelated packages
@@ -359,8 +358,6 @@ affected by an advanced third method:
Next, visit section `Selecting Intel Compilers`_ to learn how to tell
Spack to use the newly configured compilers.
.. _intel-integrating-external-libraries:
""""""""""""""""""""""""""""""""""
Integrating external libraries
""""""""""""""""""""""""""""""""""
@@ -421,13 +418,9 @@ Adapt the following example. Be sure to maintain the indentation:
# other content ...
intel-mkl:
externals:
- spec: "intel-mkl@2018.2.199 arch=linux-centos6-x86_64"
modules:
- intel-mkl/18/18.0.2
- spec: "intel-mkl@2018.3.222 arch=linux-centos6-x86_64"
modules:
- intel-mkl/18/18.0.3
modules:
intel-mkl@2018.2.199 arch=linux-centos6-x86_64: intel-mkl/18/18.0.2
intel-mkl@2018.3.222 arch=linux-centos6-x86_64: intel-mkl/18/18.0.3
The version numbers for the ``intel-mkl`` specs defined here correspond to file
and directory names that Intel uses for its products because they were adopted
@@ -458,16 +451,12 @@ mechanism.
packages:
intel-parallel-studio:
externals:
- spec: "intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64"
modules:
- intel/18/18.0.2
- spec: "intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64"
modules:
- intel/18/18.0.3
modules:
intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64: intel/18/18.0.2
intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal arch=linux-centos6-x86_64: intel/18/18.0.3
buildable: False
One additional example illustrates the use of ``prefix:`` instead of
One additional example illustrates the use of ``paths:`` instead of
``modules:``, useful when external modulefiles are not available or not
suitable:
@@ -475,15 +464,13 @@ suitable:
packages:
intel-parallel-studio:
externals:
- spec: "intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal"
prefix: /opt/intel
- spec: "intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal"
prefix: /opt/intel
paths:
intel-parallel-studio@cluster.2018.2.199 +mkl+mpi+ipp+tbb+daal: /opt/intel
intel-parallel-studio@cluster.2018.3.222 +mkl+mpi+ipp+tbb+daal: /opt/intel
buildable: False
Note that for the Intel packages discussed here, the directory values in the
``prefix:`` entries must be the high-level and typically version-less
``paths:`` entries must be the high-level and typically version-less
"installation directory" that has been used by Intel's product installer.
Such a directory will typically accumulate various product versions. Amongst
them, Spack will select the correct version-specific product directory based on
@@ -566,7 +553,7 @@ follow `the next section <intel-install-libs_>`_ instead.
f77: stub
fc: stub
Replace ``18.0.3`` with the version that you determined in the preceding
Replace ``18.0.3`` with the version that you determined in the preceeding
step. The contents under ``paths:`` do not matter yet.
You are right to ask: "Why on earth is that necessary?" [fn8]_.
@@ -709,7 +696,7 @@ follow `the next section <intel-install-libs_>`_ instead.
- /home/$user/spack-stage
Do not duplicate the ``config:`` line if it already is present.
Adapt the location, which here is the same as in the preceding example.
Adapt the location, which here is the same as in the preceeding example.
3. Retry installing the large package.
@@ -741,7 +728,7 @@ For packages that contain a compiler, follow `the previous section
.. code-block:: console
$ spack install intel-mpi@2018.3.199
$ spack install intel-mpi@2018.3.199
$ spack install intel-mpi@2018.3.199 %intel@18
4. To prepare the new packages for use with client packages,
@@ -815,7 +802,7 @@ by one of the following means:
Configure the order of compilers in the appropriate ``packages.yaml`` file,
under either an ``all:`` or client-package-specific entry, in a
``compiler:`` list. Consult the Spack documentation for
`Configuring Package Preferences <https://spack-tutorial.readthedocs.io/en/latest/tutorial_configuration.html#configuring-package-preferences>`_
:ref:`Configuring Package Preferences <configs-tutorial-package-prefs>`
and
:ref:`Concretization Preferences <concretization-preferences>`.
@@ -837,7 +824,6 @@ for example:
compiler: [ intel@18, intel@17, gcc@4.4.7, gcc@4.9.3, gcc@7.3.0, ]
.. _intel-virtual-packages:
""""""""""""""""""""""""""""""""""""""""""""""""
Selecting libraries to satisfy virtual packages
@@ -865,7 +851,7 @@ client packages, edit the ``packages.yaml`` file. Customize, either in the
the virtual packages and whose values are the Spack specs that satisfy the
virtual package, in order of decreasing preference. To learn more about the
``providers:`` settings, see the Spack tutorial for
`Configuring Package Preferences <https://spack-tutorial.readthedocs.io/en/latest/tutorial_configuration.html#configuring-package-preferences>`_
:ref:`Configuring Package Preferences <configs-tutorial-package-prefs>`
and the section
:ref:`Concretization Preferences <concretization-preferences>`.
@@ -911,7 +897,6 @@ With the proper installation as detailed above, no special steps should be
required when a client package specifically (and thus deliberately) requests an
Intel package as dependency, this being one of the target use cases for Spack.
.. _using-mkl-tips:
"""""""""""""""""""""""""""""""""""""""""""""""
Tips for configuring client packages to use MKL
@@ -980,14 +965,14 @@ a *virtual* ``mkl`` package is declared in Spack.
Likewise, in a
:ref:`MakefilePackage <makefilepackage>`
or similar package that does not use AutoTools you may need to provide include
or similiar package that does not use AutoTools you may need to provide include
and link options for use on command lines or in environment variables.
For example, to generate an option string of the form ``-I<dir>``, use:
.. code-block:: python
self.spec['blas'].headers.include_flags
and to generate linker options (``-L<dir> -llibname ...``), use the same as above,
.. code-block:: python
@@ -1070,6 +1055,6 @@ Footnotes
2. Set the hash length in ``install-path-scheme``, also in ``config.yaml``
(:ref:`q.v. <config-yaml>`).
3. You will want to set the *same* hash length for
:ref:`module files <modules-projections>`
if you have Spack produce them for you, under ``projections`` in
``modules.yaml``.
:ref:`tcl module files <modules-naming-scheme>`
if you have Spack produce them for you, under ``naming_scheme`` in
``modules.yaml``. Other module dialects cannot be altered in this manner.
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
View File
@@ -1,102 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _mavenpackage:
------------
MavenPackage
------------
Apache Maven is a general-purpose build system that does not rely
on Makefiles to build software. It is designed for building and
managing any Java-based project.
^^^^^^
Phases
^^^^^^
The ``MavenPackage`` base class comes with the following phases:
#. ``build`` - compile code and package into a JAR file
#. ``install`` - copy to installation prefix
By default, these phases run:
.. code-block:: console
$ mvn package
$ install . <prefix>
^^^^^^^^^^^^^^^
Important files
^^^^^^^^^^^^^^^
Maven packages can be identified by the presence of a ``pom.xml`` file.
This file lists dependencies and other metadata about the project.
There may also be configuration files in the ``.mvn`` directory.
^^^^^^^^^^^^^^^^^^^^^^^^^
Build system dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^
Maven requires the ``mvn`` executable to build the project. It also
requires Java at both build- and run-time. Because of this, the base
class automatically adds the following dependencies:
.. code-block:: python
depends_on('java', type=('build', 'run'))
depends_on('maven', type='build')
In the ``pom.xml`` file, you may see sections like:
.. code-block:: xml
<requireJavaVersion>
<version>[1.7,)</version>
</requireJavaVersion>
<requireMavenVersion>
<version>[3.5.4,)</version>
</requireMavenVersion>
This specifies the versions of Java and Maven that are required to
build the package. See
https://docs.oracle.com/middleware/1212/core/MAVEN/maven_version.htm#MAVEN402
for a description of this version range syntax. In this case, you
should add:
.. code-block:: python
depends_on('java@7:', type='build')
depends_on('maven@3.5.4:', type='build')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Passing arguments to the build phase
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The default build and install phases should be sufficient to install
most packages. However, you may want to pass additional flags to
the build phase. For example:
.. code-block:: python
def build_args(self):
return [
'-Pdist,native',
'-Dtar',
'-Dmaven.javadoc.skip=true'
]
^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
For more information on the Maven build system, see:
https://maven.apache.org/index.html
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -54,28 +54,6 @@ Packages that use the Meson build system can be identified by the
presence of a ``meson.build`` file. This file declares things
like build instructions and dependencies.
One thing to look for is the ``meson_version`` key that gets passed
to the ``project`` function:
.. code-block:: none
:emphasize-lines: 10
project('gtk+', 'c',
version: '3.94.0',
default_options: [
'buildtype=debugoptimized',
'warning_level=1',
# We only need c99, but glib needs GNU-specific features
# https://github.com/mesonbuild/meson/issues/2289
'c_std=gnu99',
],
meson_version: '>= 0.43.0',
license: 'LGPLv2.1+')
This means that Meson 0.43.0 is the earliest release that will work.
You should specify this in a ``depends_on`` statement.
^^^^^^^^^^^^^^^^^^^^^^^^^
Build system dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -89,28 +67,6 @@ the ``MesonPackage`` base class already contains:
depends_on('meson', type='build')
depends_on('ninja', type='build')
If you need to specify a particular version requirement, you can
override this in your package:
.. code-block:: python
depends_on('meson@0.43.0:', type='build')
depends_on('ninja', type='build')
^^^^^^^^^^^^^^^^^^^
Finding meson flags
^^^^^^^^^^^^^^^^^^^
To get a list of valid flags that can be passed to ``meson``, run the
following command in the directory that contains ``meson.build``:
.. code-block:: console
$ meson setup --help
^^^^^^^^^^^^^^^^^^^^^^^^^^
Passing arguments to meson
^^^^^^^^^^^^^^^^^^^^^^^^^^
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -120,6 +120,8 @@ so ``PerlPackage`` contains:
extends('perl')
depends_on('perl', type=('build', 'run'))
If your package requires a specific version of Perl, you should
specify this.
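For example, a hypothetical requirement on Perl 5.24 or newer could be
expressed as:

.. code-block:: python

   depends_on('perl@5.24.0:', type=('build', 'run'))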
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -23,11 +23,20 @@ can be overridden:
* ``build_ext``
* ``build_clib``
* ``build_scripts``
* ``clean``
* ``install``
* ``install_lib``
* ``install_headers``
* ``install_scripts``
* ``install_data``
* ``sdist``
* ``register``
* ``bdist``
* ``bdist_dumb``
* ``bdist_rpm``
* ``bdist_wininst``
* ``upload``
* ``check``
These are all standard ``setup.py`` commands and can be found by running:
@@ -46,7 +55,7 @@ If for whatever reason you need to run more phases, simply modify your
.. code-block:: python
phases = ['build_ext', 'install']
phases = ['build_ext', 'install', 'bdist']
Each phase provides a function ``<phase>`` that runs:
@@ -72,24 +81,6 @@ you'll need to define a function for it like so:
self.setup_py('configure')
^^^^^^
Wheels
^^^^^^
Some Python packages are closed-source and distributed as wheels.
Instead of using the ``PythonPackage`` base class, you should extend
the ``Package`` base class and implement the following custom installation
procedure:
.. code-block:: python
def install(self, spec, prefix):
pip = which('pip')
pip('install', self.stage.archive_file, '--prefix={0}'.format(prefix))
This will require a dependency on pip, as mentioned below.
^^^^^^^^^^^^^^^
Important files
^^^^^^^^^^^^^^^
@@ -104,30 +95,9 @@ file should be considered to be the truth. As dependencies are added or
removed, the documentation is much more likely to become outdated than
the ``setup.py``.
The Python ecosystem has evolved significantly over the years. Before
setuptools became popular, most packages listed their dependencies in a
``requirements.txt`` file. Once setuptools took over, these dependencies
were listed directly in the ``setup.py``. Newer PEPs introduced additional
files, like ``setup.cfg`` and ``pyproject.toml``. You should look out for
all of these files, as they may all contain important information about
package dependencies.
Some Python packages are closed-source and are distributed as Python
wheels. For example, ``py-azureml-sdk`` downloads a ``.whl`` file. This
file is simply a zip file, and can be extracted using:
.. code-block:: console
$ unzip *.whl
The zip file will not contain a ``setup.py``, but it will contain a
``METADATA`` file which contains all the information you need to
write a ``package.py`` build recipe.
^^^^
PyPI
^^^^
^^^^^^^^^^^^^^^^^^^^^^^
Finding Python packages
^^^^^^^^^^^^^^^^^^^^^^^
The vast majority of Python packages are hosted on PyPI - The Python
Package Index. ``pip`` only supports packages hosted on PyPI, making
@@ -135,29 +105,8 @@ it the only option for developers who want a simple installation.
Search for "PyPI <package-name>" to find the download page. Note that
some pages are versioned, and the first result may not be the newest
version. Click on the "Latest Version" button to the top right to see
if a newer version is available. The download page is usually at::
https://pypi.org/project/<package-name>
Since PyPI is so common, the ``PythonPackage`` base class has a
``pypi`` attribute that can be set. Once set, ``pypi`` will be used
to define the ``homepage``, ``url``, and ``list_url``. For example,
the following:
.. code-block:: python
homepage = 'https://pypi.org/project/setuptools/'
url = 'https://pypi.org/packages/source/s/setuptools/setuptools-49.2.0.zip'
list_url = 'https://pypi.org/simple/setuptools/'
is equivalent to:
.. code-block:: python
pypi = 'setuptools/setuptools-49.2.0.zip'
if a newer version is available. The download page is usually at:
https://pypi.org/project/<package-name>
^^^^^^^^^^^
Description
@@ -195,38 +144,50 @@ also get the homepage on the command-line by running:
URL
^^^
If ``pypi`` is set as mentioned above, ``url`` and ``list_url`` will
be automatically set for you. If both ``.tar.gz`` and ``.zip`` versions
are available, ``.tar.gz`` is preferred. If some releases offer both
``.tar.gz`` and ``.zip`` versions, but some only offer ``.zip`` versions,
use ``.zip``.
You may have noticed that Spack allows you to add multiple versions of
the same package without adding multiple versions of the download URL.
It does this by guessing what the version string in the URL is and
replacing this with the requested version. Obviously, if Spack cannot
guess the version correctly, or if non-version-related things change
in the URL, Spack cannot substitute the version properly.
Some Python packages are closed-source and do not ship ``.tar.gz`` or ``.zip``
files on either PyPI or GitHub. If this is the case, you can still download
and install a Python wheel. For example, ``py-azureml-sdk`` is closed source
and can be downloaded from::
Once upon a time, PyPI offered nice, simple download URLs like:
https://pypi.python.org/packages/source/n/numpy/numpy-1.13.1.zip
https://pypi.io/packages/py3/a/azureml_sdk/azureml_sdk-1.11.0-py3-none-any.whl
As you can see, the version is 1.13.1. It probably isn't hard to guess
what URL to use to download version 1.12.0, and Spack was perfectly
capable of performing this calculation.
However, PyPI switched to a new download URL format:
https://pypi.python.org/packages/c0/3a/40967d9f5675fbb097ffec170f59c2ba19fc96373e73ad47c2cae9a30aed/numpy-1.13.1.zip#md5=2c3c0f4edf720c3a7b525dacc825b9ae
You may see Python-specific or OS-specific URLs. Note that when you add a
``.whl`` URL, you should add ``expand=False`` to ensure that Spack doesn't
try to extract the wheel:
and more recently:
https://files.pythonhosted.org/packages/b0/2b/497c2bb7c660b2606d4a96e2035e92554429e139c6c71cdff67af66b58d2/numpy-1.14.3.zip
.. code-block:: python
As you can imagine, it is impossible for Spack to guess what URL to
use to download version 1.12.0 given this URL. There is a solution,
however. PyPI offers a new hidden interface for downloading
Python packages that does not include a hash in the URL:
https://pypi.io/packages/source/n/numpy/numpy-1.13.1.zip
version('1.11.0', sha256='d8c9d24ea90457214d798b0d922489863dad518adde3638e08ef62de28fb183a', expand=False)
This URL redirects to the files.pythonhosted.org URL. The general syntax for
this pypi.io URL is:
https://pypi.io/packages/source/<first-letter-of-name>/<name>/<name>-<version>.<extension>
Please use the pypi.io URL instead of the pypi.python.org URL. If both
``.tar.gz`` and ``.zip`` versions are available, ``.tar.gz`` is preferred.
If some releases offer both ``.tar.gz`` and ``.zip`` versions, but some
only offer ``.zip`` versions, use ``.zip``.
"""""""""""""""
PyPI vs. GitHub
"""""""""""""""
Many packages are hosted on PyPI, but are developed on GitHub or another
Many packages are hosted on PyPI, but are developed on GitHub and other
version control systems. The tarball can be downloaded from either
location, but PyPI is preferred for the following reasons:
#. PyPI contains the bare minimum number of files needed to install the package.
#. PyPI contains the bare minimum of files to install the package.
You may notice that the tarball you download from PyPI does not
have the same checksum as the tarball you download from GitHub.
@@ -263,6 +224,25 @@ location, but PyPI is preferred for the following reasons:
PyPI is nice because it makes it physically impossible to
re-release the same version of a package with a different checksum.
There are some reasons to prefer downloading from GitHub:
#. The GitHub tarball may contain unit tests
As previously mentioned, the PyPI tarball contains the bare minimum
of files to install the package. Unless explicitly specified by the
developers, it will not contain development files like unit tests.
If you desire to run the unit tests during installation, you should
use the GitHub tarball instead.
#. Spack does not yet support ``spack versions`` and ``spack checksum``
with PyPI URLs
These commands work just fine with GitHub URLs. This is a minor
annoyance, not a reason to prefer GitHub over PyPI.
If you really want to run these unit tests, no one will stop you from
submitting a PR for a new package that downloads from GitHub.
^^^^^^^^^^^^^^^^^^^^^^^^^
Build system dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -282,26 +262,26 @@ mentions that Python 3 is required, this can be specified as:
.. code-block:: python
depends_on('python@3:', type=('build', 'run'))
depends_on('python@3:', type=('build', 'run')
If Python 2 is required, this would look like:
.. code-block:: python
depends_on('python@:2', type=('build', 'run'))
depends_on('python@:2', type=('build', 'run')
If Python 2.7 is the only version that works, you can use:
.. code-block:: python
depends_on('python@2.7:2.8', type=('build', 'run'))
depends_on('python@2.7:2.8', type=('build', 'run')
The documentation may not always specify supported Python versions.
Another place to check is in the ``setup.py`` or ``setup.cfg`` file.
Look for a line containing ``python_requires``. An example from
Another place to check is in the ``setup.py`` file. Look for a line
containing ``python_requires``. An example from
`py-numpy <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/py-numpy/package.py>`_
looks like:
@@ -310,7 +290,7 @@ looks like:
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*'
You may also find a version check at the top of the ``setup.py``:
More commonly, you will find a version check at the top of the file:
.. code-block:: python
@@ -325,39 +305,6 @@ This can be converted to Spack's spec notation like so:
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
If you are writing a recipe for a package that only distributes
wheels, look for a section in the ``METADATA`` file that looks like::
Requires-Python: >=3.5,<4
This would be translated to:
.. code-block:: python
extends('python')
depends_on('python@3.5:3.999', type=('build', 'run'))
Many ``setup.py`` or ``setup.cfg`` files also contain information like::
Programming Language :: Python :: 2
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
This is a list of versions of Python that the developer likely tests.
However, you should not use this to restrict the versions of Python
the package uses unless one of the two former methods (``python_requires``
or ``sys.version_info``) is used. There is no logic in setuptools
that prevents the package from building for Python versions not in
this list, and often new releases like Python 3.7 or 3.8 work just fine.
""""""""""
setuptools
""""""""""
@@ -370,7 +317,7 @@ Most notably, there was no way to list a project's dependencies
with distutils. Along came setuptools, a non-builtin build system
designed to overcome the limitations of distutils. Both projects
use a similar API, making the transition easy while adding much
needed functionality. Today, setuptools is used in around 90% of
needed functionality. Today, setuptools is used in around 75% of
the Python packages in Spack.
Since setuptools isn't built-in to Python, you need to add it as a
@@ -413,20 +360,6 @@ run-time. This can be specified as:
depends_on('py-setuptools', type='build')
"""
pip
"""
Packages distributed as Python wheels will require an extra dependency
on pip:
.. code-block:: python
depends_on('py-pip', type='build')
We will use pip to install the actual wheel.
""""""
cython
""""""
@@ -450,12 +383,6 @@ where speed is crucial. There is no reason why someone would not
want an optimized version of a library instead of the pure-Python
version.
Note that some release tarballs come pre-cythonized, and cython is
not needed as a dependency. However, this is becoming less common
as Python continues to evolve and developers discover that cythonized
sources are no longer compatible with newer versions of Python and
need to be re-cythonized.
^^^^^^^^^^^^^^^^^^^
Python dependencies
^^^^^^^^^^^^^^^^^^^
@@ -502,33 +429,21 @@ Obviously, this means that ``py-numpy`` is a dependency.
If the package uses ``setuptools``, check for the following clues:
* ``python_requires``
As mentioned above, this specifies which versions of Python are
required.
* ``setup_requires``
These packages are usually only needed at build-time, so you can
add them with ``type='build'``.
* ``install_requires``
These packages are required for building and installation. You can
add them with ``type=('build', 'run')``.
These packages are required for installation.
* ``extra_requires``
These packages are optional dependencies that enable additional
functionality. You should add a variant that optionally adds these
dependencies. This variant should be False by default.
dependencies.
* ``test_requires``
These are packages that are required to run the unit tests for the
package. These dependencies can be specified using the
``type='test'`` dependency type. However, the PyPI tarballs rarely
contain unit tests, so there is usually no reason to add these.
``type='test'`` dependency type.
In the root directory of the package, you may notice a
``requirements.txt`` file. It may look like this file contains a list
@@ -546,37 +461,13 @@ sphinx. If you can't find any information about the package's
dependencies, you can take a look in ``requirements.txt``, but be sure
not to add test or documentation dependencies.
Newer PEPs have added alternative ways to specify a package's dependencies.
If you don't see any dependencies listed in the ``setup.py``, look for a
``setup.cfg`` or ``pyproject.toml``. These files can be used to store the
same ``install_requires`` information that ``setup.py`` used to use.
If you are writing a recipe for a package that only distributes wheels,
check the ``METADATA`` file for lines like::
Requires-Dist: azureml-core (~=1.11.0)
Requires-Dist: azureml-dataset-runtime[fuse] (~=1.11.0)
Requires-Dist: azureml-train (~=1.11.0)
Requires-Dist: azureml-train-automl-client (~=1.11.0)
Requires-Dist: azureml-pipeline (~=1.11.0)
Provides-Extra: accel-models
Requires-Dist: azureml-accel-models (~=1.11.0); extra == 'accel-models'
Provides-Extra: automl
Requires-Dist: azureml-train-automl (~=1.11.0); extra == 'automl'
Lines that use ``Requires-Dist`` are similar to ``install_requires``.
Lines that use ``Provides-Extra`` are similar to ``extra_requires``,
and you can add a variant for those dependencies. The ``~=1.11.0``
syntax is equivalent to ``1.11.0:1.11.999``.
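As a sketch, the first and last ``Requires-Dist`` lines above might
translate to the following (the variant name is an assumption):

.. code-block:: python

   depends_on('py-azureml-core@1.11.0:1.11.999', type=('build', 'run'))

   variant('automl', default=False, description='Enable AutoML support')
   depends_on('py-azureml-train-automl@1.11.0:1.11.999', when='+automl',
              type=('build', 'run'))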
""""""""""
setuptools
""""""""""
Setuptools is a bit of a special case. If a package requires setuptools
at run-time, how do they express this? They could add it to
``install_requires``, but setuptools is imported long before this and is
``install_requires``, but setuptools is imported long before this and
needed to read this line. And since you can't install the package
without setuptools, the developers assume that setuptools will already
be there, so they never mention when it is required. We don't want to
@@ -584,8 +475,7 @@ add run-time dependencies if they aren't needed, so you need to
determine whether or not setuptools is needed. Grep the installation
directory for any files containing a reference to ``setuptools`` or
``pkg_resources``. Both modules come from ``py-setuptools``.
``pkg_resources`` is particularly common in scripts found in
``prefix/bin``.
``pkg_resources`` is particularly common in scripts in ``prefix/bin``.
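One quick way to check, sketched here with a placeholder prefix:

.. code-block:: console

   $ grep -rl 'setuptools\|pkg_resources' /path/to/install/prefix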
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Passing arguments to setup.py
@@ -659,65 +549,47 @@ a "package" is a directory containing files like:
foo/baz.py
whereas a "module" is a single Python file.
The ``PythonPackage`` base class automatically detects these module
names for you. If, for whatever reason, the module names detected
are wrong, you can provide the names yourself by overriding
``import_modules`` like so:
whereas a "module" is a single Python file. Since ``find_packages``
only returns packages, you'll have to determine the correct module
names yourself. You can now add these packages and modules to the
package like so:
.. code-block:: python
import_modules = ['six']
Sometimes the list of module names to import depends on how the
package was built. For example, the ``py-pyyaml`` package has a
``+libyaml`` variant that enables the build of a faster optimized
version of the library. If the user chooses ``~libyaml``, only the
``yaml`` library will be importable. If the user chooses ``+libyaml``,
both the ``yaml`` and ``yaml.cyaml`` libraries will be available.
This can be expressed like so:
When you run ``spack install --test=root py-six``, Spack will attempt
to import the ``six`` module after installation.
.. code-block:: python
@property
def import_modules(self):
modules = ['yaml']
if '+libyaml' in self.spec:
modules.append('yaml.cyaml')
return modules
These tests often catch missing dependencies and non-RPATHed
These tests most often catch missing dependencies and non-RPATHed
libraries. Make sure not to add modules/packages containing the word
"test", as these likely won't end up in the installation directory,
or may require test dependencies like pytest to be installed.
These tests can be triggered by running ``spack install --test=root``
or by running ``spack test run`` after the installation has finished.
"test", as these likely won't end up in installation directory.
""""""""""
Unit tests
""""""""""
The package you want to install may come with additional unit tests.
You can add additional build-time or install-time tests by adding
additional testing functions. For example, ``py-numpy`` adds:
By default, Spack runs:
.. code-block:: console
$ python setup.py test
if it detects that the ``setup.py`` file supports a ``test`` phase.
You can add additional build-time or install-time tests by overriding
``test`` and ``installtest``, respectively. For example, ``py-numpy``
adds:
.. code-block:: python
@run_after('install')
@on_package_attributes(run_tests=True)
def install_test(self):
with working_dir('spack-test', create=True):
python('-c', 'import numpy; numpy.test("full", verbose=2)')
with working_dir('..'):
python('-c', 'import numpy; numpy.test("full", verbose=2)')
These tests can be triggered by running ``spack install --test=root``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setup file in a sub-directory
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -757,7 +629,7 @@ PythonPackage vs. packages that use Python
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
There are many packages that make use of Python, but packages that depend
on Python are not necessarily ``PythonPackage``'s.
on Python are not necessarily ``PythonPackages``.
"""""""""""""""""""""""
Choosing a build system
@@ -779,8 +651,6 @@ that the package uses the ``PythonPackage`` build system. However, there
are occasionally packages that use ``PythonPackage`` that shouldn't
start with ``py-``. For example:
* awscli
* aws-parallelcluster
* busco
* easybuild
* httpie
@@ -854,8 +724,8 @@ and ``pip`` may be a perfectly valid alternative to using Spack. The
main advantage of Spack over ``pip`` is its ability to compile
non-Python dependencies. It can also build cythonized versions of a
package or link to an optimized BLAS/LAPACK library like MKL,
resulting in calculations that run orders of magnitudes faster.
Spack does not offer a significant advantage over other python-management
resulting in calculations that run orders of magnitude faster.
Spack does not offer a significant advantage to other python-management
systems for installing and using tools like flake8 and sphinx.
But if you need packages with non-Python dependencies like
numpy and scipy, Spack will be very valuable to you.
@@ -866,9 +736,8 @@ non-Python dependencies. Anaconda contains many Python packages that
are not yet in Spack, and Spack contains many Python packages that are
not yet in Anaconda. The main advantage of Spack over Anaconda is its
ability to choose a specific compiler and BLAS/LAPACK or MPI library.
Spack also has better platform support for supercomputers, and can build
optimized binaries for your specific microarchitecture. On the other hand,
Anaconda offers Windows support.
Spack also has better platform support for supercomputers. On the
other hand, Anaconda offers Windows support.
^^^^^^^^^^^^^^^^^^^^^^
External documentation
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -108,19 +108,6 @@ override the ``qmake_args`` method like so:
This method can be used to pass flags as well as variables.
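A minimal sketch (the flag and the variable assignment here are
hypothetical):

.. code-block:: python

   def qmake_args(self):
       # Pass a qmake CONFIG option and a variable assignment.
       return ['-config', 'release', 'PREFIX={0}'.format(self.prefix)]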
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``*.pro`` file in a sub-directory
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If the ``*.pro`` file used to tell QMake how to build the package is
found in a sub-directory, you can tell Spack to run all phases in this
sub-directory by adding the following to the package:
.. code-block:: python
build_directory = 'src'
^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
View File
@@ -1,122 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _rocmpackage:
-----------
ROCmPackage
-----------
The ``ROCmPackage`` is not a build system but a helper package. Like ``CudaPackage``,
it provides standard variants, dependencies, and conflicts to facilitate building
packages that use GPUs, in this case AMD GPUs.
You can find the source for this package (and suggestions for setting up your
``compilers.yaml`` and ``packages.yaml`` files) at
`<https://github.com/spack/spack/blob/develop/lib/spack/spack/build_systems/rocm.py>`__.
^^^^^^^^
Variants
^^^^^^^^
This package provides the following variants:
* **rocm**
This variant is used to enable/disable building with ``rocm``.
The default is disabled (or ``False``).
* **amdgpu_target**
This variant supports the optional specification of the AMD GPU architecture.
Valid values are the names of the GPUs (e.g., ``gfx701``), which are maintained
in the ``amdgpu_targets`` property.
^^^^^^^^^^^^
Dependencies
^^^^^^^^^^^^
This package defines basic ``rocm`` dependencies, including ``llvm`` and ``hip``.
^^^^^^^^^
Conflicts
^^^^^^^^^
Conflicts are used to prevent builds with known bugs or issues. This package
already requires that the ``amdgpu_target`` always be specified for ``rocm``
builds. It also defines a conflict that prevents builds with an ``amdgpu_target``
when ``rocm`` is disabled.
Refer to `Conflicts <https://spack.readthedocs.io/en/latest/packaging_guide.html?highlight=conflicts#conflicts>`__
for more information on package conflicts.
^^^^^^^
Methods
^^^^^^^
This package provides one custom helper method, which is used to build
standard AMD hip compiler flags.
**hip_flags**
This built-in static method returns the appropriately formatted
``--amdgpu-target`` build option for ``hipcc``.
This method must be explicitly called when you are creating the
arguments for your build in order to use the values.
^^^^^
Usage
^^^^^
This helper package can be added to your package by adding it as a base
class of your package. For example, you can add it to your
:ref:`CMakePackage <cmakepackage>`-based package as follows:
.. code-block:: python
:emphasize-lines: 1,3-7,14-25
class MyRocmPackage(CMakePackage, ROCmPackage):
...
# Ensure +rocm and amdgpu_targets are passed to dependencies
depends_on('mydeppackage', when='+rocm')
for val in ROCmPackage.amdgpu_targets:
depends_on('mydeppackage amdgpu_target={0}'.format(val),
when='amdgpu_target={0}'.format(val))
...
def cmake_args(self):
spec = self.spec
args = []
...
if '+rocm' in spec:
# Set up the hip macros needed by the build
args.extend([
'-DENABLE_HIP=ON',
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix)])
rocm_archs = spec.variants['amdgpu_target'].value
if 'none' not in rocm_archs:
args.append('-DHIP_HIPCC_FLAGS=--amdgpu-target={0}'
.format(",".join(rocm_archs)))
else:
# Ensure build with hip is disabled
args.append('-DENABLE_HIP=OFF')
...
return args
...
assuming only the ``ENABLE_HIP``, ``HIP_ROOT_DIR``, and ``HIP_HIPCC_FLAGS``
macros are required to be set and the only dependency needing rocm options
is ``mydeppackage``. You will need to customize the flags as needed for your
build.
This example also illustrates how to check for the ``rocm`` variant using
``self.spec`` and how to retrieve the ``amdgpu_target`` variant's value
using ``self.spec.variants['amdgpu_target'].value``.
All five packages using ``ROCmPackage`` as of January 2021 also use the
:ref:`CudaPackage <cudapackage>`. So it is worth looking at those packages
to get ideas for creating a package that can support both ``cuda`` and
``rocm``.
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -42,11 +42,7 @@ If it isn't on CRAN, try Bioconductor, another common R repository.
For the purposes of this tutorial, we will be walking through
`r-caret <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/r-caret/package.py>`_
as an example. If you search for "CRAN caret", you will quickly find what
you are looking for at https://cran.r-project.org/package=caret.
https://cran.r-project.org is the main CRAN website. However, CRAN also
has a https://cloud.r-project.org site that automatically redirects to
`mirrors around the world <https://cloud.r-project.org/mirrors.html>`_.
For stability and performance reasons, we will use https://cloud.r-project.org/package=caret.
you are looking for at https://cran.r-project.org/web/packages/caret/index.html.
If you search for "Package source", you will find the download URL for
the latest release. Use this URL with ``spack create`` to create a new
package.
@@ -79,14 +75,12 @@ Description
The first thing you'll need to add to your new package is a description.
The top of the homepage for ``caret`` lists the following description:
Classification and Regression Training
caret: Classification and Regression Training
Misc functions for training and plotting classification and regression models.
The first line is a short description (title) and the second line is a long
description. In this case the description is only one line but often the
description is several lines. Spack makes use of both short and long
descriptions and convention is to use both when creating an R package.
You can either use the short description (first line), long description
(second line), or both depending on what you feel is most appropriate.
^^^^^^^^
Homepage
@@ -99,8 +93,8 @@ If you look at the bottom of the page, you'll see:
Please use the canonical form https://CRAN.R-project.org/package=caret to link to this page.
Please uphold the wishes of the CRAN admins and use
https://cloud.r-project.org/package=caret as the homepage instead of
https://cloud.r-project.org/web/packages/caret/index.html. The latter may
https://CRAN.R-project.org/package=caret as the homepage instead of
https://cran.r-project.org/web/packages/caret/index.html. The latter may
change without notice.
^^^
@@ -115,78 +109,17 @@ List URL
^^^^^^^^
CRAN maintains a single webpage containing the latest release of every
single package: https://cloud.r-project.org/src/contrib/
single package: https://cran.r-project.org/src/contrib/
Of course, as soon as a new release comes out, the version you were using
in your package is no longer available at that URL. It is moved to an
archive directory. If you search for "Old sources", you will find:
https://cloud.r-project.org/src/contrib/Archive/caret
https://cran.r-project.org/src/contrib/Archive/caret
If you only specify the URL for the latest release, your package will
no longer be able to fetch that version as soon as a new release comes
out. To get around this, add the archive directory as a ``list_url``.
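For ``caret``, a sketch of the resulting attributes (the version in the
``url`` is only an example):

.. code-block:: python

   url = 'https://cloud.r-project.org/src/contrib/caret_6.0-86.tar.gz'
   list_url = 'https://cloud.r-project.org/src/contrib/Archive/caret'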
^^^^^^^^^^^^^^^^^^^^^
Bioconductor packages
^^^^^^^^^^^^^^^^^^^^^
Bioconductor packages are set up in a similar way to CRAN packages, but there
are some very important distinctions. Bioconductor packages can be found at:
https://bioconductor.org/. Bioconductor packages are R packages and so follow
the same packaging scheme as CRAN packages. What is different is that
Bioconductor itself is versioned and released. This scheme, using the
Bioconductor package installer, allows further specification of the minimum
version of R as well as further restrictions on the dependencies between
packages than what is possible with the native R packaging system. Spack can
not replicate these extra features and thus Bioconductor packages in Spack need
to be managed as a group during updates in order to maintain package
consistency with Bioconductor itself.
Another key difference is that, while previous versions of packages are
available, they are not available from a site that can be programmatically set,
thus a ``list_url`` attribute can not be used. However, each package is also
available in a git repository, with branches corresponding to each Bioconductor
release. Thus, it is always possible to retrieve the version of any package
corresponding to a Bioconductor release simply by fetching the branch that
corresponds to the Bioconductor release of the package repository. For this
reason, spack Bioconductor R packages use the git repository, with the commit
of the respective branch used in the ``version()`` attribute of the package.
^^^^^^^^^^^^^^^^^^^^^^^^
cran and bioc attributes
^^^^^^^^^^^^^^^^^^^^^^^^
Much like the ``pypi`` attribute for python packages, due to the fact that R
packages are obtained from specific repositories, it is possible to set up shortcut
attributes that can be used to set ``homepage``, ``url``, ``list_url``, and
``git``. For example, the following ``cran`` attribute:
.. code-block:: python
cran = 'caret'
is equivalent to:
.. code-block:: python
homepage = 'https://cloud.r-project.org/package=caret'
url = 'https://cloud.r-project.org/src/contrib/caret_6.0-86.tar.gz'
list_url = 'https://cloud.r-project.org/src/contrib/Archive/caret'
Likewise, the following ``bioc`` attribute:
.. code-block:: python
bioc = 'BiocVersion'
is equivalent to:
.. code-block:: python
homepage = 'https://bioconductor.org/packages/BiocVersion/'
git = 'https://git.bioconductor.org/packages/BiocVersion'
^^^^^^^^^^^^^^^^^^^^^^^^^
Build system dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -201,15 +134,16 @@ every R package needs this, the ``RPackage`` base class contains:
.. code-block:: python
extends('r')
depends_on('r', type=('build', 'run'))
Take a close look at the homepage for ``caret``. If you look at the
"Depends" section, you'll notice that ``caret`` depends on "R (≥ 3.2.0)".
"Depends" section, you'll notice that ``caret`` depends on "R (≥ 2.10)".
You should add this to your package like so:
.. code-block:: python
depends_on('r@3.2.0:', type=('build', 'run'))
depends_on('r@2.10:', type=('build', 'run'))
^^^^^^^^^^^^^^
@@ -219,7 +153,7 @@ R dependencies
R packages are often small and follow the classic Unix philosophy
of doing one thing well. They are modular and usually depend on
several other packages. You may find a single package with over a
hundred dependencies. Luckily, R packages are well-documented
hundred dependencies. Luckily, CRAN packages are well-documented
and list all of their dependencies in the following sections:
* Depends
@@ -228,7 +162,7 @@ and list all of their dependencies in the following sections:
As far as Spack is concerned, all 3 of these dependency types
correspond to ``type=('build', 'run')``, so you don't have to worry
about the details. If you are curious what they mean,
about them. If you are curious what they mean,
https://github.com/spack/spack/issues/2951 has a pretty good summary:
``Depends`` is required and will cause those R packages to be *attached*,
@@ -259,14 +193,6 @@ R packages already have enough dependencies as it is, and adding
optional dependencies can really slow down the concretization
process. They can also introduce circular dependencies.
A fifth rarely used section is:
* Enhances
This means that the package can be used as an optional dependency
for another package. Again, these packages should **NOT** be listed
as dependencies.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Core, recommended, and non-core packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -282,8 +208,8 @@ If you look at the ``caret`` homepage, you'll notice a few dependencies
that don't have a link to the package, like ``methods``, ``stats``, and
``utils``. These packages are part of the core R distribution and are
tied to the R version installed. You can basically consider these to be
"R itself". These are so essential to R that it would not make sense for
them to be updated via CRAN. If you did, you would basically get a different
"R itself". These are so essential to R so it would not make sense that
they could be updated via CRAN. If so, you would basically get a different
version of R. Thus, they're updated when R is updated.
You can find a list of these core libraries at:
@@ -339,7 +265,7 @@ Non-R dependencies
Some packages depend on non-R libraries for linking. Check out the
`r-stringi <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/r-stringi/package.py>`_
package for an example: https://cloud.r-project.org/package=stringi.
package for an example: https://CRAN.R-project.org/package=stringi.
If you search for the text "SystemRequirements", you will see:
ICU4C (>= 52, optional)
@@ -360,8 +286,8 @@ like so:
.. code-block:: python
def configure_args(self):
mpi_name = self.spec['mpi'].name
def configure_args(self, spec, prefix):
mpi_name = spec['mpi'].name
# The type of MPI. Supported values are:
# OPENMPI, LAM, MPICH, MPICH2, or CRAY
@@ -418,11 +344,3 @@ External documentation
For more information on installing R packages, see:
https://stat.ethz.ch/R-manual/R-devel/library/utils/html/INSTALL.html
For more information on writing R packages, see:
https://cloud.r-project.org/doc/manuals/r-release/R-exts.html
In particular,
https://cloud.r-project.org/doc/manuals/r-release/R-exts.html#Package-Dependencies
has a great explanation of the difference between Depends, Imports,
and LinkingTo.
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -12,172 +12,5 @@ RubyPackage
Like Perl, Python, and R, Ruby has its own build system for
installing Ruby gems.
^^^^^^
Phases
^^^^^^
The ``RubyPackage`` base class provides the following phases that
can be overridden:
#. ``build`` - build everything needed to install
#. ``install`` - install everything from build directory
For packages that come with a ``*.gemspec`` file, these phases run:
.. code-block:: console
$ gem build *.gemspec
$ gem install *.gem
For packages that come with a ``Rakefile`` file, these phases run:
.. code-block:: console
$ rake package
$ gem install *.gem
For packages that come pre-packaged as a ``*.gem`` file, the build
phase is skipped and the install phase runs:
.. code-block:: console
$ gem install *.gem
These are all standard ``gem`` commands and can be found by running:
.. code-block:: console
$ gem help commands
For packages that only distribute ``*.gem`` files, these files can be
downloaded with the ``expand=False`` option in the ``version`` directive.
The build phase will be automatically skipped.
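For example, a sketch with a placeholder version and checksum:

.. code-block:: python

   version('1.0.0', sha256='<checksum>', expand=False)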
^^^^^^^^^^^^^^^
Important files
^^^^^^^^^^^^^^^
When building from source, Ruby packages can be identified by the
presence of any of the following files:
* ``*.gemspec``
* ``Rakefile``
* ``setup.rb`` (not yet supported)
However, not all Ruby packages are released as source code. Some are only
released as ``*.gem`` files. These files can be extracted using:
.. code-block:: console
$ gem unpack *.gem
^^^^^^^^^^^
Description
^^^^^^^^^^^
The ``*.gemspec`` file may contain something like:
.. code-block:: ruby
summary = 'An implementation of the AsciiDoc text processor and publishing toolchain'
description = 'A fast, open source text processor and publishing toolchain for converting AsciiDoc content to HTML 5, DocBook 5, and other formats.'
Either of these can be used for the description of the Spack package.
^^^^^^^^
Homepage
^^^^^^^^
The ``*.gemspec`` file may contain something like:
.. code-block:: ruby
homepage = 'https://asciidoctor.org'
This should be used as the official homepage of the Spack package.
^^^^^^^^^^^^^^^^^^^^^^^^^
Build system dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^
All Ruby packages require Ruby at build and run-time. For this reason,
the base class contains:
.. code-block:: python
extends('ruby')
The ``*.gemspec`` file may contain something like:
.. code-block:: ruby
required_ruby_version = '>= 2.3.0'
This can be added to the Spack package using:
.. code-block:: python
depends_on('ruby@2.3.0:', type=('build', 'run'))
^^^^^^^^^^^^^^^^^
Ruby dependencies
^^^^^^^^^^^^^^^^^
When you install a package with ``gem``, it reads the ``*.gemspec``
file in order to determine the dependencies of the package.
If the dependencies are not yet installed, ``gem`` downloads them
and installs them for you. This may sound convenient, but Spack
cannot rely on this behavior for two reasons:
#. Spack needs to be able to install packages on air-gapped networks.
If there is no internet connection, ``gem`` can't download the
package dependencies. By explicitly listing every dependency in
the ``package.py``, Spack knows what to download ahead of time.
#. Duplicate installations of the same dependency may occur.
Spack supports *activation* of Ruby extensions, which involves
symlinking the package installation prefix to the Ruby installation
prefix. If your package is missing a dependency, that dependency
will be installed to the installation directory of the same package.
If you try to activate the package + dependency, it may cause a
problem if that package has already been activated.
For these reasons, you must always explicitly list all dependencies.
Although the documentation may list the package's dependencies,
often the developers assume people will use ``gem`` and won't have to
worry about it. Always check the ``*.gemspec`` file to find the true
dependencies.
Check for the following clues in the ``*.gemspec`` file:
* ``add_runtime_dependency``
These packages are required for installation.
* ``add_dependency``
This is an alias for ``add_runtime_dependency``.
* ``add_development_dependency``
These packages are optional dependencies used for development.
They should not be added as dependencies of the package.
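As a sketch, a ``*.gemspec`` line such as
``spec.add_runtime_dependency 'asciidoctor', '~> 2.0'`` might translate
to the following (the Spack package name and the mapping of ``~>`` to a
version range are assumptions):

.. code-block:: python

   depends_on('rb-asciidoctor@2.0:2.999', type=('build', 'run'))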
^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
For more information on Ruby packaging, see:
https://guides.rubygems.org/
This build system is a work-in-progress. See
https://github.com/spack/spack/pull/3127 for more information.
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
View File
@@ -1,146 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _sippackage:
----------
SIPPackage
----------
SIP is a tool that makes it very easy to create Python bindings for C and C++
libraries. It was originally developed to create PyQt, the Python bindings for
the Qt toolkit, but can be used to create bindings for any C or C++ library.
SIP comprises a code generator and a Python module. The code generator
processes a set of specification files and generates C or C++ code which is
then compiled to create the bindings extension module. The SIP Python module
provides support functions to the automatically generated code.
^^^^^^
Phases
^^^^^^
The ``SIPPackage`` base class comes with the following phases:
#. ``configure`` - configure the package
#. ``build`` - build the package
#. ``install`` - install the package
By default, these phases run:
.. code-block:: console
$ python configure.py --bindir ... --destdir ...
$ make
$ make install
^^^^^^^^^^^^^^^
Important files
^^^^^^^^^^^^^^^
Each SIP package comes with a custom ``configure.py`` build script,
written in Python. This script contains instructions to build the project.
^^^^^^^^^^^^^^^^^^^^^^^^^
Build system dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^
``SIPPackage`` requires several dependencies. Python is needed to run
the ``configure.py`` build script, and to run the resulting Python
libraries. Qt is needed to provide the ``qmake`` command. SIP is also
needed to build the package. All of these dependencies are automatically
added via the base class
.. code-block:: python
extends('python')
depends_on('qt', type='build')
depends_on('py-sip', type='build')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Passing arguments to ``configure.py``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Each phase comes with a ``<phase_args>`` function that can be used to pass
arguments to that particular phase. For example, if you need to pass
arguments to the configure phase, you can use:
.. code-block:: python
def configure_args(self, spec, prefix):
return ['--no-python-dbus']
A list of valid options can be found by running ``python configure.py --help``.
^^^^^^^
Testing
^^^^^^^
Just because a package successfully built does not mean that it built
correctly. The most reliable test of whether or not the package was
correctly installed is to attempt to import all of the modules that
get installed. To get a list of modules, run the following command
in the site-packages directory:
.. code-block:: console
$ python
>>> import setuptools
>>> setuptools.find_packages()
[
'PyQt5', 'PyQt5.QtCore', 'PyQt5.QtGui', 'PyQt5.QtHelp',
'PyQt5.QtMultimedia', 'PyQt5.QtMultimediaWidgets', 'PyQt5.QtNetwork',
'PyQt5.QtOpenGL', 'PyQt5.QtPrintSupport', 'PyQt5.QtQml',
'PyQt5.QtQuick', 'PyQt5.QtSvg', 'PyQt5.QtTest', 'PyQt5.QtWebChannel',
'PyQt5.QtWebSockets', 'PyQt5.QtWidgets', 'PyQt5.QtXml',
'PyQt5.QtXmlPatterns'
]
Large, complex packages like ``py-pyqt5`` will return a long list of
packages, while other packages may return an empty list. These packages
only install a single ``foo.py`` file. In Python packaging lingo,
a "package" is a directory containing files like:
.. code-block:: none
foo/__init__.py
foo/bar.py
foo/baz.py
whereas a "module" is a single Python file.
The ``SIPPackage`` base class automatically detects these module
names for you. If, for whatever reason, the module names detected
are wrong, you can provide the names yourself by overriding
``import_modules`` like so:
.. code-block:: python
import_modules = ['PyQt5']
These tests often catch missing dependencies and non-RPATHed
libraries. Make sure not to add modules/packages containing the word
"test", as these likely won't end up in the installation directory,
or may require test dependencies like pytest to be installed.
These tests can be triggered by running ``spack install --test=root``
or by running ``spack test run`` after the installation has finished.
^^^^^^^^^^^^^^^^^^^^^^
External documentation
^^^^^^^^^^^^^^^^^^^^^^
For more information on the SIP build system, see:
* https://www.riverbankcomputing.com/software/sip/intro
* https://www.riverbankcomputing.com/static/Docs/sip/
* https://wiki.python.org/moin/SIP
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -47,9 +47,8 @@ Each phase provides a ``<phase>`` function that runs:
where ``<jobs>`` is the number of parallel jobs to build with. Each phase
also has a ``<phase_args>`` function that can pass arguments to this call.
All of these functions are empty. The ``configure`` phase
automatically adds ``--prefix=/path/to/installation/prefix``, so you
don't need to add that in the ``configure_args``.
All of these functions are empty except for the ``configure_args``
function, which passes ``--prefix=/path/to/installation/prefix``.
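For example, a hypothetical override that adds a single extra option:

.. code-block:: python

   def configure_args(self):
       # --prefix is added automatically; pass only extra options here.
       return ['--enable-feature']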
^^^^^^^
Testing
View File
@@ -1,96 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. chain:
============================
Chaining Spack Installations
============================
You can point your Spack installation to another installation to use any
packages that are installed there. To register the other Spack instance,
you can add it as an entry to ``upstreams.yaml``:
.. code-block:: yaml
upstreams:
  spack-instance-1:
    install_tree: /path/to/other/spack/opt/spack
  spack-instance-2:
    install_tree: /path/to/another/spack/opt/spack
``install_tree`` must point to the ``opt/spack`` directory inside of the
Spack base directory.
Once the upstream Spack instance has been added, ``spack find`` will
automatically check the upstream instance when querying installed packages,
and new package installations for the local Spack install will use any
dependencies that are installed in the upstream instance.
This other instance of Spack has no knowledge of the local Spack instance
and may not have the same permissions or ownership as the local Spack instance.
This has the following consequences:
#. Upstream Spack instances are not locked. Therefore it is up to users to
make sure that the local instance is not using an upstream instance when it
is being modified.
#. Users should not uninstall packages from the upstream instance. Since the
upstream instance doesn't know about the local instance, it cannot prevent
the uninstallation of packages which the local instance depends on.
Other details about upstream installations:
#. If a package is installed both locally and upstream, the local installation
will always be used as a dependency. This can occur if the local Spack
installs a package which is not present in the upstream, but later on the
upstream Spack instance also installs that package.
#. If an upstream Spack instance registers and installs an external package,
the local Spack instance will treat this the same as a Spack-installed
package. This feature will only work if the upstream Spack instance
includes the upstream functionality (i.e. if its commit is after March
27, 2019).
---------------------------------------
Using Multiple Upstream Spack Instances
---------------------------------------
A single Spack instance can use multiple upstream Spack installations. Spack
will search upstream instances in the order you list them in your
configuration. If your installation refers to instances X and Y, in that order,
then instance X must list Y as an upstream in its own ``upstreams.yaml``.
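A minimal sketch of such a configuration, with hypothetical paths, might look
like this; here ``instance-x`` is searched before ``instance-y``, so
``instance-x`` must itself list ``instance-y`` in its own ``upstreams.yaml``:

.. code-block:: yaml

upstreams:
  instance-x:
    install_tree: /path/to/x/spack/opt/spack
  instance-y:
    install_tree: /path/to/y/spack/opt/spack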
-----------------------------------
Using Modules for Upstream Packages
-----------------------------------
The local Spack instance does not generate modules for packages which are
installed upstream. The local Spack instance can be configured to use the
modules generated by the upstream Spack instance.
There are two requirements to use the modules created by an upstream Spack
instance: firstly, the upstream instance must run ``spack module tcl refresh``,
which generates an index file that maps installed packages to their modules;
secondly, the local Spack instance must add a ``modules`` entry to the
configuration:
.. code-block:: yaml
upstreams:
  spack-instance-1:
    install_tree: /path/to/other/spack/opt/spack
    modules:
      tcl: /path/to/other/spack/share/spack/modules
Each time new packages are installed in the upstream Spack instance, the
upstream Spack maintainer should run ``spack module tcl refresh`` (or the
corresponding command for the type of module they intend to use).
.. note::
Spack can generate modules that :ref:`automatically load
<autoloading-dependencies>` the modules of dependency packages. Spack cannot
currently do this for modules in upstream packages.

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -20,45 +20,67 @@
import sys
import os
import re
import shutil
import subprocess
from glob import glob
from sphinx.ext.apidoc import main as sphinx_apidoc
# Since Sphinx 1.7, sphinx.apidoc has been moved to sphinx.ext.apidoc
# sphinx.apidoc is deprecated and will be removed in Sphinx 2.0
try:
from sphinx.ext.apidoc import main as sphinx_apidoc
except ImportError:
from sphinx.apidoc import main as sphinx_apidoc
# -- Spack customizations -----------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('_spack_root/lib/spack/external'))
sys.path.insert(0, os.path.abspath('../external'))
if sys.version_info[0] < 3:
sys.path.insert(
0, os.path.abspath('_spack_root/lib/spack/external/yaml/lib'))
sys.path.insert(0, os.path.abspath('../external/yaml/lib'))
else:
sys.path.insert(
0, os.path.abspath('_spack_root/lib/spack/external/yaml/lib3'))
sys.path.append(os.path.abspath('_spack_root/lib/spack/'))
sys.path.insert(0, os.path.abspath('../external/yaml/lib3'))
sys.path.append(os.path.abspath('..'))
# Add the Spack bin directory to the path so that we can use its output in docs.
os.environ['SPACK_ROOT'] = os.path.abspath('_spack_root')
os.environ['PATH'] += "%s%s" % (os.pathsep, os.path.abspath('_spack_root/bin'))
spack_root = '../../..'
os.environ['SPACK_ROOT'] = spack_root
os.environ['PATH'] += '%s%s/bin' % (os.pathsep, spack_root)
# Set an environment variable so that colify will print output like it would to
# a terminal.
os.environ['COLIFY_SIZE'] = '25x120'
#
# Generate package list using spack command
#
with open('package_list.html', 'w') as plist_file:
subprocess.Popen(
[spack_root + '/bin/spack', 'list', '--format=html'],
stdout=plist_file)
#
# Find all the `cmd-spack-*` references and add them to a command index
#
import spack
import spack.cmd
command_names = spack.cmd.all_commands()
documented_commands = set()
for filename in glob('*rst'):
with open(filename) as f:
for line in f:
match = re.match('.. _cmd-(spack-.*):', line)
if match:
documented_commands.add(match.group(1).strip())
os.environ['COLUMNS'] = '120'
# Generate full package list if needed
subprocess.call([
'spack', 'list', '--format=html', '--update=package_list.html'])
# Generate a command index if an update is needed
subprocess.call([
'spack', 'commands',
'--format=rst',
'--header=command_index.in',
'--update=command_index.rst'] + glob('*rst'))
shutil.copy('command_index.in', 'command_index.rst')
with open('command_index.rst', 'a') as index:
subprocess.Popen(
[spack_root + '/bin/spack', 'commands', '--format=rst'] + list(
documented_commands),
stdout=index)
#
# Run sphinx-apidoc
@@ -68,12 +90,13 @@
# Without this, the API Docs will never actually update
#
apidoc_args = [
'--force', # Older versions of Sphinx ignore the first argument
'--force', # Overwrite existing files
'--no-toc', # Don't create a table of contents file
'--output-dir=.', # Directory to place all output
]
sphinx_apidoc(apidoc_args + ['_spack_root/lib/spack/spack'])
sphinx_apidoc(apidoc_args + ['_spack_root/lib/spack/llnl'])
sphinx_apidoc(apidoc_args + ['../spack'])
sphinx_apidoc(apidoc_args + ['../llnl'])
# Enable todo items
todo_include_todos = True
@@ -90,12 +113,12 @@ def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
env, fromdocname, builder, typ, target, node, contnode)
def setup(sphinx):
sphinx.add_domain(PatchedPythonDomain, override=True)
sphinx.override_domain(PatchedPythonDomain)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.8'
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
@@ -128,14 +151,13 @@ def setup(sphinx):
# General information about the project.
project = u'Spack'
copyright = u'2013-2021, Lawrence Livermore National Laboratory.'
copyright = u'2013-2018, Lawrence Livermore National Laboratory.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import spack
version = '.'.join(str(s) for s in spack.spack_version_info[:2])
# The full version, including alpha/beta/rc tags.
release = spack.spack_version
@@ -144,13 +166,6 @@ def setup(sphinx):
# for a list of supported languages.
#language = None
# Places to look for .po/.mo files for doc translations
#locale_dirs = []
# Sphinx gettext settings
gettext_compact = True
gettext_uuid = False
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
@@ -159,7 +174,7 @@ def setup(sphinx):
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_spack_root', '.spack-env']
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
@@ -176,25 +191,7 @@ def setup(sphinx):
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# We use our own extension of the default style with a few modifications
from pygments.style import Style
from pygments.styles.default import DefaultStyle
from pygments.token import Generic, Comment, Text
class SpackStyle(DefaultStyle):
styles = DefaultStyle.styles.copy()
background_color = "#f4f4f8"
styles[Generic.Output] = "#355"
styles[Generic.Prompt] = "bold #346ec9"
import pkg_resources
dist = pkg_resources.Distribution(__file__)
sys.path.append('.') # make 'conf' module findable
ep = pkg_resources.EntryPoint.parse('spack = conf:SpackStyle', dist=dist)
dist._ep_map = {'pygments.styles': {'plugin1': ep}}
pkg_resources.working_set.add(dist)
pygments_style = 'spack'
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
@@ -223,12 +220,12 @@ class SpackStyle(DefaultStyle):
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_spack_root/share/spack/logo/spack-logo-white-text.svg'
html_logo = '../../../share/spack/logo/spack-logo-white-text.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_spack_root/share/spack/logo/favicon.ico'
html_favicon = '../../../share/spack/logo/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -13,7 +13,7 @@ Spack's basic configuration options are set in ``config.yaml``. You can
see the default settings by looking at
``etc/spack/defaults/config.yaml``:
.. literalinclude:: _spack_root/etc/spack/defaults/config.yaml
.. literalinclude:: ../../../etc/spack/defaults/config.yaml
:language: yaml
These settings can be overridden in ``etc/spack/config.yaml`` or
@@ -30,34 +30,25 @@ Default is ``$spack/opt/spack``.
``install_hash_length`` and ``install_path_scheme``
---------------------------------------------------
The default Spack installation path can be very long and can create problems
for scripts with hardcoded shebangs. Additionally, when using the Intel
compiler with a long list of dependencies, the compiler may segfault. If you
see the following:
The default Spack installation path can be very long and can create
problems for scripts with hardcoded shebangs. There are two parameters
to help with that. Firstly, the ``install_hash_length`` parameter can
set the length of the hash in the installation path from 1 to 32. The
default path uses the full 32 characters.
.. code-block:: console
: internal error: ** The compiler has encountered an unexpected problem.
** Segmentation violation signal raised. **
Access violation or stack overflow. Please contact Intel Support for assistance.
it may be because the variables containing dependency specs are too long. There
are two parameters to help with long path names. Firstly, the
``install_hash_length`` parameter can set the length of the hash in the
installation path from 1 to 32. The default path uses the full 32 characters.
Secondly, it is also possible to modify the entire installation
scheme. By default Spack uses
``{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}``
Secondly, it is
also possible to modify the entire installation scheme. By default
Spack uses
``${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}``
where the tokens that are available for use in this directive are the
same as those understood by the :meth:`~spack.spec.Spec.format`
method. Using this parameter it is possible to use a different package
layout or reduce the depth of the installation paths. For example
same as those understood by the ``Spec.format`` method. Using this parameter it
is possible to use a different package layout or reduce the depth of
the installation paths. For example
.. code-block:: yaml
config:
install_path_scheme: '{name}/{version}/{hash:7}'
install_path_scheme: '${PACKAGE}/${VERSION}/${HASH:7}'
would install packages into sub-directories using only the package
name, version and a hash length of 7 characters.
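If only the hash length needs to change, a minimal sketch (the value ``7`` is
illustrative) would be:

.. code-block:: yaml

config:
  install_hash_length: 7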
@@ -84,6 +75,7 @@ the location for each type of module. e.g.:
module_roots:
tcl: $spack/share/spack/modules
lmod: $spack/share/spack/lmod
dotkit: $spack/share/spack/dotkit
See :ref:`modules` for details.
@@ -93,46 +85,40 @@ See :ref:`modules` for details.
Spack is designed to run out of a user home directory, and on many
systems the home directory is a (slow) network file system. On most systems,
building in a temporary file system is faster. Usually, there is also more
space available in the temporary location than in the home directory. If the
username is not already in the path, Spack will append the value of ``$user`` to
the selected ``build_stage`` path.
.. warning:: We highly recommend specifying ``build_stage`` paths that
distinguish between staging and other activities to ensure
``spack clean`` does not inadvertently remove unrelated files.
Spack prepends ``spack-stage-`` to temporary staging directory names to
reduce this risk. Using a combination of ``spack`` and or ``stage`` in
each specified path, as shown in the default settings and documented
examples, will add another layer of protection.
building in a temporary file system results in faster builds than building
in the home directory. Usually, there is also more space available in
the temporary location than in the home directory. So, Spack tries to
create build stages in temporary space.
By default, Spack's ``build_stage`` is configured like this:
.. code-block:: yaml
build_stage:
- $tempdir/$user/spack-stage
- ~/.spack/stage
- $tempdir
- /nfs/tmp2/$user
- $spack/var/spack/stage
This can be an ordered list of paths that Spack should search when trying to
This is an ordered list of paths that Spack should search when trying to
find a temporary directory for the build stage. The list is searched in
order, and Spack will use the first directory to which it has write access.
Specifying `~/.spack/stage` first will ensure each user builds in their home
directory. The historic Spack stage path `$spack/var/spack/stage` will build
directly inside the Spack instance. See :ref:`config-file-variables` for more
on ``$tempdir`` and ``$spack``.
See :ref:`config-file-variables` for more on ``$tempdir`` and ``$spack``.
When Spack builds a package, it creates a temporary directory within the
``build_stage``. After the package is successfully installed, Spack deletes
the temporary directory it used to build. Unsuccessful builds are not
deleted, but you can manually purge them with :ref:`spack clean --stage
``build_stage``, and it creates a symbolic link to that directory in
``$spack/var/spack/stage``. This is used to track the stage.
After a package is successfully installed, Spack deletes the temporary
directory it used to build. Unsuccessful builds are not deleted, but you
can manually purge them with :ref:`spack clean --stage
<cmd-spack-clean>`.
.. note::
The build will fail if none of the directories in the ``build_stage``
list is writable; user- and site-specific settings are searched first.
The last item in the list is ``$spack/var/spack/stage``. If this is the
only writable directory in the ``build_stage`` list, Spack will build
*directly* in ``$spack/var/spack/stage`` and will not link to temporary
space.
--------------------
``source_cache``
@@ -194,23 +180,16 @@ set ``dirty`` to ``true`` to skip the cleaning step and make all builds
"dirty" by default. Be aware that this will reduce the reproducibility
of builds.
.. _build-jobs:
--------------
``build_jobs``
--------------
Unless overridden in a package or on the command line, Spack builds all
packages in parallel. The default parallelism is equal to the number of
cores on your machine, up to 16. Parallelism cannot exceed the number of
cores available on the host. For a build system that uses Makefiles, this
means running:
- ``make -j<build_jobs>``, when ``build_jobs`` is less than the number of
cores on the machine
- ``make -j<ncores>``, when ``build_jobs`` is greater or equal to the
number of cores on the machine
packages in parallel. For a build system that uses Makefiles, this means
running ``make -j<build_jobs>``, where ``build_jobs`` is the number of
threads to use.
The default parallelism is equal to the number of cores on your machine.
If you work on a shared login node or have a strict ulimit, it may be
necessary to set the default to a lower value. By setting ``build_jobs``
to 4, for example, commands like ``spack install`` will run ``make -j4``
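The corresponding entry in ``config.yaml`` would be:

.. code-block:: yaml

config:
  build_jobs: 4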
@@ -223,7 +202,7 @@ To build all software in serial, set ``build_jobs`` to 1.
--------------------
When set to ``true`` Spack will use ccache to cache compiles. This is
useful specifically in two cases: (1) when using ``spack dev-build``, and (2)
useful specifically in two cases: (1) when using ``spack setup``, and (2)
when building the same package with many different variants. The default is
``false``.
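Enabling it is a one-line change in ``config.yaml``:

.. code-block:: yaml

config:
  ccache: true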
@@ -236,24 +215,3 @@ ccache`` to learn more about the default settings and how to change
them). Please note that we currently disable ccache's ``hash_dir``
feature to avoid an issue with the stage directory (see
https://github.com/LLNL/spack/pull/3761#issuecomment-294352232).
------------------
``shared_linking``
------------------
Control whether Spack embeds ``RPATH`` or ``RUNPATH`` attributes in ELF binaries
so that they can find their dependencies. Has no effect on macOS.
Two options are allowed:
1. ``rpath`` uses ``RPATH`` and forces the ``--disable-new-dtags`` flag to be passed to the linker
2. ``runpath`` uses ``RUNPATH`` and forces the ``--enable-new-dtags`` flag to be passed to the linker
``RPATH`` search paths have higher precedence than ``LD_LIBRARY_PATH``
and ld.so will search for libraries in transitive ``RPATHs`` of
parent objects.
``RUNPATH`` search paths have lower precedence than ``LD_LIBRARY_PATH``,
and ld.so will ONLY search for dependencies in the ``RUNPATH`` of
the loading object.
DO NOT MIX the two options within the same install tree.
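Selecting one of the two modes is a single ``config.yaml`` setting, e.g.:

.. code-block:: yaml

config:
  shared_linking: runpath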

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -36,8 +36,8 @@ Here is an example ``config.yaml`` file:
module_roots:
lmod: $spack/share/spack/lmod
build_stage:
- $tempdir/$user/spack-stage
- ~/.spack/stage
- $tempdir
- /nfs/tmp2/$user
Each Spack configuration file is nested under a top-level section
corresponding to its name. So, ``config.yaml`` starts with ``config:``,
@@ -78,13 +78,6 @@ are six configuration scopes. From lowest to highest:
If multiple scopes are listed on the command line, they are ordered
from lowest to highest precedence.
#. **environment**: When using Spack :ref:`environments`, Spack reads
additional configuration from the environment file. See
:ref:`environment-configuration` for further details on these
scopes. Environment scopes can be referenced from the command line
as ``env:name`` (to reference environment ``foo``, use
``env:foo``).
#. **command line**: Build settings specified on the command line take
precedence over all other scopes.
@@ -199,11 +192,10 @@ with MPICH. You can create different configuration scopes for use with
Platform-specific Scopes
------------------------
For each scope above (excluding environment scopes), there can also be
platform-specific settings. For example, on most platforms, GCC is
the preferred compiler. However, on macOS (darwin), Clang often works
for more packages, and is set as the default compiler. This
configuration is set in
For each scope above, there can also be platform-specific settings.
For example, on most platforms, GCC is the preferred compiler.
However, on macOS (darwin), Clang often works for more packages,
and is set as the default compiler. This configuration is set in
``$(prefix)/etc/spack/defaults/darwin/packages.yaml``. It will take
precedence over settings in the ``defaults`` scope, but can still be
overridden by settings in ``system``, ``system/darwin``, ``site``,
@@ -252,8 +244,8 @@ your configurations look like this:
module_roots:
lmod: $spack/share/spack/lmod
build_stage:
- $tempdir/$user/spack-stage
- ~/.spack/stage
- $tempdir
- /nfs/tmp2/$user
.. code-block:: yaml
@@ -277,8 +269,8 @@ command:
module_roots:
lmod: $spack/share/spack/lmod
build_stage:
- $tempdir/$user/spack-stage
- ~/.spack/stage
- $tempdir
- /nfs/tmp2/$user
.. _config-overrides:
@@ -320,8 +312,8 @@ Let's revisit the ``config.yaml`` example one more time. The
:caption: $(prefix)/etc/spack/defaults/config.yaml
build_stage:
- $tempdir/$user/spack-stage
- ~/.spack/stage
- $tempdir
- /nfs/tmp2/$user
Suppose the user configuration adds its *own* list of ``build_stage``
@@ -331,7 +323,7 @@ paths:
:caption: ~/.spack/config.yaml
build_stage:
- /lustre-scratch/$user/spack
- /lustre-scratch/$user
- ~/mystage
@@ -349,10 +341,10 @@ get config`` shows the result:
module_roots:
lmod: $spack/share/spack/lmod
build_stage:
- /lustre-scratch/$user/spack
- /lustre-scratch/$user
- ~/mystage
- $tempdir/$user/spack-stage
- ~/.spack/stage
- $tempdir
- /nfs/tmp2/$user
As in :ref:`config-overrides`, the higher-precedence scope can
@@ -364,7 +356,7 @@ user config looked like this:
:caption: ~/.spack/config.yaml
build_stage::
- /lustre-scratch/$user/spack
- /lustre-scratch/$user
- ~/mystage
@@ -379,7 +371,7 @@ The merged configuration would look like this:
module_roots:
lmod: $spack/share/spack/lmod
build_stage:
- /lustre-scratch/$user/spack
- /lustre-scratch/$user
- ~/mystage
@@ -435,33 +427,6 @@ home directory, and ``~user`` will expand to a specified user's home
directory. The ``~`` must appear at the beginning of the path, or Spack
will not expand it.
.. _configuration_environment_variables:
-------------------------
Environment Modifications
-------------------------
Spack allows you to prescribe custom environment modifications in a few places
within its configuration files. Wherever these modifications are allowed,
they are specified as a dictionary, as in the following example:
.. code-block:: yaml
environment:
  set:
    LICENSE_FILE: '/path/to/license'
  unset:
  - CPATH
  - LIBRARY_PATH
  append_path:
    PATH: '/new/bin/dir'
The permitted actions are ``set``, ``unset``, ``append_path``,
``prepend_path``, and ``remove_path``. All of them require a dictionary
mapping variable names to the values used for the modification, except
``unset``, which requires just a list of variable names.
No particular order is guaranteed for the execution of these modifications.
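The two actions not shown above take the same dictionary form; a minimal
sketch, with hypothetical paths:

.. code-block:: yaml

environment:
  prepend_path:
    LD_LIBRARY_PATH: '/new/lib/dir'
  remove_path:
    PATH: '/old/bin/dir'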
----------------------------
Seeing Spack's Configuration
----------------------------
@@ -494,13 +459,14 @@ account all scopes. For example, to see the fully merged
install_tree: $spack/opt/spack
template_dirs:
- $spack/templates
directory_layout: {architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}
directory_layout: ${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}
module_roots:
tcl: $spack/share/spack/modules
lmod: $spack/share/spack/lmod
dotkit: $spack/share/spack/dotkit
build_stage:
- $tempdir/$user/spack-stage
- ~/.spack/stage
- $tempdir
- /nfs/tmp2/$user
- $spack/var/spack/stage
source_cache: $spack/var/spack/cache
misc_cache: ~/.spack/cache
@@ -544,13 +510,14 @@ down the problem:
./my-scope/config.yaml:2 install_tree: /path/to/some/tree
/home/myuser/spack/etc/spack/defaults/config.yaml:23 template_dirs:
/home/myuser/spack/etc/spack/defaults/config.yaml:24 - $spack/templates
/home/myuser/spack/etc/spack/defaults/config.yaml:28 directory_layout: {architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}
/home/myuser/spack/etc/spack/defaults/config.yaml:28 directory_layout: ${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}
/home/myuser/spack/etc/spack/defaults/config.yaml:32 module_roots:
/home/myuser/spack/etc/spack/defaults/config.yaml:33 tcl: $spack/share/spack/modules
/home/myuser/spack/etc/spack/defaults/config.yaml:34 lmod: $spack/share/spack/lmod
/home/myuser/spack/etc/spack/defaults/config.yaml:35 dotkit: $spack/share/spack/dotkit
/home/myuser/spack/etc/spack/defaults/config.yaml:49 build_stage:
/home/myuser/spack/etc/spack/defaults/config.yaml:50 - $tempdir/$user/spack-stage
/home/myuser/spack/etc/spack/defaults/config.yaml:51 - ~/.spack/stage
/home/myuser/spack/etc/spack/defaults/config.yaml:50 - $tempdir
/home/myuser/spack/etc/spack/defaults/config.yaml:51 - /nfs/tmp2/$user
/home/myuser/spack/etc/spack/defaults/config.yaml:52 - $spack/var/spack/stage
/home/myuser/spack/etc/spack/defaults/config.yaml:57 source_cache: $spack/var/spack/cache
/home/myuser/spack/etc/spack/defaults/config.yaml:62 misc_cache: ~/.spack/cache

View File

@@ -1,535 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _containers:
================
Container Images
================
Spack :ref:`environments` are a great tool to create container images, but
preparing one that is suitable for production requires some more boilerplate
than just:
.. code-block:: docker
COPY spack.yaml /environment
RUN spack -e /environment install
Additional actions may be needed to minimize the size of the
container, or to update the system software that is installed in the base
image, or to set up a proper entrypoint to run the image. These tasks are
usually both necessary and repetitive, so Spack comes with a command
to generate recipes for container images starting from a ``spack.yaml``.
--------------------
A Quick Introduction
--------------------
Consider having a Spack environment like the following:
.. code-block:: yaml
spack:
  specs:
  - gromacs+mpi
  - mpich
Producing a ``Dockerfile`` from it is as simple as moving to the directory
where the ``spack.yaml`` file is stored and running the following command:
.. code-block:: console
$ spack containerize > Dockerfile
The ``Dockerfile`` that gets created uses multi-stage builds and
other techniques to minimize the size of the final image:
.. code-block:: docker
# Build stage with Spack pre-installed and ready to be used
FROM spack/ubuntu-bionic:latest as builder
# What we want to install and how we want to install it
# is specified in a manifest file (spack.yaml)
RUN mkdir /opt/spack-environment \
&& (echo "spack:" \
&& echo " specs:" \
&& echo " - gromacs+mpi" \
&& echo " - mpich" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
# Install the software, remove unnecessary deps
RUN cd /opt/spack-environment && spack env activate . && spack install --fail-fast && spack gc -y
# Strip all the binaries
RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
xargs file -i | \
grep 'charset=binary' | \
grep 'x-executable\|x-archive\|x-sharedlib' | \
awk -F: '{print $1}' | xargs strip -s
# Modifications to the environment that are necessary to run
RUN cd /opt/spack-environment && \
spack env activate --sh -d . >> /etc/profile.d/z10_spack_environment.sh
# Bare OS image to run the installed executables
FROM ubuntu:18.04
COPY --from=builder /opt/spack-environment /opt/spack-environment
COPY --from=builder /opt/software /opt/software
COPY --from=builder /opt/view /opt/view
COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh
ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"]
The image itself can then be built and run in the usual way, with any of the
tools suitable for the task. For instance, if we decided to use ``docker``:
.. code-block:: bash
$ spack containerize > Dockerfile
$ docker build -t myimage .
[ ... ]
$ docker run -it myimage
The various components involved in the generation of the recipe and their
configuration are discussed in detail in the sections below.
.. _container_spack_images:
--------------------------
Spack Images on Docker Hub
--------------------------
Docker images with Spack preinstalled and ready to be used are
built on `Docker Hub <https://hub.docker.com/u/spack>`_
at every push to ``develop`` or to a release branch. The operating systems
that are currently supported are summarized in the table below:
.. _containers-supported-os:
.. list-table:: Supported operating systems
:header-rows: 1
* - Operating System
- Base Image
- Spack Image
* - Ubuntu 16.04
- ``ubuntu:16.04``
- ``spack/ubuntu-xenial``
* - Ubuntu 18.04
- ``ubuntu:18.04``
- ``spack/ubuntu-bionic``
* - CentOS 6
- ``centos:6``
- ``spack/centos6``
* - CentOS 7
- ``centos:7``
- ``spack/centos7``
All the images are tagged with the corresponding release of Spack:
.. image:: dockerhub_spack.png
with the exception of the ``latest`` tag, which points to the HEAD
of the ``develop`` branch. These images are available for anyone
to use and take care of all the repetitive tasks that are necessary
to set up Spack within a container. The container recipes generated
by Spack use them as the default base images for their ``build`` stage,
though users can also supply their own custom base images to
accommodate complex use cases.
---------------------------------
Creating Images From Environments
---------------------------------
Any Spack Environment can be used for the automatic generation of container
recipes. Sensible defaults are provided for things like the base image or the
version of Spack used in the image.
If finer tuning is needed, it can be obtained by adding the relevant metadata
under the ``container`` attribute of the environment:
.. code-block:: yaml
spack:
  specs:
  - gromacs+mpi
  - mpich
  container:
    # Select the format of the recipe e.g. docker,
    # singularity or anything else that is currently supported
    format: docker
    # Sets the base images for the stages where Spack builds the
    # software or where the software gets installed after being built.
    images:
      os: "centos:7"
      spack: develop
    # Whether or not to strip binaries
    strip: true
    # Additional system packages that are needed at runtime
    os_packages:
      final:
      - libgomp
    # Extra instructions
    extra_instructions:
      final: |
        RUN echo 'export PS1="\[$(tput bold)\]\[$(tput setaf 1)\][gromacs]\[$(tput setaf 2)\]\u\[$(tput sgr0)\]:\w $ "' >> ~/.bashrc
    # Labels for the image
    labels:
      app: "gromacs"
      mpi: "mpich"
A detailed description of the options available can be found in the
:ref:`container_config_options` section.
-------------------
Setting Base Images
-------------------
The ``images`` subsection is used to select both the image where
Spack builds the software and the image where the built software
is installed. This attribute can be set in two different ways and
which one to use depends on the use case at hand.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Use Official Spack Images From Dockerhub
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To generate a recipe that uses an official Docker image from the
Spack organization to build the software and the corresponding official OS image
to install the built software, all the user has to do is specify:
1. An operating system under ``images:os``
2. A Spack version under ``images:spack``
Any combination of these two values that can be mapped to one of the images
discussed in :ref:`container_spack_images` is allowed. For instance, the
following ``spack.yaml``:
.. code-block:: yaml
spack:
  specs:
  - gromacs+mpi
  - mpich
  container:
    images:
      os: centos:7
      spack: 0.15.4
uses ``spack/centos7:0.15.4`` for the stage where the software is built and
``centos:7`` for the stage where it is installed:
.. code-block:: docker
# Build stage with Spack pre-installed and ready to be used
FROM spack/centos7:0.15.4 as builder
# What we want to install and how we want to install it
# is specified in a manifest file (spack.yaml)
RUN mkdir /opt/spack-environment \
&& (echo "spack:" \
&& echo " specs:" \
&& echo " - gromacs+mpi" \
&& echo " - mpich" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
[ ... ]
# Bare OS image to run the installed executables
FROM centos:7
COPY --from=builder /opt/spack-environment /opt/spack-environment
COPY --from=builder /opt/software /opt/software
COPY --from=builder /opt/view /opt/view
COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh
ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"]
This method of selecting base images is the simpler of the two, and we advise
using it whenever possible. There are cases, though, where the official Spack
images are not enough to fit production needs. In these situations users can
manually select which base image to start from in the recipe, as we'll see next.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Use Custom Images Provided by Users
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Consider, as an example, building a production grade image for a CUDA
application. The best strategy would probably be to build on top of
images provided by the vendor and regard CUDA as an external package.
Spack doesn't currently provide an official image with CUDA configured
this way, but users can build it on their own and then configure the
environment to explicitly pull it. This requires users to:
1. Specify the image used to build the software under ``images:build``
2. Specify the image used to install the built software under ``images:final``
A ``spack.yaml`` like the following:
.. code-block:: yaml
spack:
  specs:
  - gromacs@2019.4+cuda build_type=Release
  - mpich
  - fftw precision=float
  packages:
    cuda:
      buildable: False
      externals:
      - spec: cuda%gcc
        prefix: /usr/local/cuda
  container:
    images:
      build: custom/cuda-10.1-ubuntu18.04:latest
      final: nvidia/cuda:10.1-base-ubuntu18.04
produces, for instance, the following ``Dockerfile``:
.. code-block:: docker
# Build stage with Spack pre-installed and ready to be used
FROM custom/cuda-10.1-ubuntu18.04:latest as builder
# What we want to install and how we want to install it
# is specified in a manifest file (spack.yaml)
RUN mkdir /opt/spack-environment \
&& (echo "spack:" \
&& echo " specs:" \
&& echo " - gromacs@2019.4+cuda build_type=Release" \
&& echo " - mpich" \
&& echo " - fftw precision=float" \
&& echo " packages:" \
&& echo " cuda:" \
&& echo " buildable: false" \
&& echo " externals:" \
&& echo " - spec: cuda%gcc" \
&& echo " prefix: /usr/local/cuda" \
&& echo " concretization: together" \
&& echo " config:" \
&& echo " install_tree: /opt/software" \
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
# Install the software, remove unnecessary deps
RUN cd /opt/spack-environment && spack env activate . && spack install --fail-fast && spack gc -y
# Strip all the binaries
RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
xargs file -i | \
grep 'charset=binary' | \
grep 'x-executable\|x-archive\|x-sharedlib' | \
awk -F: '{print $1}' | xargs strip -s
# Modifications to the environment that are necessary to run
RUN cd /opt/spack-environment && \
spack env activate --sh -d . >> /etc/profile.d/z10_spack_environment.sh
# Bare OS image to run the installed executables
FROM nvidia/cuda:10.1-base-ubuntu18.04
COPY --from=builder /opt/spack-environment /opt/spack-environment
COPY --from=builder /opt/software /opt/software
COPY --from=builder /opt/view /opt/view
COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh
ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"]
where the base images for both stages are completely custom.
This second mode of selecting base images is more flexible than just
choosing an operating system and a Spack version, but it is also more
demanding. Users may need to build their own base images, and it is also
their responsibility to ensure that:
1. Spack is available in the ``build`` stage and set up correctly to install the required software
2. The artifacts produced in the ``build`` stage can be executed in the ``final`` stage
Therefore we don't recommend its use in cases that can be otherwise
covered by the simplified mode shown first.
----------------------------
Singularity Definition Files
----------------------------
In addition to producing recipes in ``Dockerfile`` format, Spack can produce
Singularity Definition Files by just changing the value of the ``format``
attribute:
.. code-block:: console
$ cat spack.yaml
spack:
  specs:
  - hdf5~mpi
  container:
    format: singularity

$ spack containerize > hdf5.def
$ sudo singularity build hdf5.sif hdf5.def
The minimum version of Singularity required to build a SIF (Singularity Image Format)
image from the recipes generated by Spack is ``3.5.3``.
.. _container_config_options:
-----------------------
Configuration Reference
-----------------------
The tables below describe all the configuration options that are currently supported
to customize the generation of container recipes:
.. list-table:: General configuration options for the ``container`` section of ``spack.yaml``
:header-rows: 1
* - Option Name
- Description
- Allowed Values
- Required
* - ``format``
- The format of the recipe
- ``docker`` or ``singularity``
- Yes
* - ``images:os``
- Operating system used as a base for the image
- See :ref:`containers-supported-os`
- Yes, if using constrained selection of base images
* - ``images:spack``
- Version of Spack used in the ``build`` stage
- Valid tags for ``base:image``
- Yes, if using constrained selection of base images
* - ``images:build``
- Image to be used in the ``build`` stage
- Any valid container image
- Yes, if using custom selection of base images
* - ``images:final``
- Image to be used in the ``final`` stage
- Any valid container image
- Yes, if using custom selection of base images
* - ``strip``
- Whether to strip binaries
- ``true`` (default) or ``false``
- No
* - ``os_packages:command``
- Tool used to manage system packages
- ``apt``, ``yum``
- Only with custom base images
* - ``os_packages:update``
- Whether or not to update the list of available packages
- True or False (default: True)
- No
* - ``os_packages:build``
- System packages needed at build-time
- Valid packages for the current OS
- No
* - ``os_packages:final``
- System packages needed at run-time
- Valid packages for the current OS
- No
* - ``extra_instructions:build``
- Extra instructions (e.g. `RUN`, `COPY`, etc.) at the end of the ``build`` stage
- Anything understood by the current ``format``
- No
* - ``extra_instructions:final``
- Extra instructions (e.g. `RUN`, `COPY`, etc.) at the end of the ``final`` stage
- Anything understood by the current ``format``
- No
* - ``labels``
- Labels to tag the image
- Pairs of key-value strings
- No
.. list-table:: Configuration options specific to Singularity
:header-rows: 1
* - Option Name
- Description
- Allowed Values
- Required
* - ``singularity:runscript``
- Content of ``%runscript``
- Any valid script
- No
* - ``singularity:startscript``
- Content of ``%startscript``
- Any valid script
- No
* - ``singularity:test``
- Content of ``%test``
- Any valid script
- No
* - ``singularity:help``
- Description of the image
- Description string
- No
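For instance, assuming the ``singularity`` options nest under the
``container`` attribute like the options above, a custom runscript could be
sketched as follows (the executable path is hypothetical):

.. code-block:: yaml

container:
  format: singularity
  singularity:
    runscript: |
      exec /opt/view/bin/gmx "$@"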
--------------
Best Practices
--------------
^^^
MPI
^^^
Because OpenMPI, Spack's default MPI implementation, depends on Fortran,
consider adding ``gfortran`` to the ``apt-get install`` list.
Recent versions of OpenMPI will require you to pass ``--allow-run-as-root``
to your ``mpirun`` calls if started as root user inside Docker.
For execution on HPC clusters, it can be helpful to import the docker
image into Singularity in order to start a program with an *external*
MPI. Otherwise, also add ``openssh-server`` to the ``apt-get install`` list.
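For example, Singularity can convert a Docker image pulled from a registry
(the image name here is hypothetical):

.. code-block:: console

$ singularity build myapp.sif docker://myregistry/myapp:latest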
^^^^
CUDA
^^^^
Starting from CUDA 9.0, Nvidia provides minimal CUDA images based on
Ubuntu. Please see `their instructions <https://hub.docker.com/r/nvidia/cuda/>`_.
Avoid double-installing CUDA by adding, e.g.
.. code-block:: yaml
packages:
  cuda:
    externals:
    - spec: "cuda@9.0.176%gcc@5.4.0 arch=linux-ubuntu16-x86_64"
      prefix: /usr/local/cuda
    buildable: False
to your ``spack.yaml``.
Users will either need ``nvidia-docker`` or e.g. Singularity to *execute*
device kernels.
^^^^^^^^^^^^^^^^^^^^^^^^^
Docker on Windows and OSX
^^^^^^^^^^^^^^^^^^^^^^^^^
On macOS and Windows, Docker runs on a hypervisor that is not allocated much
memory by default, and some Spack packages may fail to build due to lack of
memory. To work around this issue, consider configuring your Docker installation
to use more of your host memory. In some cases, you can also ease the memory
pressure on parallel builds by limiting the parallelism in your ``config.yaml``.
.. code-block:: yaml
config:
  build_jobs: 2

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -27,28 +27,17 @@ correspond to one feature/bugfix/extension/etc. One can create PRs with
changes relevant to different ideas; however, reviewing such PRs becomes tedious
and error-prone. If possible, try to follow the **one-PR-one-package/feature** rule.
--------
Branches
--------
Spack's ``develop`` branch has the latest contributions. Nearly all pull
requests should start from ``develop`` and target ``develop``.
There is a branch for each major release series. Release branches
originate from ``develop`` and have tags for each point release in the
series. For example, ``releases/v0.14`` has tags for ``0.14.0``,
``0.14.1``, ``0.14.2``, etc. versions of Spack. We backport important bug
fixes to these branches, but we do not advance the package versions or
make other changes that would change the way Spack concretizes
dependencies. Currently, the maintainers manage these branches by
cherry-picking from ``develop``. See :ref:`releases` for more
information.
Spack uses a rough approximation of the `Git Flow <http://nvie.com/posts/a-successful-git-branching-model/>`_
branching model. The develop branch contains the latest contributions, and
master is always tagged and points to the latest stable release. Therefore, when
you send your request, make ``develop`` the destination branch on the
`Spack repository <https://github.com/spack/spack>`_.
----------------------
Continuous Integration
----------------------
Spack uses `Github Actions <https://docs.github.com/en/actions>`_ for Continuous Integration
Spack uses `Travis CI <https://travis-ci.org/spack/spack>`_ for Continuous Integration
testing. This means that every time you submit a pull request, a series of tests will
be run to make sure you didn't accidentally introduce any bugs into Spack. **Your PR
will not be accepted until it passes all of these tests.** While you can certainly wait
@@ -57,25 +46,24 @@ locally to speed up the review process.
.. note::
Oftentimes, CI will fail for reasons other than a problem with your PR.
Oftentimes, Travis will fail for reasons other than a problem with your PR.
For example, apt-get, pip, or homebrew will fail to download one of the
dependencies for the test suite, or a transient bug will cause the unit tests
to timeout. If any job fails, click the "Details" link and click on the test(s)
to timeout. If Travis fails, click the "Details" link and click on the test(s)
that is failing. If it doesn't look like it is failing for reasons related to
your PR, you have two options. If you have write permissions for the Spack
repository, you should see a "Restart workflow" button on the right-hand side. If
repository, you should see a "Restart job" button on the right-hand side. If
not, you can close and reopen your PR to rerun all of the tests. If the same
test keeps failing, there may be a problem with your PR. If you notice that
every recent PR is failing with the same error message, it may be that an issue
occurred with the CI infrastructure or one of Spack's dependencies put out a
new release that is causing problems. If this is the case, please file an issue.
every recent PR is failing with the same error message, it may be that Travis
is down or one of Spack's dependencies put out a new release that is causing
problems. If this is the case, please file an issue.
We currently test against Python 2.6, 2.7, and 3.5-3.7 on both macOS and Linux and
If you take a look in ``$SPACK_ROOT/.travis.yml``, you'll notice that we test
against Python 2.6, 2.7, and 3.4-3.7 on both macOS and Linux. We currently
perform 3 types of tests:
.. _cmd-spack-unit-test:
^^^^^^^^^^
Unit Tests
^^^^^^^^^^
@@ -96,109 +84,62 @@ To run *all* of the unit tests, use:
.. code-block:: console
$ spack unit-test
$ spack test
These tests may take several minutes to complete. If you know you are
only modifying a single Spack feature, you can run subsets of tests at a
time. For example, this would run all the tests in
``lib/spack/spack/test/architecture.py``:
These tests may take several minutes to complete. If you know you are only
modifying a single Spack feature, you can run a single unit test at a time:
.. code-block:: console
$ spack unit-test lib/spack/spack/test/architecture.py
$ spack test architecture
And this would run the ``test_platform`` test from that file:
This allows you to develop iteratively: make a change, test that change, make
another change, test that change, etc. To get a list of all available unit
tests, run:
.. command-output:: spack test --list
A more detailed list of available unit tests can be found by running
``spack test --long-list``.
By default, ``pytest`` captures the output of all unit tests. If you add print
statements to a unit test and want to see the output, simply run:
.. code-block:: console
$ spack unit-test lib/spack/spack/test/architecture.py::test_platform
$ spack test -s -k architecture
This allows you to develop iteratively: make a change, test that change,
make another change, test that change, etc. We use `pytest
<http://pytest.org/>`_ as our tests framework, and these types of
arguments are just passed to the ``pytest`` command underneath. See `the
pytest docs
<http://doc.pytest.org/en/latest/usage.html#specifying-tests-selecting-tests>`_
for more details on test selection syntax.
``spack unit-test`` has a few special options that can help you
understand what tests are available. To get a list of all available
unit test files, run:
.. command-output:: spack unit-test --list
:ellipsis: 5
To see a more detailed list of available unit tests, use ``spack
unit-test --list-long``:
.. command-output:: spack unit-test --list-long
:ellipsis: 10
And to see the fully qualified names of all tests, use ``--list-names``:
.. command-output:: spack unit-test --list-names
:ellipsis: 5
You can combine these with ``pytest`` arguments to restrict which tests
you want to know about. For example, to see just the tests in
``architecture.py``:
.. command-output:: spack unit-test --list-long lib/spack/spack/test/architecture.py
You can also combine any of these options with a ``pytest`` keyword
search. See the `pytest usage docs
<https://docs.pytest.org/en/stable/usage.html#specifying-tests-selecting-tests>`_:
for more details on test selection syntax. For example, to see the names of all tests that have "spec"
or "concretize" somewhere in their names:
.. command-output:: spack unit-test --list-names -k "spec and concretize"
By default, ``pytest`` captures the output of all unit tests, and it will
print any captured output for failed tests. Sometimes it's helpful to see
your output interactively, while the tests run (e.g., if you add print
statements to a unit tests). To see the output *live*, use the ``-s``
argument to ``pytest``:
.. code-block:: console
$ spack unit-test -s --list-long lib/spack/spack/test/architecture.py::test_platform
Unit tests are crucial to making sure bugs aren't introduced into
Spack. If you are modifying core Spack libraries or adding new
functionality, please add new unit tests for your feature, and consider
strengthening existing tests. You will likely be asked to do this if you
submit a pull request to the Spack project on GitHub. Check out the
`pytest docs <http://pytest.org/>`_ and feel free to ask for guidance on
how to write tests!
Unit tests are crucial to making sure bugs aren't introduced into Spack. If you
are modifying core Spack libraries or adding new functionality, please consider
adding new unit tests or strengthening existing tests.
.. note::
You may notice the ``share/spack/qa/run-unit-tests`` script in the
repository. This script is designed for CI. It runs the unit
tests and reports coverage statistics back to Codecov. If you want to
run the unit tests yourself, we suggest you use ``spack unit-test``.
There is also a ``run-unit-tests`` script in ``share/spack/qa`` that
runs the unit tests. Afterwards, it reports back to Codecov with the
percentage of Spack that is covered by unit tests. This script is
designed for Travis CI. If you want to run the unit tests yourself, we
suggest you use ``spack test``.
^^^^^^^^^^^^
Style Tests
Flake8 Tests
^^^^^^^^^^^^
Spack uses `Flake8 <http://flake8.pycqa.org/en/latest/>`_ to test for
`PEP 8 <https://www.python.org/dev/peps/pep-0008/>`_ conformance and
`mypy <https://mypy.readthedocs.io/en/stable/>`_ for type checking. PEP 8 is
`PEP 8 <https://www.python.org/dev/peps/pep-0008/>`_ conformance. PEP 8 is
a series of style guides for Python that provide suggestions for everything
from variable naming to indentation. In order to limit the number of PRs that
were mostly style changes, we decided to enforce PEP 8 conformance. Your PR
needs to comply with PEP 8 in order to be accepted, and if it modifies the
spack library it needs to successfully type-check with mypy as well.
needs to comply with PEP 8 in order to be accepted.
Testing for compliance with spack's style is easy. Simply run the ``spack style``
Testing for PEP 8 compliance is easy. Simply run the ``spack flake8``
command:
.. code-block:: console
$ spack style
$ spack flake8
``spack style`` has a couple advantages over running the tools by hand:
``spack flake8`` has a couple advantages over running ``flake8`` by hand:
#. It only tests files that you have modified since branching off of
``develop``.
@@ -209,9 +150,7 @@ command:
checks. For example, URLs are often longer than 80 characters, so we
exempt them from line length checks. We also exempt lines that start
with "homepage", "url", "version", "variant", "depends_on", and
"extends" in ``package.py`` files. This is now also possible when directly
running flake8 if you can use the ``spack`` formatter plugin included with
spack.
"extends" in ``package.py`` files.
More approved flake8 exemptions can be found
`here <https://github.com/spack/spack/blob/develop/.flake8>`_.
@@ -244,14 +183,14 @@ However, if you aren't compliant with PEP 8, flake8 will complain:
Most of the error messages are straightforward, but if you don't understand what
they mean, just ask questions about them when you submit your PR. The line numbers
will change if you add or delete lines, so simply run ``spack style`` again
will change if you add or delete lines, so simply run ``spack flake8`` again
to update them.
.. tip::
Try fixing flake8 errors in reverse order. This eliminates the need for
multiple runs of ``spack style`` just to re-compute line numbers and
makes it much easier to fix errors directly off of the CI output.
multiple runs of ``spack flake8`` just to re-compute line numbers and
makes it much easier to fix errors directly off of the Travis output.
.. warning::
@@ -284,7 +223,8 @@ documentation. In order to prevent things like broken links and missing imports,
we added documentation tests that build the documentation and fail if there
are any warning or error messages.
Building the documentation requires several dependencies:
Building the documentation requires several dependencies, all of which can be
installed with Spack:
* sphinx
* sphinxcontrib-programoutput
@@ -294,18 +234,11 @@ Building the documentation requires several dependencies:
* mercurial
* subversion
All of these can be installed with Spack, e.g.
.. code-block:: console
$ spack install py-sphinx py-sphinxcontrib-programoutput py-sphinx-rtd-theme graphviz git mercurial subversion
.. warning::
Sphinx has `several required dependencies <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/py-sphinx/package.py>`_.
If you're using a ``python`` from Spack and you installed
``py-sphinx`` and friends, you need to make them available to your
``python``. The easiest way to do this is to run:
If you installed ``py-sphinx`` with Spack, make sure to add all of these
dependencies to your ``PYTHONPATH``. The easiest way to do this is to run:
.. code-block:: console
@@ -313,10 +246,8 @@ All of these can be installed with Spack, e.g.
$ spack activate py-sphinx-rtd-theme
$ spack activate py-sphinxcontrib-programoutput
so that all of the dependencies are symlinked into that Python's
tree. Alternatively, you could arrange for their library
directories to be added to PYTHONPATH. If you see an error message
like:
so that all of the dependencies are symlinked to a central location.
If you see an error message like:
.. code-block:: console
@@ -331,7 +262,7 @@ Once all of the dependencies are installed, you can try building the documentati
.. code-block:: console
$ cd path/to/spack/lib/spack/docs/
$ cd "$SPACK_ROOT/lib/spack/docs"
$ make clean
$ make
@@ -343,7 +274,7 @@ your PR is accepted.
There is also a ``run-doc-tests`` script in ``share/spack/qa``. The only
difference between running this script and running ``make`` by hand is that
the script will exit immediately if it encounters an error or warning. This
is necessary for CI. If you made a lot of documentation changes, it is
is necessary for Travis CI. If you made a lot of documentation changes, it is
much quicker to run ``make`` by hand so that you can see all of the warnings
at once.
@@ -401,13 +332,13 @@ coverage. This helps us tell what percentage of lines of code in Spack are
covered by unit tests. Although code covered by unit tests can still contain
bugs, it is much less error prone than code that is not covered by unit tests.
Codecov provides `browser extensions <https://github.com/codecov/sourcegraph-codecov>`_
for Google Chrome and Firefox. These extensions integrate with GitHub
Codecov provides `browser extensions <https://github.com/codecov/browser-extension>`_
for Google Chrome, Firefox, and Opera. These extensions integrate with GitHub
and allow you to see coverage line-by-line when viewing the Spack repository.
If you are new to Spack, a great way to get started is to write unit tests to
increase coverage!
Unlike with CI on Github Actions Codecov tests are not required to pass in order for your
Unlike with Travis, Codecov tests are not required to pass in order for your
PR to be merged. If you modify core Spack libraries, we would greatly
appreciate unit tests that cover these changed lines. Otherwise, we have no
way of knowing whether or not your changes introduce a bug. If you make

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -363,12 +363,11 @@ Developer commands
``spack doc``
^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^
``spack unit-test``
^^^^^^^^^^^^^^^^^^^
.. _cmd-spack-test:
See the :ref:`contributor guide section <cmd-spack-unit-test>` on
``spack unit-test``.
^^^^^^^^^^^^^^
``spack test``
^^^^^^^^^^^^^^
.. _cmd-spack-python:
@@ -396,42 +395,20 @@ other Spack modules:
True
>>>
If you prefer using an IPython interpreter, and IPython is installed,
you can specify the interpreter with ``-i``:
.. code-block:: console
$ spack python -i ipython
Python 3.8.3 (default, May 19 2020, 18:47:26)
Type 'copyright', 'credits' or 'license' for more information
IPython 7.17.0 -- An enhanced Interactive Python. Type '?' for help.
Spack version 0.16.0
Python 3.8.3, Linux x86_64
In [1]:
With either interpreter you can run a single command:
You can also run a single command:
.. code-block:: console
$ spack python -c 'import distro; distro.linux_distribution()'
('Ubuntu', '18.04', 'Bionic Beaver')
$ spack python -i ipython -c 'import distro; distro.linux_distribution()'
Out[1]: ('Ubuntu', '18.04', 'Bionic Beaver')
('Fedora', '25', 'Workstation Edition')
or a file:
.. code-block:: console
$ spack python ~/test_fetching.py
$ spack python -i ipython ~/test_fetching.py
just like you would with the normal ``python`` command.
just like you would with the normal ``python`` command.
.. _cmd-spack-url:
@@ -511,391 +488,10 @@ supply ``--profile`` to Spack on the command line, before any subcommands.
``spack --profile`` output looks like this:
.. command-output:: spack --profile graph hdf5
.. command-output:: spack --profile graph dyninst
:ellipsis: 25
The bottom of the output shows the top most time consuming functions,
slowest on top. The profiling support is from Python's built-in tool,
`cProfile
<https://docs.python.org/2/library/profile.html#module-cProfile>`_.
.. _releases:
--------
Releases
--------
This section documents Spack's release process. It is intended for
project maintainers, as the tasks described here require maintainer
privileges on the Spack repository. For others, we hope this section at
least provides some insight into how the Spack project works.
.. _release-branches:
^^^^^^^^^^^^^^^^
Release branches
^^^^^^^^^^^^^^^^
There are currently two types of Spack releases: :ref:`major releases
<major-releases>` (``0.13.0``, ``0.14.0``, etc.) and :ref:`point releases
<point-releases>` (``0.13.1``, ``0.13.2``, ``0.13.3``, etc.). Here is a
diagram of how Spack release branches work::
o branch: develop (latest version)
|
o merge v0.14.1 into develop
|\
| o branch: releases/v0.14, tag: v0.14.1
o | merge v0.14.0 into develop
|\|
| o tag: v0.14.0
|/
o merge v0.13.2 into develop
|\
| o branch: releases/v0.13, tag: v0.13.2
o | merge v0.13.1 into develop
|\|
| o tag: v0.13.1
o | merge v0.13.0 into develop
|\|
| o tag: v0.13.0
o |
| o
|/
o
The ``develop`` branch has the latest contributions, and nearly all pull
requests target ``develop``.
Each Spack release series also has a corresponding branch, e.g.
``releases/v0.14`` has ``0.14.x`` versions of Spack, and
``releases/v0.13`` has ``0.13.x`` versions. A major release is the first
tagged version on a release branch. Minor releases are back-ported from
develop onto release branches. This is typically done by cherry-picking
bugfix commits off of ``develop``.
To avoid version churn for users of a release series, minor releases
should **not** make changes that would change the concretization of
packages. They should generally only contain fixes to the Spack core.
Both major and minor releases are tagged. After each release, we merge
the release branch back into ``develop`` so that the version bump and any
other release-specific changes are visible in the mainline. As a
convenience, we also tag the latest release as ``releases/latest``,
so that users can easily check it out to get the latest
stable version. See :ref:`merging-releases` for more details.
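For example, you can inspect the current release branches and tags in a
clone of the official repository like this (a sketch; the output shown
here is illustrative and depends on the state of your clone):

.. code-block:: console

$ git fetch --tags origin
$ git branch -r --list 'origin/releases/*'
origin/releases/v0.13
origin/releases/v0.14
$ git tag --list 'v0.14.*'
v0.14.0
v0.14.1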
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Scheduling work for releases
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We schedule work for releases by creating `GitHub projects
<https://github.com/spack/spack/projects>`_. At any time, there may be
several open release projects. For example, here are two releases (from
some past version of the page linked above):
.. image:: images/projects.png
Here, there's one release in progress for ``0.15.1`` and another for
``0.16.0``. Each of these releases has a project board containing issues
and pull requests. GitHub shows a status bar with completed work in
green, work in progress in purple, and work not started yet in gray, so
it's fairly easy to see progress.
Spack's project boards are not firm commitments, and we move work between
releases frequently. If we need to make a release and some tasks are not
yet done, we will simply move them to the next minor or major release, rather
than delaying the release to complete them.
For more on using GitHub project boards, see `GitHub's documentation
<https://docs.github.com/en/github/managing-your-work-on-github/about-project-boards>`_.
.. _major-releases:
^^^^^^^^^^^^^^^^^^^^^
Making Major Releases
^^^^^^^^^^^^^^^^^^^^^
Assuming you've already created a project board and completed the work
for a major release, the steps to make the release are as follows:
#. Create two new project boards:
* One for the next major release
* One for the next point release
#. Move any tasks that aren't done yet to one of the new project boards.
Small bugfixes should go to the next point release. Major features,
refactors, and changes that could affect concretization should go in
the next major release.
#. Create a branch for the release, based on ``develop``:
.. code-block:: console
$ git checkout -b releases/v0.15 develop
For a version ``vX.Y.Z``, the branch's name should be
``releases/vX.Y``. That is, you should create a ``releases/vX.Y``
branch if you are preparing the ``X.Y.0`` release.
#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.13.0
<https://github.com/spack/spack/commit/8eeb64096c98b8a43d1c587f13ece743c864fba9>`_
#. Update ``CHANGELOG.md`` with major highlights in bullet form. Use
proper markdown formatting, like `this example from 0.15.0
<https://github.com/spack/spack/commit/d4bf70d9882fcfe88507e9cb444331d7dd7ba71c>`_.
#. Push the release branch to GitHub.
#. Make sure CI passes on the release branch, including:
* Regular unit tests
* Build tests
* The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_
If CI is not passing, submit pull requests to ``develop`` as normal
and keep rebasing the release branch on ``develop`` until CI passes.
#. Follow the steps in :ref:`publishing-releases`.
#. Follow the steps in :ref:`merging-releases`.
#. Follow the steps in :ref:`announcing-releases`.
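Putting the branch, version-bump, and push steps above together for a
hypothetical ``v0.15.0`` release, the console session might look like
this sketch (the edits to the version file and the changelog are
ordinary commits on the release branch):

.. code-block:: console

$ git checkout -b releases/v0.15 develop
$ $EDITOR lib/spack/spack/__init__.py  # bump the version string
$ $EDITOR CHANGELOG.md                 # add the release highlights
$ git commit -am 'version bump: v0.15.0'
$ git push origin releases/v0.15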
.. _point-releases:
^^^^^^^^^^^^^^^^^^^^^
Making Point Releases
^^^^^^^^^^^^^^^^^^^^^
This assumes you've already created a project board for a point release
and completed the work to be done for the release. To make a point
release:
#. Create one new project board for the next point release.
#. Move any cards that aren't done yet to the next project board.
#. Check out the release branch (it should already exist). For the
``X.Y.Z`` release, the release branch is called ``releases/vX.Y``. For
``v0.15.1``, you would check out ``releases/v0.15``:
.. code-block:: console
$ git checkout releases/v0.15
#. Cherry-pick each pull request in the ``Done`` column of the release
project onto the release branch.
This is **usually** fairly simple since we squash the commits from the
vast majority of pull requests, which means there is only one commit
per pull request to cherry-pick. For example, `this pull request
<https://github.com/spack/spack/pull/15777>`_ has three commits, but
they were squashed into a single commit on merge. You can see the
commit that was created here:
.. image:: images/pr-commit.png
You can easily cherry-pick it like this (assuming you already have the
release branch checked out):
.. code-block:: console
$ git cherry-pick 7e46da7
For pull requests that were rebased, you'll need to cherry-pick each
rebased commit individually. There have not been any rebased PRs like
this in recent point releases.
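If you do need to cherry-pick a rebased pull request, the process is the
same, just with one ``git cherry-pick`` per commit, applied oldest first.
The hashes below are placeholders for the actual commit hashes of the
merged pull request:

.. code-block:: console

$ git cherry-pick 1a2b3c4
$ git cherry-pick 5d6e7f8
$ git cherry-pick 9a0b1c2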
.. warning::
It is important to cherry-pick commits in the order they happened,
otherwise you can get conflicts while cherry-picking. When
cherry-picking onto a point release, look at the merge date,
**not** the number of the pull request or the date it was opened.
Sometimes you may **still** get merge conflicts even if you have
cherry-picked all the commits in order. This generally means there
is some other intervening pull request that the one you're trying
to pick depends on. In these cases, you'll need to make a judgment
call:
1. If the dependency is small, you might just cherry-pick it, too.
If you do this, add it to the release board.
2. If it is large, then you may decide that this fix is not worth
including in a point release, in which case you should remove it
from the release project.
3. You can always decide to manually back-port the fix to the release
branch if neither of the above options makes sense, but this can
require a lot of work. It's seldom the right choice.
#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.14.1
<https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
#. Update ``CHANGELOG.md`` with a list of bugfixes. This is typically just a
summary of the commits you cherry-picked onto the release branch. See
`the changelog from 0.14.1
<https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
#. Push the release branch to GitHub.
#. Make sure CI passes on the release branch, including:
* Regular unit tests
* Build tests
* The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_
If CI does not pass, you'll need to figure out why, and make changes
to the release branch until it does. You can make more commits, modify
or remove cherry-picked commits, or cherry-pick **more** from
``develop`` to make this happen.
#. Follow the steps in :ref:`publishing-releases`.
#. Follow the steps in :ref:`merging-releases`.
#. Follow the steps in :ref:`announcing-releases`.
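As a consolidated sketch, the git work for a hypothetical ``v0.15.1``
point release looks roughly like this (``7e46da7`` is the squashed PR
commit shown above; your release will have its own list of hashes):

.. code-block:: console

$ git checkout releases/v0.15
$ git cherry-pick 7e46da7              # repeat for each PR, in merge order
$ $EDITOR lib/spack/spack/__init__.py  # bump the version string
$ $EDITOR CHANGELOG.md                 # summarize the bugfixes
$ git commit -am 'version bump: v0.15.1'
$ git push origin releases/v0.15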
.. _publishing-releases:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Publishing a release on GitHub
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#. Go to `github.com/spack/spack/releases
<https://github.com/spack/spack/releases>`_ and click ``Draft a new
release``. Set the following:
* ``Tag version`` should start with ``v`` and contain *all three*
parts of the version, e.g. ``v0.15.1``. This is the name of the tag
that will be created.
* ``Target`` should be the ``releases/vX.Y`` branch (e.g., ``releases/v0.15``).
* ``Release title`` should be ``vX.Y.Z`` (To match the tag, e.g., ``v0.15.1``).
* For the text, paste the latest release markdown from your ``CHANGELOG.md``.
You can save the draft and keep coming back to this as you prepare the release.
#. When you are done, click ``Publish release``.
#. Immediately after publishing, go back to
`github.com/spack/spack/releases
<https://github.com/spack/spack/releases>`_ and download the
auto-generated ``.tar.gz`` file for the release. It's the ``Source
code (tar.gz)`` link.
#. Click ``Edit`` on the release you just did and attach the downloaded
release tarball as a binary. This does two things:
#. Makes sure that the hash of our releases doesn't change over time.
GitHub sometimes annoyingly changes the way they generate
tarballs, and then hashes can change if you rely on the
auto-generated tarball links.
#. Gets us download counts on releases visible through the GitHub
API. GitHub tracks downloads of artifacts, but *not* the source
links. See the `releases
page <https://api.github.com/repos/spack/spack/releases>`_ and search
for ``download_count`` to see this.
#. Go to `readthedocs.org <https://readthedocs.org/projects/spack>`_ and activate
the release tag. This builds the documentation and makes the released version
selectable in the versions menu.
.. _merging-releases:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Updating `releases/latest` and `develop`
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If the new release is the **highest** Spack release yet, you should
also tag it as ``releases/latest``. For example, suppose the highest
release is currently ``0.15.3``:
* If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
it with ``releases/latest``, as these are higher than ``0.15.3``.
* If you are making a new release of an **older** major version of
Spack, e.g. ``0.14.4``, then you should not tag it as
``releases/latest`` (as there are newer major versions).
To tag ``releases/latest``, do this:
.. code-block:: console
$ git checkout releases/vX.Y # vX.Y is the new release's branch
$ git tag --force releases/latest
$ git push --force origin releases/latest

The ``--force`` arguments make ``git`` overwrite the existing
``releases/latest`` tag, both locally and on the remote, with the new one.
We also merge each release that we tag as ``releases/latest`` into ``develop``.
Make sure to do this with a merge commit:
.. code-block:: console
$ git checkout develop
$ git merge --no-ff vX.Y.Z # vX.Y.Z is the new release's tag
$ git push
We merge back to ``develop`` because it:
* updates the version and ``CHANGELOG.md`` on ``develop``.
* ensures that your release tag is reachable from the head of
``develop``
We *must* use a real merge commit (via the ``--no-ff`` option) because it
ensures that the release tag is reachable from the tip of ``develop``.
This is necessary for ``spack -V`` to work properly -- it uses ``git
describe --tags`` to find the last reachable tag in the repository and
reports how far we are from it. For example:
.. code-block:: console
$ spack -V
0.14.2-1486-b80d5e74e5
This says that we are at commit ``b80d5e74e5``, which is 1,486 commits
ahead of the ``0.14.2`` release.
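The same information is available directly from ``git``; in a checkout
that can reach the tag, you would see something like:

.. code-block:: console

$ git describe --tags
v0.14.2-1486-gb80d5e74e5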
We put this step last in the process because it's best to do it only once
the release is complete and tagged. If you do it before you've tagged the
release and later decide you want to tag some later commit, you'll need
to merge again.
.. _announcing-releases:
^^^^^^^^^^^^^^^^^^^^
Announcing a release
^^^^^^^^^^^^^^^^^^^^
We announce releases in all of the major Spack communication channels.
Publishing the release takes care of GitHub. The remaining channels are
Twitter, Slack, and the mailing list. Here are the steps:
#. Make a tweet to announce the release. It should link to the release's
page on GitHub. You can base it on `this example tweet
<https://twitter.com/spackpm/status/1231761858182307840>`_.
#. Ping ``@channel`` in ``#general`` on Slack (`spackpm.slack.com
<https://spackpm.slack.com>`_) with a link to the tweet. The tweet
will be shown inline so that you do not have to retype your release
announcement.
#. Email the Spack mailing list to let them know about the release. As
with the tweet, you likely want to link to the release's page on
GitHub. It's also helpful to include some information directly in the
email. You can base yours on this `example email
<https://groups.google.com/forum/#!topic/spack/WT4CT9i_X4s>`_.
Once you've announced the release, congratulations, you're done! You've
finished making the release!

View File

@@ -0,0 +1,41 @@
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _docker_for_developers:
=====================
Docker for Developers
=====================
This guide is intended for people who want to use our prepared Docker
environments to work on developing Spack or on Spack packages. It is
meant to serve as companion documentation to the :ref:`packaging-guide`.
--------
Overview
--------
To get started, all you need is the latest version of ``docker``.
.. code-block:: console
$ cd share/spack/docker
$ source config/ubuntu.bash
$ ./run-image.sh
This command should drop you into an interactive shell where you can run Spack
within an isolated Docker container running Ubuntu. The copy of Spack being
used is tied to the working copy of your cloned git repo, so any changes
you make are immediately reflected in the running Docker container. Feel
free to add or modify any packages, or to hack on Spack itself; your contained
copy of Spack will immediately reflect all changes.
To work within a container running a different Linux distro, source one of the
other environment files under ``config``.
.. code-block:: console
$ source config/fedora.bash
$ ./run-image.sh

Binary image file not shown (removed; 88 KiB)

View File

@@ -1,848 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _environments:
============
Environments
============
An environment is used to group together a set of specs for the
purpose of building, rebuilding and deploying in a coherent fashion.
Environments provide a number of advantages over the *à la carte*
approach of building and loading individual Spack modules:
#. Environments separate the steps of (a) choosing what to
install, (b) concretizing, and (c) installing. This allows
Environments to remain stable and repeatable, even if Spack packages
are upgraded: specs are only re-concretized when the user
explicitly asks for it. It is even possible to reliably
transport environments between different computers running
different versions of Spack!
#. Environments allow several specs to be built at once; a more robust
solution than ad-hoc scripts making multiple calls to ``spack
install``.
#. An Environment that is built as a whole can be loaded as a whole
into the user environment. An Environment can be built to maintain
a filesystem view of its packages, and the environment can load
that view into the user environment at activation time. Spack can
also generate a script to load all modules related to an
environment.
Other packaging systems also provide environments that are similar in
some ways to Spack environments; for example, `Conda environments
<https://conda.io/docs/user-guide/tasks/manage-environments.html>`_ or
`Python Virtual Environments
<https://docs.python.org/3/tutorial/venv.html>`_. Spack environments
provide some distinctive features:
#. A spec installed "in" an environment is no different from the same
spec installed anywhere else in Spack. Environments are assembled
simply by collecting together a set of specs.
#. Spack Environments may contain more than one spec of the same
package.
Spack uses a "manifest and lock" model similar to `Bundler gemfiles
<https://bundler.io/man/gemfile.5.html>`_ and other package
managers. The user input file is named ``spack.yaml`` and the lock
file is named ``spack.lock``.
.. _environments-using:
------------------
Using Environments
------------------
Here we follow a typical use case of creating, concretizing,
installing and loading an environment.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Creating a named Environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
An environment is created by:
.. code-block:: console
$ spack env create myenv
Spack then creates the directory ``var/spack/environments/myenv``.
.. note::
All named environments are stored in the ``var/spack/environments`` folder.
In the ``var/spack/environments/myenv`` directory, Spack creates the
file ``spack.yaml`` and the hidden directory ``.spack-env``.
Spack stores metadata in the ``.spack-env`` directory. User
interaction will occur through the ``spack.yaml`` file and the Spack
commands that affect it. When the environment is concretized, Spack
will create a file ``spack.lock`` with the concrete information for
the environment.
In addition to being the default location for the view associated with
an Environment, the ``.spack-env`` directory also contains:
* ``repo/``: A repo consisting of the Spack packages used in this
environment. This allows the environment to build the same, in
theory, even on different versions of Spack with different
packages!
* ``logs/``: A directory containing the build logs for the packages
in this Environment.
Spack Environments can also be created from either a ``spack.yaml``
manifest or a ``spack.lock`` lockfile. To create an Environment from a
``spack.yaml`` manifest:
.. code-block:: console
$ spack env create myenv spack.yaml
To create an Environment from a ``spack.lock`` lockfile:
.. code-block:: console
$ spack env create myenv spack.lock
Either of these commands can also take a full path to the
initialization file.
A Spack Environment created from a ``spack.yaml`` manifest is
guaranteed to have the same root specs as the original Environment,
but may concretize differently. A Spack Environment created from a
``spack.lock`` lockfile is guaranteed to have the same concrete specs
as the original Environment. Either may obviously then differ as the
user modifies it.
^^^^^^^^^^^^^^^^^^^^^^^^^
Activating an Environment
^^^^^^^^^^^^^^^^^^^^^^^^^
To activate an environment, use the following command:
.. code-block:: console
$ spack env activate myenv
By default, ``spack env activate`` will load the view associated
with the Environment into the user environment. The ``-v,
--with-view`` argument ensures this behavior, and the ``-V,
--without-view`` argument activates the environment without changing
the user environment variables.
The ``-p`` option to the ``spack env activate`` command modifies the
user's prompt to begin with the environment name in brackets.
.. code-block:: console
$ spack env activate -p myenv
[myenv] $ ...
To deactivate an environment, use the command:
.. code-block:: console
$ spack env deactivate
or the shortcut alias
.. code-block:: console
$ despacktivate
If the environment was activated with its view, deactivating the
environment will remove the view from the user environment.
^^^^^^^^^^^^^^^^^^^^^^
Anonymous Environments
^^^^^^^^^^^^^^^^^^^^^^
Any directory can be treated as an environment if it contains a file
``spack.yaml``. To load an anonymous environment, use:
.. code-block:: console
$ spack env activate -d /path/to/directory
Anonymous environments can be created in place using the command:
.. code-block:: console
$ spack env create -d .
In this case, Spack simply creates a ``spack.yaml`` file in the requested
directory.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Environment Sensitive Commands
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Spack commands are environment sensitive. For example, the ``find``
command shows only the specs in the active Environment if an
Environment has been activated. Similarly, the ``install`` and
``uninstall`` commands act on the active environment.
.. code-block:: console
$ spack find
==> 0 installed packages
$ spack install zlib@1.2.11
==> Installing zlib-1.2.11-q6cqrdto4iktfg6qyqcc5u4vmfmwb7iv
==> No binary for zlib-1.2.11-q6cqrdto4iktfg6qyqcc5u4vmfmwb7iv found: installing from source
==> zlib: Executing phase: 'install'
[+] ~/spack/opt/spack/linux-rhel7-broadwell/gcc-8.1.0/zlib-1.2.11-q6cqrdto4iktfg6qyqcc5u4vmfmwb7iv
$ spack env activate myenv
$ spack find
==> In environment myenv
==> No root specs
==> 0 installed packages
$ spack install zlib@1.2.8
==> Installing zlib-1.2.8-yfc7epf57nsfn2gn4notccaiyxha6z7x
==> No binary for zlib-1.2.8-yfc7epf57nsfn2gn4notccaiyxha6z7x found: installing from source
==> zlib: Executing phase: 'install'
[+] ~/spack/opt/spack/linux-rhel7-broadwell/gcc-8.1.0/zlib-1.2.8-yfc7epf57nsfn2gn4notccaiyxha6z7x
==> Updating view at ~/spack/var/spack/environments/myenv/.spack-env/view
$ spack find
==> In environment myenv
==> Root specs
zlib@1.2.8
==> 1 installed package
-- linux-rhel7-broadwell / gcc@8.1.0 ----------------------------
zlib@1.2.8
$ despacktivate
$ spack find
==> 2 installed packages
-- linux-rhel7-broadwell / gcc@8.1.0 ----------------------------
zlib@1.2.8 zlib@1.2.11
Note that when we installed the abstract spec ``zlib@1.2.8``, it was
presented as a root of the Environment. All explicitly installed
packages will be listed as roots of the Environment.
All of the Spack commands that act on the list of installed specs are
Environment-sensitive in this way, including ``install``,
``uninstall``, ``activate``, ``deactivate``, ``find``, ``extensions``,
and more. In the :ref:`environment-configuration` section we will discuss
Environment-sensitive commands further.
^^^^^^^^^^^^^^^^^^^^^
Adding Abstract Specs
^^^^^^^^^^^^^^^^^^^^^
An abstract spec is the user-specified spec before Spack has applied
any defaults or dependency information.
Users can add abstract specs to an Environment using the ``spack add``
command. The most important component of an Environment is a list of
abstract specs.
Adding a spec adds to the manifest (the ``spack.yaml`` file) and to
the roots of the Environment, but does not affect the concrete specs
in the lockfile, nor does it install the spec.
The ``spack add`` command is environment-aware: it adds the spec to the
currently active environment. All environment-aware commands can also
be called with the ``spack -e`` flag to specify the environment.
.. code-block:: console
$ spack env activate myenv
$ spack add mpileaks
or
.. code-block:: console
$ spack -e myenv add python
.. _environments_concretization:
^^^^^^^^^^^^
Concretizing
^^^^^^^^^^^^
Once some user specs have been added to an environment, they can be
concretized. *By default specs are concretized separately*, one after
the other. This mode of operation makes it possible to deploy a full
software stack in which multiple configurations of the same package
need to be installed alongside each other. Central installations done
at HPC centers by system administrators or user support groups
are a common use case that fits this mode of operation.
Environments *can also be configured to concretize all
the root specs in a self-consistent way* to ensure that
each package in the environment comes with a single configuration. This
mode of operation is usually what is required by software developers who
want to deploy their development environment.
Regardless of which mode of operation has been chosen, the following
command will ensure all the root specs are concretized according to the
constraints that are prescribed in the configuration:
.. code-block:: console
[myenv]$ spack concretize
In the case of specs that are not concretized together, the command
above will concretize only the specs that were added and not yet
concretized. Forcing a re-concretization of all the specs can be done
instead with this command:
.. code-block:: console
[myenv]$ spack concretize -f
When the ``-f`` flag is not used to reconcretize all specs, Spack
guarantees that already concretized specs are unchanged in the
environment.
The ``concretize`` command does not install any packages. For packages
that have already been installed outside of the environment, the
process of adding the spec and concretizing is identical to installing
the spec assuming it concretizes to the exact spec that was installed
outside of the environment.
The ``spack find`` command can show concretized specs separately from
installed specs using the ``-c`` (``--concretized``) flag.
.. code-block:: console
[myenv]$ spack add zlib
[myenv]$ spack concretize
[myenv]$ spack find -c
==> In environment myenv
==> Root specs
zlib
==> Concretized roots
-- linux-rhel7-x86_64 / gcc@4.9.3 -------------------------------
zlib@1.2.11
==> 0 installed packages
.. _installing-environment:
^^^^^^^^^^^^^^^^^^^^^^^^^
Installing an Environment
^^^^^^^^^^^^^^^^^^^^^^^^^
In addition to installing individual specs into an Environment, one
can install the entire Environment at once using the command
.. code-block:: console
[myenv]$ spack install
If the Environment has been concretized, Spack will install the
concretized specs. Otherwise, ``spack install`` will first concretize
the Environment and then install the concretized specs.
As it installs, ``spack install`` creates symbolic links in the
``logs/`` directory in the Environment, allowing for easy inspection
of build logs related to that environment. The ``spack install``
command also stores a Spack repo containing the ``package.py`` file
used at install time for each package in the ``repos/`` directory in
the Environment.
^^^^^^^
Loading
^^^^^^^
Once an environment has been installed, the following creates a load
script for it:
.. code-block:: console
$ spack env loads -r
This creates a file called ``loads`` in the environment directory.
Sourcing that file in Bash will make the environment available to the
user; it can be included in ``.bashrc`` files, etc. The ``loads``
file may also be copied out of the environment, renamed, etc.
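For example, for the named environment ``myenv`` used earlier, you might
generate and source the load script as follows (assuming your Spack
clone lives at ``$SPACK_ROOT``):

.. code-block:: console

$ spack env activate myenv
$ spack env loads -r
$ source $SPACK_ROOT/var/spack/environments/myenv/loads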
----------
spack.yaml
----------
Spack environments can be customized at finer granularity by editing
the ``spack.yaml`` manifest file directly.
.. _environment-configuration:
^^^^^^^^^^^^^^^^^^^^^^^^
Configuring Environments
^^^^^^^^^^^^^^^^^^^^^^^^
A variety of Spack behaviors are changed through Spack configuration
files, covered in more detail in the :ref:`configuration`
section.
Spack Environments provide an additional level of configuration scope
between the custom scope and the user scope discussed in the
configuration documentation.
There are two ways to include configuration information in a Spack Environment:
#. Inline in the ``spack.yaml`` file
#. Included in the ``spack.yaml`` file from another file.
Many Spack commands also affect configuration information in files
automatically. Those commands take a ``--scope`` argument, and the
environment can be specified by ``env:NAME`` (to affect environment
``foo``, set ``--scope env:foo``). These commands will automatically
manipulate configuration inline in the ``spack.yaml`` file.
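For example, any configuration-affecting command that accepts ``--scope``
can write into an environment's ``spack.yaml`` this way. A sketch, using
the hypothetical environment name ``myenv``:

.. code-block:: console

$ spack compiler find --scope env:myenv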
"""""""""""""""""""""
Inline configurations
"""""""""""""""""""""
Inline Environment-scope configuration is done using the same yaml
format as standard Spack configuration scopes, covered in the
:ref:`configuration` section. Each section is contained under a
top-level yaml object with its name. For example, a ``spack.yaml``
manifest file containing some package preference configuration (as in
a ``packages.yaml`` file) could contain:
.. code-block:: yaml
spack:
...
packages:
all:
compiler: [intel]
...
This configuration sets the default compiler for all packages to
``intel``.
"""""""""""""""""""""""
Included configurations
"""""""""""""""""""""""
Spack environments allow an ``include`` heading in their yaml
schema. This heading pulls in external configuration files and applies
them to the Environment.
.. code-block:: yaml
spack:
include:
- relative/path/to/config.yaml
- /absolute/path/to/packages.yaml
Environments can include files with either relative or absolute
paths. Inline configurations take precedence over included
configurations, so you don't have to change shared configuration files
to make small changes to an individual Environment. Included configs
listed earlier will have higher precedence, as the included configs are
applied in reverse order.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Manually Editing the Specs List
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The list of abstract/root specs in the Environment is maintained in
the ``spack.yaml`` manifest under the heading ``specs``.
.. code-block:: yaml
spack:
specs:
- ncview
- netcdf
- nco
- py-sphinx
Appending to this list in the yaml is identical to using the ``spack
add`` command from the command line. However, there is more power
available from the yaml file.
"""""""""""""""""""
Spec concretization
"""""""""""""""""""
Specs can be concretized separately or together, as already
explained in :ref:`environments_concretization`. The behavior active
under any environment is determined by the ``concretization`` property:
.. code-block:: yaml
spack:
specs:
- ncview
- netcdf
- nco
- py-sphinx
concretization: together
which can currently take either one of the two allowed values ``together`` or ``separately``
(the default).
.. admonition:: Re-concretization of user specs
When concretizing specs together the entire set of specs will be
re-concretized after any addition of new user specs, to ensure that
the environment remains consistent. When instead the specs are concretized
separately only the new specs will be re-concretized after any addition.
"""""""""""""
Spec Matrices
"""""""""""""
Entries in the ``specs`` list can be individual abstract specs or a
spec matrix.
A spec matrix is a yaml object containing multiple lists of specs, and
evaluates to the cross-product of those specs. Spec matrices also
contain an ``exclude`` directive, which eliminates certain
combinations from the evaluated result.
The following two Environment manifests are identical:
.. code-block:: yaml
spack:
specs:
- zlib %gcc@7.1.0
- zlib %gcc@4.9.3
- libelf %gcc@7.1.0
- libelf %gcc@4.9.3
- libdwarf %gcc@7.1.0
- cmake
spack:
specs:
- matrix:
- [zlib, libelf, libdwarf]
- ['%gcc@7.1.0', '%gcc@4.9.3']
exclude:
- libdwarf%gcc@4.9.3
- cmake
Spec matrices can be used to install swaths of software across various
toolchains.
The concretization logic for spec matrices differs slightly from the
rest of Spack. If a variant or dependency constraint from a matrix is
invalid, Spack will reject the constraint and try again without
it. For example, the following two Environment manifests will produce
the same specs:
.. code-block:: yaml
spack:
specs:
- matrix:
- [zlib, libelf, hdf5+mpi]
- [^mvapich2@2.2, ^openmpi@3.1.0]
spack:
specs:
- zlib
- libelf
- hdf5+mpi ^mvapich2@2.2
- hdf5+mpi ^openmpi@3.1.0
This allows one to create toolchains out of combinations of
constraints and apply them somewhat indiscriminately to packages,
without regard for the applicability of the constraint.
""""""""""""""""""""
Spec List References
""""""""""""""""""""
The last type of possible entry in the specs list is a reference.
The Spack Environment manifest yaml schema contains an additional
heading ``definitions``. Under definitions is an array of yaml
objects. Each object has one or two fields. The one required field is
a name, and the optional field is a ``when`` clause.
The named field is a spec list. The spec list uses the same syntax as
the ``specs`` entry. Each entry in the spec list can be a spec, a spec
matrix, or a reference to an earlier named list. References are
specified using the ``$`` sigil, and are "splatted" into place
(i.e. the elements of the referent are at the same level as the
elements listed separately). As an example, the following two manifest
files are identical.
.. code-block:: yaml
spack:
definitions:
- first: [libelf, libdwarf]
- compilers: ['%gcc', '%intel']
- second:
- $first
- matrix:
- [zlib]
- [$compilers]
specs:
- $second
- cmake
spack:
specs:
- libelf
- libdwarf
- zlib%gcc
- zlib%intel
- cmake
.. note::
Named spec lists in the definitions section may only refer
to a named list defined above itself. Order matters.
In short files like the example, it may be easier to simply list the
included specs. However, for more complicated examples involving many
packages across many toolchains, separately factored lists make
Environments substantially more manageable.
Additionally, the ``-l`` option to the ``spack add`` command allows
one to add to named lists in the definitions section of the manifest
file directly from the command line.
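For example, to append a spec to the ``first`` list from the manifest
above (rather than to the ``specs`` section), you might run something
like the following; the environment name ``myenv`` and the spec
``libiconv`` are purely illustrative:

.. code-block:: console

$ spack -e myenv add -l first libiconv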
The ``when`` directive can be used to conditionally add specs to a
named list. The ``when`` directive takes a string of Python code
referring to a restricted set of variables, and evaluates to a
boolean. The specs listed are appended to the named list if the
``when`` string evaluates to ``True``. In the following snippet, the
named list ``compilers`` is ``['%gcc', '%clang', '%intel']`` on
``x86_64`` systems and ``['%gcc', '%clang']`` on all other systems.
.. code-block:: yaml
spack:
definitions:
- compilers: ['%gcc', '%clang']
- when: arch.satisfies('x86_64:')
compilers: ['%intel']
.. note::
Any definitions of the same named list whose ``when`` clauses
evaluate to true (or are absent) will be appended together.
The valid variables for a ``when`` clause are:
#. ``platform``. The platform string of the default Spack
architecture on the system.
#. ``os``. The os string of the default Spack architecture on
the system.
#. ``target``. The target string of the default Spack
architecture on the system.
#. ``architecture`` or ``arch``. A Spack spec satisfying the default Spack
architecture on the system. This supports querying via the ``satisfies``
method, as shown above.
#. ``arch_str``. The architecture string of the default Spack architecture
on the system.
#. ``re``. The standard regex module in Python.
#. ``env``. The user environment (usually ``os.environ`` in Python).
#. ``hostname``. The hostname of the system (if ``hostname`` is an
executable in the user's PATH).
""""""""""""""""""""""""
SpecLists as Constraints
""""""""""""""""""""""""
Dependencies and compilers in Spack can be both packages in an
environment and constraints on other packages. References to SpecLists
allow a shorthand to treat packages in a list as either a compiler or
a dependency using the ``$%`` or ``$^`` syntax respectively.
For example, the following environment has three root packages:
``gcc@8.1.0``, ``mvapich2@2.3.1 %gcc@8.1.0``, and ``hdf5+mpi
%gcc@8.1.0 ^mvapich2@2.3.1``.
.. code-block:: yaml
spack:
definitions:
- compilers: [gcc@8.1.0]
- mpis: [mvapich2@2.3.1]
- packages: [hdf5+mpi]
specs:
- $compilers
- matrix:
- [$mpis]
- [$%compilers]
- matrix:
- [$packages]
- [$^mpis]
- [$%compilers]
This allows for a much-needed reduction in redundancy between packages
and constraints.
^^^^^^^^^^^^^^^^^^^^^^^^^
Environment-managed Views
^^^^^^^^^^^^^^^^^^^^^^^^^
Spack Environments can define filesystem views of their software,
which are updated as packages are installed and uninstalled from
the Environment. Filesystem views provide an access point for packages
from the filesystem for users who want to access those packages
directly. For more information on filesystem views, see the section
:ref:`filesystem-views`.
Spack Environment managed views are updated every time the environment
is written out to the lock file ``spack.lock``, so the concrete
environment and the view are always compatible.
"""""""""""""""""""""""""""""
Configuring environment views
"""""""""""""""""""""""""""""
The Spack Environment manifest file has a top-level keyword
``view``. Each entry under that heading is a view descriptor, headed
by a name. The view descriptor contains the root of the view, and
optionally the projections for the view, and ``select`` and
``exclude`` lists for the view. For example, in the following manifest
file snippet we define a view named ``mpis``, rooted at
``/path/to/view`` in which all projections use the package name,
version, and compiler name to determine the path for a given
package. This view selects all packages that depend on MPI, and
excludes those built with the PGI compiler at version 18.5.
.. code-block:: yaml
spack:
...
view:
mpis:
root: /path/to/view
select: [^mpi]
exclude: ['%pgi@18.5']
projections:
all: {name}/{version}-{compiler.name}
For more information on using view projections, see the section on
:ref:`adding_projections_to_views`. The default for the ``select`` and
``exclude`` values is to select everything and exclude nothing. The
default projection is the default view projection (``{}``).
Any number of views may be defined under the ``view`` heading in a
Spack Environment.
There are two shorthands for environments with a single view. If the
environment at ``/path/to/env`` has a single view, with a root at
``/path/to/env/.spack-env/view``, with default selection and exclusion
and the default projection, we can put ``view: True`` in the
environment manifest. Similarly, if the environment has a view with a
different root, but default selection, exclusion, and projections, the
manifest can say ``view: /path/to/view``. These views are
automatically named ``default``, so that
.. code-block:: yaml
spack:
...
view: True
is equivalent to
.. code-block:: yaml
spack:
...
view:
default:
root: .spack-env/view
and
.. code-block:: yaml
spack:
...
view: /path/to/view
is equivalent to
.. code-block:: yaml
spack:
...
view:
default:
root: /path/to/view
By default, Spack environments are configured with ``view: True`` in
the manifest. Environments can be configured without views using
``view: False``. For backwards compatibility reasons, environments
with no ``view`` key are treated the same as ``view: True``.
From the command line, the ``spack env create`` command takes an
argument ``--with-view [PATH]`` that sets the path for a single, default
view. If no path is specified, the default path is used (``view:
True``). The argument ``--without-view`` can be used to create an
environment without any view configured.
The ``spack env view`` command can be used to manage the views
of an Environment. The subcommand ``spack env view enable`` will add a
view named ``default`` to an environment. It takes an optional
argument to specify the path for the new default view. The subcommand
``spack env view disable`` will remove the view named ``default`` from
an environment if one exists. The subcommand ``spack env view
regenerate`` will regenerate the views for the environment. This will
apply any updates in the environment configuration that have not yet
been applied.
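Concretely, these subcommands are run against the active environment;
the path below is a placeholder:

.. code-block:: console

$ spack env view enable /path/to/new/view
$ spack env view regenerate
$ spack env view disable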
""""""""""""""""""""""""""""
Activating environment views
""""""""""""""""""""""""""""
The ``spack env activate`` command will put the default view for the
environment into the user's path, in addition to activating the
environment for Spack commands. The arguments ``-v,--with-view`` and
``-V,--without-view`` can be used to tune this behavior. The default
behavior is to activate with the environment view if there is one.
The environment variables affected by the ``spack env activate``
command and the paths that are used to update them are determined by
the :ref:`prefix inspections <customize-env-modifications>` defined in
your modules configuration; the defaults are summarized in the following
table.
=================== =========
Variable Paths
=================== =========
PATH bin
MANPATH man, share/man
ACLOCAL_PATH share/aclocal
LD_LIBRARY_PATH lib, lib64
LIBRARY_PATH lib, lib64
CPATH include
PKG_CONFIG_PATH lib/pkgconfig, lib64/pkgconfig, share/pkgconfig
CMAKE_PREFIX_PATH .
=================== =========
Each of these paths is appended to the view root and added to the
relevant variable if the path exists. For this reason, it is not
recommended to use non-default projections with the default view of an
environment.
The ``spack env deactivate`` command will remove the default view of
the environment from the user's path.
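For example, to activate the hypothetical environment ``myenv`` without
modifying any of these variables, and later deactivate it:

.. code-block:: console

$ spack env activate --without-view myenv
$ spack env deactivate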

View File

@@ -1,161 +0,0 @@
spack:
definitions:
- compiler-pkgs:
- 'llvm+clang@6.0.1 os=centos7'
- 'gcc@6.5.0 os=centos7'
- 'llvm+clang@6.0.1 os=ubuntu18.04'
- 'gcc@6.5.0 os=ubuntu18.04'
- pkgs:
- readline@7.0
# - xsdk@0.4.0
- compilers:
- '%gcc@5.5.0'
- '%gcc@6.5.0'
- '%gcc@7.3.0'
- '%clang@6.0.0'
- '%clang@6.0.1'
- oses:
- os=ubuntu18.04
- os=centos7
specs:
- matrix:
- [$pkgs]
- [$compilers]
- [$oses]
exclude:
- '%gcc@7.3.0 os=centos7'
- '%gcc@5.5.0 os=ubuntu18.04'
mirrors:
cloud_gitlab: https://mirror.spack.io
compilers:
# The .gitlab-ci.yml for this project picks a Docker container which does
# not have any compilers pre-built and ready to use, so we need to fake the
# existence of those here.
- compiler:
operating_system: centos7
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: gcc@5.5.0
target: x86_64
- compiler:
operating_system: centos7
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: gcc@6.5.0
target: x86_64
- compiler:
operating_system: centos7
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: clang@6.0.0
target: x86_64
- compiler:
operating_system: centos7
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: clang@6.0.1
target: x86_64
- compiler:
operating_system: ubuntu18.04
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: clang@6.0.0
target: x86_64
- compiler:
operating_system: ubuntu18.04
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: clang@6.0.1
target: x86_64
- compiler:
operating_system: ubuntu18.04
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: gcc@6.5.0
target: x86_64
- compiler:
operating_system: ubuntu18.04
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: gcc@7.3.0
target: x86_64
gitlab-ci:
bootstrap:
- name: compiler-pkgs
compiler-agnostic: true
mappings:
- # spack-cloud-ubuntu
match:
# these are specs, if *any* match the spec under consideration, this
# 'mapping' will be used to generate the CI job
- os=ubuntu18.04
runner-attributes:
# 'tags' and 'image' go directly onto the job, 'variables' will
# be added to what we already necessarily create for the job as
# a part of the CI workflow
tags:
- spack-k8s
image:
name: scottwittenburg/spack_builder_ubuntu_18.04
entrypoint: [""]
- # spack-cloud-centos
match:
# these are specs, if *any* match the spec under consideration, this
# 'mapping' will be used to generate the CI job
- 'os=centos7'
runner-attributes:
tags:
- spack-k8s
image:
name: scottwittenburg/spack_builder_centos_7
entrypoint: [""]
cdash:
build-group: Release Testing
url: http://cdash
project: Spack Testing
site: Spack Docker-Compose Workflow
repos: []
upstreams: {}
modules:
enable: []
packages: {}
config: {}

View File

@@ -1,123 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. extensions:
=================
Custom Extensions
=================
*Spack extensions* permit you to extend Spack capabilities by deploying your
own custom commands or logic in an arbitrary location on your filesystem.
This can be extremely useful, e.g., to develop and maintain a command whose purpose is
too specific to be considered for reintegration into the mainline, or to
evolve a command through its early stages before starting a discussion to merge
it upstream.
From Spack's point of view, an extension is any path in your filesystem that
respects a prescribed naming scheme and layout for files:
.. code-block:: console
spack-scripting/ # The top level directory must match the format 'spack-{extension_name}'
├── pytest.ini # Optional file if the extension ships its own tests
├── scripting # Folder that may contain modules that are needed for the extension commands
│   └── cmd # Folder containing extension commands
│   └── filter.py # A new command that will be available
├── tests # Tests for this extension
│ ├── conftest.py
│ └── test_filter.py
└── templates # Templates that may be needed by the extension
In the example above the extension named *scripting* adds an additional command (``filter``)
and unit tests to verify its behavior. The code for this example can be
obtained by cloning the corresponding git repository:
.. TODO: write an ad-hoc "hello world" extension and make it part of the spack organization
.. code-block:: console
$ pwd
/home/user
$ mkdir tmp && cd tmp
$ git clone https://github.com/alalazo/spack-scripting.git
Cloning into 'spack-scripting'...
remote: Counting objects: 11, done.
remote: Compressing objects: 100% (7/7), done.
remote: Total 11 (delta 0), reused 11 (delta 0), pack-reused 0
Receiving objects: 100% (11/11), done.
As you can see by inspecting the sources, Python modules that are part of the extension
can import any core Spack module.
---------------------------------
Configure Spack to Use Extensions
---------------------------------
To make your current Spack instance aware of extensions, you should add their
root paths to ``config.yaml``. In the case of our example, this means ensuring that:
.. code-block:: yaml
config:
extensions:
- /home/user/tmp/spack-scripting
is part of your configuration file. Once this is set up, any command that the extension provides
will be available from the command line:
.. code-block:: console
$ spack filter --help
usage: spack filter [-h] [--installed | --not-installed]
[--explicit | --implicit] [--output OUTPUT]
...
filter specs based on their properties
positional arguments:
specs specs to be filtered
optional arguments:
-h, --help show this help message and exit
--installed select installed specs
--not-installed select specs that are not yet installed
--explicit select specs that were installed explicitly
--implicit select specs that are not installed or were installed implicitly
--output OUTPUT where to dump the result
The corresponding unit tests can be run giving the appropriate options
to ``spack unit-test``:
.. code-block:: console
$ spack unit-test --extension=scripting
============================================================== test session starts ===============================================================
platform linux2 -- Python 2.7.15rc1, pytest-3.2.5, py-1.4.34, pluggy-0.4.0
rootdir: /home/mculpo/tmp/spack-scripting, inifile: pytest.ini
collected 5 items
tests/test_filter.py ...XX
============================================================ short test summary info =============================================================
XPASS tests/test_filter.py::test_filtering_specs[flags3-specs3-expected3]
XPASS tests/test_filter.py::test_filtering_specs[flags4-specs4-expected4]
=========================================================== slowest 20 test durations ============================================================
3.74s setup tests/test_filter.py::test_filtering_specs[flags0-specs0-expected0]
0.17s call tests/test_filter.py::test_filtering_specs[flags3-specs3-expected3]
0.16s call tests/test_filter.py::test_filtering_specs[flags2-specs2-expected2]
0.15s call tests/test_filter.py::test_filtering_specs[flags1-specs1-expected1]
0.13s call tests/test_filter.py::test_filtering_specs[flags4-specs4-expected4]
0.08s call tests/test_filter.py::test_filtering_specs[flags0-specs0-expected0]
0.04s teardown tests/test_filter.py::test_filtering_specs[flags4-specs4-expected4]
0.00s setup tests/test_filter.py::test_filtering_specs[flags4-specs4-expected4]
0.00s setup tests/test_filter.py::test_filtering_specs[flags3-specs3-expected3]
0.00s setup tests/test_filter.py::test_filtering_specs[flags1-specs1-expected1]
0.00s setup tests/test_filter.py::test_filtering_specs[flags2-specs2-expected2]
0.00s teardown tests/test_filter.py::test_filtering_specs[flags2-specs2-expected2]
0.00s teardown tests/test_filter.py::test_filtering_specs[flags1-specs1-expected1]
0.00s teardown tests/test_filter.py::test_filtering_specs[flags0-specs0-expected0]
0.00s teardown tests/test_filter.py::test_filtering_specs[flags3-specs3-expected3]
====================================================== 3 passed, 2 xpassed in 4.51 seconds =======================================================

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -48,8 +48,8 @@ platform, all on the command line.
# Add compiler flags using the conventional names
$ spack install mpileaks@1.1.2 %gcc@4.7.3 cppflags="-O3 -floop-block"
# Cross-compile for a different micro-architecture with target=
$ spack install mpileaks@1.1.2 target=icelake
# Cross-compile for a different architecture with arch=
$ spack install mpileaks@1.1.2 arch=bgqos_0
Users can specify as many or few options as they care about. Spack
will fill in the unspecified values with sensible defaults. The two listed
@@ -60,14 +60,14 @@ Customize dependencies
----------------------
Spack allows *dependencies* of a particular installation to be
customized extensively. Suppose that ``hdf5`` depends
on ``openmpi`` and indirectly on ``hwloc``. Using ``^``, users can add custom
customized extensively. Suppose that ``mpileaks`` depends indirectly
on ``libelf`` and ``libdwarf``. Using ``^``, users can add custom
configurations for the dependencies:
.. code-block:: console
# Install hdf5 and link it with specific versions of openmpi and hwloc
$ spack install hdf5@1.10.1 %gcc@4.7.3 +debug ^openmpi+cuda fabrics=auto ^hwloc+gl
# Install mpileaks and link it with specific versions of libelf and libdwarf
$ spack install mpileaks@1.1.2 %gcc@4.7.3 +debug ^libelf@0.8.12 ^libdwarf@20130729+debug
------------------------
Non-destructive installs
@@ -130,7 +130,7 @@ creates a simple python file:
It doesn't take much python coding to get from there to a working
package:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/libelf/package.py
.. literalinclude:: ../../../var/spack/repos/builtin/packages/libelf/package.py
:lines: 6-
Spack also provides wrapper functions around common commands like

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -16,18 +16,14 @@ Prerequisites
Spack has the following minimum requirements, which must be installed
before Spack is run:
#. Python 2 (2.6 or 2.7) or 3 (3.5 - 3.9) to run Spack
#. A C/C++ compiler for building
#. The ``make`` executable for building
#. The ``tar``, ``gzip``, ``bzip2``, ``xz`` and optionally ``zstd``
executables for extracting source code
#. The ``patch`` command to apply patches
#. The ``git`` and ``curl`` commands for fetching
#. If using the ``gpg`` subcommand, ``gnupg2`` is required
1. Python 2 (2.6 or 2.7) or 3 (3.4 - 3.7)
2. A C/C++ compiler
3. The ``git`` and ``curl`` commands.
4. If using the ``gpg`` subcommand, ``gnupg2`` is required.
These requirements can be easily installed on most modern Linux systems;
on macOS, Xcode is required. Spack is designed to run on HPC
platforms like Cray. Not all packages should be expected
on Macintosh, XCode is required. Spack is designed to run on HPC
platforms like Cray and BlueGene/Q. Not all packages should be expected
to work on all platforms. A build matrix showing which packages are
working on which systems is planned but not yet available.
@@ -44,58 +40,39 @@ Getting Spack is easy. You can clone it from the `github repository
This will create a directory called ``spack``.
.. _shell-support:
^^^^^^^^^^^^^^^^^^^^^^^^
Add Spack to the Shell
^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^
Shell support
^^^^^^^^^^^^^
Once you have cloned Spack, we recommend sourcing the appropriate script
for your shell:
We'll assume that the full path to your downloaded Spack directory is
in the ``SPACK_ROOT`` environment variable. Add ``$SPACK_ROOT/bin``
to your path and you're ready to go:
.. code-block:: console
# For bash/zsh/sh
$ . spack/share/spack/setup-env.sh
$ export PATH=$SPACK_ROOT/bin:$PATH
$ spack install libelf
# For tcsh/csh
$ source spack/share/spack/setup-env.csh
For a richer experience, use Spack's shell support:
# For fish
$ . spack/share/spack/setup-env.fish
.. code-block:: console
That's it! You're ready to use Spack.
# For bash/zsh users
$ export SPACK_ROOT=/path/to/spack
$ . $SPACK_ROOT/share/spack/setup-env.sh
Sourcing these files will put the ``spack`` command in your ``PATH``, set
up your ``MODULEPATH`` to use Spack's packages, and add other useful
shell integration for :ref:`certain commands <packaging-shell-support>`,
:ref:`environments <environments>`, and :ref:`modules <modules>`. For
``bash``, it also sets up tab completion.
If you do not want to use Spack's shell support, you can always just run
the ``spack`` command directly from ``spack/bin/spack``.
When the ``spack`` command is executed it searches for an appropriate
Python interpreter to use, which can be explicitly overridden by setting
the ``SPACK_PYTHON`` environment variable. When sourcing the appropriate shell
setup script, ``SPACK_PYTHON`` will be set to the interpreter found at
sourcing time, ensuring future invocations of the ``spack`` command will
continue to use the same consistent python version regardless of changes in
the environment.
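For example, to pin Spack to a specific interpreter for the current
shell session (the path is a placeholder for whichever Python you want
to use):

.. code-block:: console

$ export SPACK_PYTHON=/usr/bin/python3
$ spack --version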
# For tcsh or csh users (note you must set SPACK_ROOT)
$ setenv SPACK_ROOT /path/to/spack
$ source $SPACK_ROOT/share/spack/setup-env.csh
^^^^^^^^^^^^^^^^^^
Check Installation
^^^^^^^^^^^^^^^^^^
This automatically adds Spack to your ``PATH`` and allows the ``spack``
command to be used to execute spack :ref:`commands <shell-support>` and
:ref:`useful packaging commands <packaging-shell-support>`.
With Spack installed, you should be able to run some basic Spack
commands. For example:
.. command-output:: spack spec netcdf-c
In theory, Spack doesn't need any additional installation; just
download and run! But in real life, additional steps are usually
required before Spack can work in a practical sense. Read on...
If :ref:`environment-modules or dotkit <InstallEnvironmentModules>` is
installed and available, the ``spack`` command can also load and unload
:ref:`modules <modules>`.
^^^^^^^^^^^^^^^^^
Clean Environment
@@ -111,52 +88,16 @@ environment*, especially for ``PATH``. Only software that comes with
the system, or that you know you wish to use with Spack, should be
included. This procedure will avoid many strange build errors.
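As a minimal sketch, you can start a nearly empty shell and inspect what
Spack would inherit (assumes ``bash``; keep only the variables you need):

.. code-block:: console

   $ env -i HOME=$HOME TERM=$TERM bash --noprofile --norc
   $ echo $PATH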
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Optional: Bootstrapping clingo
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Spack supports using clingo as an external solver to compute which software
needs to be installed. If you have a default compiler supporting C++14, Spack
can automatically bootstrap this tool from sources the first time it is
needed:
^^^^^^^^^^^^^^^^^^
Check Installation
^^^^^^^^^^^^^^^^^^
.. code-block:: console
With Spack installed, you should be able to run some basic Spack
commands. For example:
$ spack solve zlib
[+] /usr (external bison-3.0.4-wu5pgjchxzemk5ya2l3ddqug2d7jv6eb)
[+] /usr (external cmake-3.19.4-a4kmcfzxxy45mzku4ipmj5kdiiz5a57b)
[+] /usr (external python-3.6.9-x4fou4iqqlh5ydwddx3pvfcwznfrqztv)
==> Installing re2c-1.2.1-e3x6nxtk3ahgd63ykgy44mpuva6jhtdt
[ ... ]
==> Optimization: [0, 0, 0, 0, 0, 1, 0, 0, 0]
zlib@1.2.11%gcc@10.1.0+optimize+pic+shared arch=linux-ubuntu18.04-broadwell
.. command-output:: spack spec netcdf
If you want to speed up bootstrapping, you may try to search for ``cmake`` and ``bison``
on your system:
.. code-block:: console
$ spack external find cmake bison
==> The following specs have been detected on this system and added to /home/spack/.spack/packages.yaml
bison@3.0.4 cmake@3.19.4
All the tools Spack needs for its own functioning are installed in a separate store, which lives
under the ``${HOME}/.spack`` directory. The software installed there can be queried with:
.. code-block:: console
$ spack find --bootstrap
==> Showing internal bootstrap store at "/home/spack/.spack/bootstrap/store"
==> 3 installed packages
-- linux-ubuntu18.04-x86_64 / gcc@10.1.0 ------------------------
clingo-bootstrap@spack python@3.6.9 re2c@1.2.1
In case it's needed the bootstrap store can also be cleaned with:
.. code-block:: console
$ spack clean -b
==> Removing software in "/home/spack/.spack/bootstrap/store"
^^^^^^^^^^^^^^^^^^^^^^^^^^
Optional: Alternate Prefix
@@ -176,6 +117,15 @@ copy of spack installs packages into its own ``$PREFIX/opt``
directory.
^^^^^^^^^^
Next Steps
^^^^^^^^^^
In theory, Spack doesn't need any additional installation; just
download and run! But in real life, additional steps are usually
required before Spack can work in a practical sense. Read on...
.. _compiler-config:
----------------------
@@ -239,7 +189,7 @@ where the compiler is installed. For example:
.. code-block:: console
$ spack compiler find /usr/local/tools/ic-13.0.079
==> Added 1 new compiler to ~/.spack/linux/compilers.yaml
==> Added 1 new compiler to ~/.spack/compilers.yaml
intel@13.0.079
Or you can run ``spack compiler find`` with no arguments to force
@@ -251,7 +201,7 @@ installed, but you know that new compilers have been added to your
$ module load gcc-4.9.0
$ spack compiler find
==> Added 1 new compiler to ~/.spack/linux/compilers.yaml
==> Added 1 new compiler to ~/.spack/compilers.yaml
gcc@4.9.0
This loads the environment module for gcc-4.9.0 to add it to
@@ -296,7 +246,7 @@ Manual compiler configuration
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If auto-detection fails, you can manually configure a compiler by
editing your ``~/.spack/<platform>/compilers.yaml`` file. You can do this by running
editing your ``~/.spack/compilers.yaml`` file. You can do this by running
``spack config edit compilers``, which will open the file in your ``$EDITOR``.
Each compiler configuration in the file looks like this:
@@ -312,7 +262,7 @@ Each compiler configuration in the file looks like this:
cxx: /usr/local/bin/icpc-15.0.024-beta
f77: /usr/local/bin/ifort-15.0.024-beta
fc: /usr/local/bin/ifort-15.0.024-beta
spec: intel@15.0.0
spec: intel@15.0.0:
For compilers that do not support Fortran (like ``clang``), put
``None`` for ``f77`` and ``fc``:
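A sketch of the resulting ``paths`` entry (compiler locations are
illustrative):

.. code-block:: yaml

   paths:
     cc: /usr/bin/clang
     cxx: /usr/bin/clang++
     f77: None
     fc: None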
@@ -518,21 +468,18 @@ Fortran.
install GCC with Spack (``spack install gcc``) or with Homebrew
(``brew install gcc``).
#. The only thing left to do is to edit ``~/.spack/darwin/compilers.yaml`` to provide
#. The only thing left to do is to edit ``~/.spack/compilers.yaml`` to provide
the path to ``gfortran``:
.. code-block:: yaml
compilers:
- compiler:
...
paths:
cc: /usr/bin/clang
cxx: /usr/bin/clang++
f77: /path/to/bin/gfortran
fc: /path/to/bin/gfortran
spec: apple-clang@11.0.0
darwin-x86_64:
clang@7.3.0-apple:
cc: /usr/bin/clang
cxx: /usr/bin/clang++
f77: /path/to/bin/gfortran
fc: /path/to/bin/gfortran
If you used Spack to install GCC, you can get the installation prefix by
``spack location -i gcc`` (this will only work if you have a single version
@@ -643,12 +590,11 @@ flags to the ``icc`` command:
operating_system: centos7
paths:
cc: /opt/intel-15.0.24/bin/icc-15.0.24-beta
cflags: -gcc-name ~/spack/opt/spack/linux-centos7-x86_64/gcc-4.9.3-iy4rw.../bin/gcc
cxx: /opt/intel-15.0.24/bin/icpc-15.0.24-beta
cxxflags: -gxx-name ~/spack/opt/spack/linux-centos7-x86_64/gcc-4.9.3-iy4rw.../bin/g++
f77: /opt/intel-15.0.24/bin/ifort-15.0.24-beta
fc: /opt/intel-15.0.24/bin/ifort-15.0.24-beta
flags:
cflags: -gcc-name ~/spack/opt/spack/linux-centos7-x86_64/gcc-4.9.3-iy4rw.../bin/gcc
cxxflags: -gxx-name ~/spack/opt/spack/linux-centos7-x86_64/gcc-4.9.3-iy4rw.../bin/g++
fflags: -gcc-name ~/spack/opt/spack/linux-centos7-x86_64/gcc-4.9.3-iy4rw.../bin/gcc
spec: intel@15.0.24.4.9.3
@@ -765,9 +711,8 @@ an OpenMPI installed in /opt/local, one would use:
packages:
openmpi:
externals:
- spec: openmpi@1.10.1
prefix: /opt/local
paths:
openmpi@1.10.1: /opt/local
buildable: False
In general, Spack is easier to use and more reliable if it builds all of
@@ -829,9 +774,8 @@ Then add the following to ``~/.spack/packages.yaml``:
packages:
openssl:
externals:
- spec: openssl@1.0.2g
prefix: /usr
paths:
openssl@1.0.2g: /usr
buildable: False
@@ -846,9 +790,8 @@ to add the following to ``packages.yaml``:
packages:
netlib-lapack:
externals:
- spec: netlib-lapack@3.6.1
prefix: /usr
paths:
netlib-lapack@3.6.1: /usr
buildable: False
all:
providers:
@@ -874,7 +817,7 @@ Git
Some Spack packages use ``git`` to download, which might not work on
some computers. For example, the following error was
encountered on a Macintosh during ``spack install julia@master``:
encountered on a Macintosh during ``spack install julia-master``:
.. code-block:: console
@@ -903,7 +846,7 @@ from websites and from git.
.. warning::
This workaround should be used ONLY as a last resort! Without SSL
certificate verification, spack and git will download from sites you
wouldn't normally trust. The code you download and run may then be
compromised! While this is not a major issue for archives that will
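If you must use it anyway, a sketch of the workaround (Spack's
``verify_ssl`` option lives in ``config.yaml``, and the git setting is
global):

.. code-block:: console

   $ spack config edit config              # then set: verify_ssl: false
   $ git config --global http.sslVerify false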
@@ -952,8 +895,9 @@ Core Spack Utilities
^^^^^^^^^^^^^^^^^^^^
Core Spack uses the following packages, mainly to download and unpack
source code: ``curl``, ``env``, ``git``, ``go``, ``hg``, ``svn``,
``tar``, ``unzip``, ``patch``
source code, and to load generated environment modules: ``curl``,
``env``, ``git``, ``go``, ``hg``, ``svn``, ``tar``, ``unzip``,
``patch``, ``environment-modules``.
As long as the user's environment is set up to successfully run these
programs from outside of Spack, they should work inside of Spack as
@@ -961,6 +905,10 @@ well. They can generally be activated as in the ``curl`` example above;
or some systems might already have an appropriate hand-built
environment module that may be loaded. Either way works.
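As a sketch, either route might look like this (module names are
illustrative; real ones include a hash suffix):

.. code-block:: console

   $ spack install curl
   $ spack load curl          # activate the Spack-built curl
   $ module load curl         # or load a pre-existing system module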
If you find that you are missing some of these programs, ``spack`` can
build some of them for you with ``spack bootstrap``. Currently supported
programs are ``environment-modules``.
A few notes on specific programs in this list:
""""""""""""""""""""""""""
@@ -988,6 +936,45 @@ other programs will also not work, because they also rely on OpenSSL.
Once ``curl`` has been installed, you can similarly install the others.
.. _InstallEnvironmentModules:
"""""""""""""""""""
Environment Modules
"""""""""""""""""""
In order to use Spack's generated module files, you must have
installed ``environment-modules`` or ``lmod``. The simplest way
to get the latest version of either of these tools is installing
it as part of Spack's bootstrap procedure:
.. code-block:: console
$ spack bootstrap
.. warning::
At the moment ``spack bootstrap`` is only able to install ``environment-modules``.
Extending its capabilities to prefer ``lmod`` where possible is in the roadmap,
and likely to happen before the next release.
Alternatively, on many Linux distributions, you can install a pre-built binary
from the vendor's repository. On Fedora/RHEL/CentOS, for example, this can be
done with the command:
.. code-block:: console
$ yum install environment-modules
Once you have the tool installed and available in your path, you can source
Spack's setup file:
.. code-block:: console
$ source share/spack/setup-env.sh
This activates :ref:`shell support <shell-support>` and makes commands like
``spack load`` available for use.
^^^^^^^^^^^^^^^^^
Package Utilities
^^^^^^^^^^^^^^^^^
@@ -1237,13 +1224,9 @@ Here's an example of an external configuration for cray modules:
packages:
mpich:
externals:
- spec: "mpich@7.3.1%gcc@5.2.0 arch=cray_xc-haswell-CNL10"
modules:
- cray-mpich
- spec: "mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-haswell-CNL10"
modules:
- cray-mpich
modules:
mpich@7.3.1%gcc@5.2.0 arch=cray_xc-haswell-CNL10: cray-mpich
mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-haswell-CNL10: cray-mpich
all:
providers:
mpi: [mpich]
@@ -1255,7 +1238,7 @@ via module load.
.. note::
For Cray-provided packages, it is best to use ``modules:`` instead of ``prefix:``
For Cray-provided packages, it is best to use ``modules:`` instead of ``paths:``
in ``packages.yaml``, because the Cray Programming Environment heavily relies on
modules (e.g., loading the ``cray-mpich`` module adds MPI libraries to the
compiler wrapper link line).
@@ -1271,31 +1254,19 @@ Here is an example of a full packages.yaml used at NERSC
packages:
mpich:
externals:
- spec: "mpich@7.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-mpich
- spec: "mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-SuSE11-ivybridge"
modules:
- cray-mpich
modules:
mpich@7.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge: cray-mpich
mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-SuSE11-ivybridge: cray-mpich
buildable: False
netcdf:
externals:
- spec: "netcdf@4.3.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-netcdf
- spec: "netcdf@4.3.3.1%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-netcdf
modules:
netcdf@4.3.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge: cray-netcdf
netcdf@4.3.3.1%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge: cray-netcdf
buildable: False
hdf5:
externals:
- spec: "hdf5@1.8.14%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-hdf5
- spec: "hdf5@1.8.14%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-hdf5
modules:
hdf5@1.8.14%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge: cray-hdf5
hdf5@1.8.14%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge: cray-hdf5
buildable: False
all:
compiler: [gcc@5.2.0, intel@16.0.0.109]
@@ -1319,6 +1290,6 @@ environment variables may be propagated into containers that are not
using the Cray programming environment.
To ensure that Spack does not autodetect the Cray programming
environment, unset the environment variable ``MODULEPATH``. This
environment, unset the environment variable ``CRAYPE_VERSION``. This
will cause Spack to treat a linux container on a Cray system as a base
linux distro.


View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -55,7 +55,7 @@ or refer to the full manual below.
getting_started
basic_usage
workflows
Tutorial: Spack 101 <https://spack-tutorial.readthedocs.io>
tutorial
known_issues
.. toctree::
@@ -65,17 +65,12 @@ or refer to the full manual below.
configuration
config_yaml
build_settings
environments
containers
mirrors
module_file_support
repositories
binary_caches
command_index
package_list
chain
extensions
pipelines
.. toctree::
:maxdepth: 2
@@ -85,11 +80,7 @@ or refer to the full manual below.
packaging_guide
build_systems
developer_guide
.. toctree::
:maxdepth: 2
:caption: API Docs
docker_for_developers
Spack API Docs <spack>
LLNL API Docs <llnl>

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -14,7 +14,7 @@ problems if you encounter them.
Variants are not properly forwarded to dependencies
---------------------------------------------------
**Status:** Expected to be fixed by Spack's new concretizer
**Status:** Expected to be fixed in the next release
Sometimes, a variant of a package can also affect how its dependencies are
built. For example, in order to build MPI support for a package, it may
@@ -49,29 +49,15 @@ A workaround is to explicitly activate the variants of dependencies as well:
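With hypothetical package names, that workaround looks like:

.. code-block:: console

   $ spack install mypackage+mpi ^mydependency+mpi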
See https://github.com/spack/spack/issues/267 and
https://github.com/spack/spack/issues/2546 for further details.
-----------------------------------------------
depends_on cannot handle recursive dependencies
-----------------------------------------------
**Status:** Not yet a work in progress
----------------------------
``spack setup`` doesn't work
----------------------------
Although ``depends_on`` can handle any aspect of Spack's spec syntax,
it currently cannot handle recursive dependencies. If the ``^`` sigil
appears in a ``depends_on`` statement, the concretizer will hang.
For example, something like:
**Status:** Work in progress
.. code-block:: python
depends_on('mfem+cuda ^hypre+cuda', when='+cuda')
should be rewritten as:
.. code-block:: python
depends_on('mfem+cuda', when='+cuda')
depends_on('hypre+cuda', when='+cuda')
See https://github.com/spack/spack/issues/17660 and
https://github.com/spack/spack/issues/11160 for more details.
Spack provides a ``setup`` command that is useful for the development of
software outside of Spack. Unfortunately, this command no longer works.
See https://github.com/spack/spack/issues/2597 and
https://github.com/spack/spack/issues/2662 for details. This is expected
to be fixed by https://github.com/spack/spack/pull/2664.

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -10,25 +10,29 @@ Modules
=======
The use of module systems to manage user environment in a controlled way
is a common practice at HPC centers that is often embraced also by
individual programmers on their development machines. To support this
common practice Spack integrates with `Environment Modules
<http://modules.sourceforge.net/>`_ and `LMod
<http://lmod.readthedocs.io/en/latest/>`_ by providing post-install hooks
that generate module files and commands to manipulate them.
is a common practice at HPC centers that is often embraced also by individual
programmers on their development machines. To support this common practice
Spack integrates with `Environment Modules
<http://modules.sourceforge.net/>`_ , `LMod
<http://lmod.readthedocs.io/en/latest/>`_ and `Dotkit <https://computing.llnl.gov/?set=jobs&page=dotkit>`_ by
providing post-install hooks that generate module files and commands to manipulate them.
Modules are one of several ways you can use Spack packages. For other
options that may fit your use case better, you should also look at
:ref:`spack load <spack-load>` and :ref:`environments <environments>`.
.. note::
If your machine does not already have a module system installed,
we advise you to use either Environment Modules or LMod. See :ref:`InstallEnvironmentModules`
for more details.
.. _shell-support:
----------------------------
Using module files via Spack
----------------------------
If you have installed a supported module system you should be able to
run either ``module avail`` or ``use -l spack`` to see what module
files have been installed. Here is sample output of those programs,
showing lots of installed packages:
If you have installed a supported module system either manually or through
``spack bootstrap``, you should be able to run either ``module avail`` or
``use -l spack`` to see what module files have been installed. Here is
sample output of those programs, showing lots of installed packages:
.. code-block:: console
@@ -62,9 +66,219 @@ to load the ``cmake`` module:
$ module load cmake-3.7.2-gcc-6.3.0-fowuuby
Neither of these is particularly pretty, easy to remember, or easy to
type. Luckily, Spack offers many facilities for customizing the module
scheme used at your site.
Neither of these is particularly pretty, easy to remember, or
easy to type. Luckily, Spack has its own interface for using modules and dotkits.
^^^^^^^^^^^^^
Shell support
^^^^^^^^^^^^^
To enable additional Spack commands for loading and unloading module files,
and to add the correct path to ``MODULEPATH``, you need to source the appropriate
setup file in the ``$SPACK_ROOT/share/spack`` directory. This will activate shell
support for the commands that need it. For ``bash``, ``ksh`` or ``zsh`` users:
.. code-block:: console
$ . ${SPACK_ROOT}/share/spack/setup-env.sh
For ``csh`` and ``tcsh`` instead:
.. code-block:: console
$ setenv SPACK_ROOT ...
$ source $SPACK_ROOT/share/spack/setup-env.csh
Note that in the latter case it is necessary to explicitly set ``SPACK_ROOT``
before sourcing the setup file (you will get a meaningful error message
if you don't).
When ``bash`` and ``ksh`` users update their environment with ``setup-env.sh``, it
will check for spack-installed environment modules and add the ``module`` command
to their environment. This only occurs if the ``module`` command is not already
available. You can install ``environment-modules`` with ``spack bootstrap`` as
described in :ref:`InstallEnvironmentModules`.
Finally, if you want to have Spack's shell support available on the command line at
any login you can put this source line in one of the files that are sourced
at startup (like ``.profile``, ``.bashrc`` or ``.cshrc``). Be aware though
that the startup time may be slightly increased because of that.
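For example, for ``bash`` (the Spack path is illustrative):

.. code-block:: console

   $ echo '. /path/to/spack/share/spack/setup-env.sh' >> ~/.bashrc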
.. _cmd-spack-load:
^^^^^^^^^^^^^^^^^^^^^^^
``spack load / unload``
^^^^^^^^^^^^^^^^^^^^^^^
Once you have shell support enabled you can use the same spec syntax
you're used to:
========================= ==========================
Modules                   Dotkit
========================= ==========================
``spack load <spec>``     ``spack use <spec>``
``spack unload <spec>``   ``spack unuse <spec>``
========================= ==========================
And you can use the same shortened names you use everywhere else in
Spack.
For example, if you are using dotkit, this will add the ``mpich``
package built with ``gcc`` to your path:
.. code-block:: console
$ spack install mpich %gcc@4.4.7
# ... wait for install ...
$ spack use mpich %gcc@4.4.7 # dotkit
Prepending: mpich@3.0.4%gcc@4.4.7 (ok)
$ which mpicc
~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4/bin/mpicc
Or, similarly if you are using modules, you could type:
.. code-block:: console
$ spack load mpich %gcc@4.4.7 # modules
These commands will add appropriate directories to your ``PATH``,
``MANPATH``, ``CPATH``, and ``LD_LIBRARY_PATH``. When you no longer
want to use a package, you can type unload or unuse similarly:
.. code-block:: console
$ spack unload mpich %gcc@4.4.7 # modules
$ spack unuse mpich %gcc@4.4.7 # dotkit
.. note::
These ``use``, ``unuse``, ``load``, and ``unload`` subcommands are
only available if you have enabled Spack's shell support *and* you
have dotkit or modules installed on your machine.
^^^^^^^^^^^^^^^^^^^^^^
Ambiguous module names
^^^^^^^^^^^^^^^^^^^^^^
If a spec used with load/unload or use/unuse is ambiguous (i.e. more
than one installed package matches it), then Spack will warn you:
.. code-block:: console
$ spack load libelf
==> Error: Multiple matches for spec libelf. Choose one:
libelf@0.8.13%gcc@4.4.7 arch=linux-debian7-x86_64
libelf@0.8.13%intel@15.0.0 arch=linux-debian7-x86_64
You can either type the ``spack load`` command again with a fully
qualified argument, or you can add just enough extra constraints to
identify one package. For example, above, the key differentiator is
that one ``libelf`` is built with the Intel compiler, while the other
used ``gcc``. You could therefore just type:
.. code-block:: console
$ spack load libelf %intel
To identify just the one built with the Intel compiler.
.. _extensions:
.. _cmd-spack-module-loads:
^^^^^^^^^^^^^^^^^^^^^^^^^^
``spack module tcl loads``
^^^^^^^^^^^^^^^^^^^^^^^^^^
In some cases, it is desirable to load not just a module, but also all
the modules it depends on. This is not required for most modules
because Spack builds binaries with RPATH support. However, not all
packages use RPATH to find their dependencies: this can be true in
particular for Python extensions, which are currently *not* built with
RPATH.
Scripts to load modules recursively may be made with the command:
.. code-block:: console
$ spack module tcl loads --dependencies <spec>
An equivalent alternative using `process substitution <http://tldp.org/LDP/abs/html/process-sub.html>`_ is:
.. code-block:: console
$ source <( spack module tcl loads --dependencies <spec> )
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Module Commands for Shell Scripts
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Although Spack is flexible, the ``module`` command is much faster.
This could become an issue when emitting a series of ``spack load``
commands inside a shell script. By adding the ``--dependencies`` flag,
``spack module tcl loads`` may also be used to generate code that can be
cut-and-pasted into a shell script. For example:
.. code-block:: console
$ spack module tcl loads --dependencies py-numpy git
# bzip2@1.0.6%gcc@4.9.3=linux-x86_64
module load bzip2-1.0.6-gcc-4.9.3-ktnrhkrmbbtlvnagfatrarzjojmkvzsx
# ncurses@6.0%gcc@4.9.3=linux-x86_64
module load ncurses-6.0-gcc-4.9.3-kaazyneh3bjkfnalunchyqtygoe2mncv
# zlib@1.2.8%gcc@4.9.3=linux-x86_64
module load zlib-1.2.8-gcc-4.9.3-v3ufwaahjnviyvgjcelo36nywx2ufj7z
# sqlite@3.8.5%gcc@4.9.3=linux-x86_64
module load sqlite-3.8.5-gcc-4.9.3-a3eediswgd5f3rmto7g3szoew5nhehbr
# readline@6.3%gcc@4.9.3=linux-x86_64
module load readline-6.3-gcc-4.9.3-se6r3lsycrwxyhreg4lqirp6xixxejh3
# python@3.5.1%gcc@4.9.3=linux-x86_64
module load python-3.5.1-gcc-4.9.3-5q5rsrtjld4u6jiicuvtnx52m7tfhegi
# py-setuptools@20.5%gcc@4.9.3=linux-x86_64
module load py-setuptools-20.5-gcc-4.9.3-4qr2suj6p6glepnedmwhl4f62x64wxw2
# py-nose@1.3.7%gcc@4.9.3=linux-x86_64
module load py-nose-1.3.7-gcc-4.9.3-pwhtjw2dvdvfzjwuuztkzr7b4l6zepli
# openblas@0.2.17%gcc@4.9.3+shared=linux-x86_64
module load openblas-0.2.17-gcc-4.9.3-pw6rmlom7apfsnjtzfttyayzc7nx5e7y
# py-numpy@1.11.0%gcc@4.9.3+blas+lapack=linux-x86_64
module load py-numpy-1.11.0-gcc-4.9.3-mulodttw5pcyjufva4htsktwty4qd52r
# curl@7.47.1%gcc@4.9.3=linux-x86_64
module load curl-7.47.1-gcc-4.9.3-ohz3fwsepm3b462p5lnaquv7op7naqbi
# autoconf@2.69%gcc@4.9.3=linux-x86_64
module load autoconf-2.69-gcc-4.9.3-bkibjqhgqm5e3o423ogfv2y3o6h2uoq4
# cmake@3.5.0%gcc@4.9.3~doc+ncurses+openssl~qt=linux-x86_64
module load cmake-3.5.0-gcc-4.9.3-x7xnsklmgwla3ubfgzppamtbqk5rwn7t
# expat@2.1.0%gcc@4.9.3=linux-x86_64
module load expat-2.1.0-gcc-4.9.3-6pkz2ucnk2e62imwakejjvbv6egncppd
# git@2.8.0-rc2%gcc@4.9.3+curl+expat=linux-x86_64
module load git-2.8.0-rc2-gcc-4.9.3-3bib4hqtnv5xjjoq5ugt3inblt4xrgkd
The script may be further edited by removing unnecessary modules.
^^^^^^^^^^^^^^^
Module Prefixes
^^^^^^^^^^^^^^^
On some systems, modules are automatically prefixed with a certain
string; ``spack module tcl loads`` needs to know about that prefix when it
issues ``module load`` commands. Add the ``--prefix`` option to your
``spack module tcl loads`` commands if this is necessary.
For example, consider the following on one system:
.. code-block:: console
$ module avail
linux-SuSE11-x86_64/antlr-2.7.7-gcc-5.3.0-bdpl46y
$ spack module tcl loads antlr # WRONG!
# antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64
module load antlr-2.7.7-gcc-5.3.0-bdpl46y
$ spack module tcl loads --prefix linux-SuSE11-x86_64/ antlr
# antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64
module load linux-SuSE11-x86_64/antlr-2.7.7-gcc-5.3.0-bdpl46y
-------------------------
Module file customization
@@ -78,6 +292,8 @@ that can be generated by Spack:
+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
|                             | **Hook name**      | **Default root directory**    | **Default template file**                    | **Compatible tools** |
+=============================+====================+===============================+==============================================+======================+
| **Dotkit**                  | ``dotkit``         | share/spack/dotkit            | share/spack/templates/modules/modulefile.dk  | DotKit               |
+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
| **TCL - Non-Hierarchical**  | ``tcl``            | share/spack/modules           | share/spack/templates/modules/modulefile.tcl | Env. Modules/LMod    |
+-----------------------------+--------------------+-------------------------------+----------------------------------------------+----------------------+
| **Lua - Hierarchical**      | ``lmod``           | share/spack/lmod              | share/spack/templates/modules/modulefile.lua | LMod                 |
@@ -107,7 +323,8 @@ content of the module files generated by Spack. The first one:
.. code-block:: python
def setup_run_environment(self, env):
def setup_environment(self, spack_env, run_env):
"""Set up the compile and runtime environments for a package."""
pass
can alter the content of the module file associated with the same package where it is overridden.
@@ -115,15 +332,16 @@ The second method:
.. code-block:: python
def setup_dependent_run_environment(self, env, dependent_spec):
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
"""Set up the environment of packages that depend on this one"""
pass
can instead inject run-time environment modifications in the module files of packages
that depend on it. In both cases you need to fill ``run_env`` with the desired
list of environment modifications.
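As an illustrative sketch using the newer callback names (the ``tools``
subdirectory is hypothetical):

.. code-block:: python

   def setup_run_environment(self, env):
       # prepend a (hypothetical) tools/ subdirectory of the prefix to PATH
       env.prepend_path("PATH", self.prefix.tools)

   def setup_dependent_run_environment(self, env, dependent_spec):
       # expose this package's libraries to packages that depend on it
       env.prepend_path("LD_LIBRARY_PATH", self.prefix.lib)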
.. admonition:: The ``r`` package and callback APIs
.. note::
The ``r`` package and callback APIs
An example in which it is crucial to override both methods
is given by the ``r`` package. This package installs libraries and headers
in non-standard locations and it is possible to prepend the appropriate directory
@@ -137,15 +355,15 @@ list of environment modifications.
with the following snippet:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/r/package.py
:pyobject: R.setup_run_environment
.. literalinclude:: ../../../var/spack/repos/builtin/packages/r/package.py
:pyobject: R.setup_environment
The ``r`` package also knows which environment variable should be modified
to make language extensions provided by other packages available, and modifies
it appropriately in the override of the second method:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/r/package.py
:pyobject: R.setup_dependent_run_environment
.. literalinclude:: ../../../var/spack/repos/builtin/packages/r/package.py
:pyobject: R.setup_dependent_environment
.. _modules-yaml:
@@ -156,10 +374,10 @@ Write a configuration file
The configuration files that control module generation behavior
are named ``modules.yaml``. The default configuration:
.. literalinclude:: _spack_root/etc/spack/defaults/modules.yaml
.. literalinclude:: ../../../etc/spack/defaults/modules.yaml
:language: yaml
activates the hooks to generate ``tcl`` module files and inspects
activates the hooks to generate ``tcl`` and ``dotkit`` module files and inspects
the installation folder of each package for the presence of a set of subdirectories
(``bin``, ``man``, ``share/man``, etc.). If any is found its full path is prepended
to the environment variables listed below the folder name.
@@ -181,9 +399,12 @@ to the generator being customized:
modules:
enable:
- tcl
- dotkit
- lmod
tcl:
# contains environment modules specific customizations
dotkit:
# contains dotkit specific customizations
lmod:
# contains lmod specific customizations
@@ -264,14 +485,14 @@ is compiled with ``gcc@4.4.7``, with the only exception of any ``gcc``
or any ``llvm`` installation.
.. _modules-projections:
.. _modules-naming-scheme:
"""""""""""""""""""""""""""""""
Customize the naming of modules
"""""""""""""""""""""""""""""""
"""""""""""""""""""""""""""
Customize the naming scheme
"""""""""""""""""""""""""""
The names of environment modules generated by spack are not always easy to
fully comprehend due to the long hash in the name. There are three module
fully comprehend due to the long hash in the name. There are two module
configuration options to help with that. The first is a global setting to
adjust the hash length. It can be set anywhere from 0 to 32 and has a default
length of 7. This is the representation of the hash in the module file name and
@@ -305,52 +526,26 @@ version of python a set of python extensions is associated with. Likewise, the
``openblas`` string is attached to any program that has openblas in the spec,
most likely via the ``+blas`` variant specification.
The most heavyweight solution to module naming is to change the entire
naming convention for module files. This uses the projections format
covered in :ref:`adding_projections_to_views`.
.. code-block:: yaml
modules:
tcl:
projections:
all: '{name}/{version}-{compiler.name}-{compiler.version}-module'
^mpi: '{name}/{version}-{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}-module'
will create module files that are nested in directories by package
name, contain the version and compiler name and version, and have the
word ``module`` before the hash for all specs that do not depend on
mpi, and will have the same information plus the MPI implementation
name and version for all packages that depend on mpi.
When specifying module names by projection for Lmod modules, we
recommend NOT including names of dependencies (e.g., MPI, compilers)
that are already in the LMod hierarchy.
.. note::
TCL modules
TCL modules also allow for explicit conflicts between modulefiles.
TCL module files
A modification that is specific to ``tcl`` module files is the possibility
to change the naming scheme of modules.
.. code-block:: yaml
modules:
enable:
- tcl
tcl:
projections:
all: '{name}/{version}-{compiler.name}-{compiler.version}'
all:
conflict:
- '{name}'
- 'intel/14.0.1'
modules:
tcl:
naming_scheme: '${PACKAGE}/${VERSION}-${COMPILERNAME}-${COMPILERVER}'
all:
conflict:
- '${PACKAGE}'
- 'intel/14.0.1'
will create module files that will conflict with ``intel/14.0.1`` and with the
base directory of the same module, effectively preventing the possibility to
load two or more versions of the same software at the same time. The tokens
that are available for use in this directive are the same understood by
the :meth:`~spack.spec.Spec.format` method.
the ``Spec.format`` method.
.. note::
@@ -369,8 +564,6 @@ that are already in the LMod hierarchy.
lmod:
core_compilers:
- 'gcc@4.8'
core_specs:
- 'python'
hierarchy:
- 'mpi'
- 'lapack'
@@ -380,46 +573,11 @@ that are already in the LMod hierarchy.
implementations of ``mpi`` and ``lapack``, and let LMod switch safely from one to the
other.
All packages built with a compiler in ``core_compilers`` and all
packages that satisfy a spec in ``core_specs`` will be put in the
``Core`` hierarchy of the lua modules.
.. warning::
Consistency of Core packages
The user is responsible for maintaining consistency among core packages, as ``core_specs``
bypasses the hierarchy that allows LMod to safely switch between coherent software stacks.
.. warning::
Deep hierarchies and ``lmod spider``
For hierarchies that are deeper than three layers ``lmod spider`` may have some issues.
See `this discussion on the LMod project <https://github.com/TACC/Lmod/issues/114>`_.
.. _customize-env-modifications:
"""""""""""""""""""""""""""""""""""
Customize environment modifications
"""""""""""""""""""""""""""""""""""
You can control which prefixes in a Spack package are added to environment
variables with the ``prefix_inspections`` section; this section maps relative
prefixes to the list of environment variables which should be updated with
those prefixes.
.. code-block:: yaml
modules:
prefix_inspections:
bin:
- PATH
lib:
- LIBRARY_PATH
'':
- CMAKE_PREFIX_PATH
In this case, for a Spack package ``foo`` installed to ``/spack/prefix/foo``,
the generated module file for ``foo`` would update ``PATH`` to contain
``/spack/prefix/foo/bin``.
""""""""""""""""""""""""""""""""""""
Filter out environment modifications
""""""""""""""""""""""""""""""""""""
@@ -432,17 +590,15 @@ do so by using the environment blacklist:
.. code-block:: yaml
modules:
tcl:
dotkit:
all:
filter:
# Exclude changes to any of these variables
environment_blacklist: ['CPATH', 'LIBRARY_PATH']
The configuration above will generate module files that will not contain
modifications to either ``CPATH`` or ``LIBRARY_PATH``.
.. _autoloading-dependencies:
The configuration above will generate dotkit module files that will not contain
modifications to either ``CPATH`` or ``LIBRARY_PATH`` and environment module
files that instead will contain these modifications.
"""""""""""""""""""""
Autoload dependencies
@@ -462,21 +618,7 @@ activated using ``spack activate``:
The configuration file above will produce module files that will
load their direct dependencies if the package installed depends on ``python``.
The allowed values for the ``autoload`` statement are either ``none``,
``direct`` or ``all``. The default is ``none``.
.. tip::
Building external software
Setting ``autoload`` to ``direct`` for all packages can be useful
when building software outside of a Spack installation that depends on
artifacts in that installation. E.g. (adjust ``lmod`` vs ``tcl``
as appropriate):
.. code-block:: yaml
modules:
lmod:
all:
autoload: 'direct'
``direct`` or ``all``.
.. note::
TCL prerequisites
@@ -528,135 +670,3 @@ subcommand is ``rm``:
that already exist will ask for a confirmation by default. If
the command is used in a script, it is nevertheless possible to pass the
``-y`` argument, which will skip this safety measure.
.. _modules-in-shell-scripts:
------------------------------------
Using Spack modules in shell scripts
------------------------------------
To enable additional Spack commands for loading and unloading
module files, and to add the correct path to ``MODULEPATH``, you need to
source the appropriate setup file. Assuming Spack is installed in
``$SPACK_ROOT``, run the appropriate command for your shell:
.. code-block:: console
# For bash/zsh/sh
$ . $SPACK_ROOT/share/spack/setup-env.sh
# For tcsh/csh
$ source $SPACK_ROOT/share/spack/setup-env.csh
# For fish
$ . $SPACK_ROOT/share/spack/setup-env.fish
If you want to have Spack's shell support available on the command line
at any login you can put this source line in one of the files that are
sourced at startup (like ``.profile``, ``.bashrc`` or ``.cshrc``). Be
aware that the shell startup time may increase slightly as a result.
.. _cmd-spack-module-loads:
^^^^^^^^^^^^^^^^^^^^^^^^^^
``spack module tcl loads``
^^^^^^^^^^^^^^^^^^^^^^^^^^
In some cases, it is desirable to use a Spack-generated module, rather
than relying on Spack's built-in user-environment modification
capabilities. To translate a spec into a module name, use ``spack
module tcl loads`` or ``spack module lmod loads`` depending on the
module system desired.
To load not just a module, but also all the modules it depends on, use
the ``--dependencies`` option. This is not required for most modules
because Spack builds binaries with RPATH support. However, not all
packages use RPATH to find their dependencies: this can be true in
particular for Python extensions, which are currently *not* built with
RPATH.
Scripts to load modules recursively may be made with the command:
.. code-block:: console
$ spack module tcl loads --dependencies <spec>
An equivalent alternative using `process substitution <http://tldp.org/LDP/abs/html/process-sub.html>`_ is:
.. code-block:: console
$ source <( spack module tcl loads --dependencies <spec> )
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Module Commands for Shell Scripts
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Although Spack is flexible, the ``module`` command is much faster.
This could become an issue when emitting a series of ``spack load``
commands inside a shell script. By adding the ``--dependencies`` flag,
``spack module tcl loads`` may also be used to generate code that can be
cut-and-pasted into a shell script. For example:
.. code-block:: console
$ spack module tcl loads --dependencies py-numpy git
# bzip2@1.0.6%gcc@4.9.3=linux-x86_64
module load bzip2-1.0.6-gcc-4.9.3-ktnrhkrmbbtlvnagfatrarzjojmkvzsx
# ncurses@6.0%gcc@4.9.3=linux-x86_64
module load ncurses-6.0-gcc-4.9.3-kaazyneh3bjkfnalunchyqtygoe2mncv
# zlib@1.2.8%gcc@4.9.3=linux-x86_64
module load zlib-1.2.8-gcc-4.9.3-v3ufwaahjnviyvgjcelo36nywx2ufj7z
# sqlite@3.8.5%gcc@4.9.3=linux-x86_64
module load sqlite-3.8.5-gcc-4.9.3-a3eediswgd5f3rmto7g3szoew5nhehbr
# readline@6.3%gcc@4.9.3=linux-x86_64
module load readline-6.3-gcc-4.9.3-se6r3lsycrwxyhreg4lqirp6xixxejh3
# python@3.5.1%gcc@4.9.3=linux-x86_64
module load python-3.5.1-gcc-4.9.3-5q5rsrtjld4u6jiicuvtnx52m7tfhegi
# py-setuptools@20.5%gcc@4.9.3=linux-x86_64
module load py-setuptools-20.5-gcc-4.9.3-4qr2suj6p6glepnedmwhl4f62x64wxw2
# py-nose@1.3.7%gcc@4.9.3=linux-x86_64
module load py-nose-1.3.7-gcc-4.9.3-pwhtjw2dvdvfzjwuuztkzr7b4l6zepli
# openblas@0.2.17%gcc@4.9.3+shared=linux-x86_64
module load openblas-0.2.17-gcc-4.9.3-pw6rmlom7apfsnjtzfttyayzc7nx5e7y
# py-numpy@1.11.0%gcc@4.9.3+blas+lapack=linux-x86_64
module load py-numpy-1.11.0-gcc-4.9.3-mulodttw5pcyjufva4htsktwty4qd52r
# curl@7.47.1%gcc@4.9.3=linux-x86_64
module load curl-7.47.1-gcc-4.9.3-ohz3fwsepm3b462p5lnaquv7op7naqbi
# autoconf@2.69%gcc@4.9.3=linux-x86_64
module load autoconf-2.69-gcc-4.9.3-bkibjqhgqm5e3o423ogfv2y3o6h2uoq4
# cmake@3.5.0%gcc@4.9.3~doc+ncurses+openssl~qt=linux-x86_64
module load cmake-3.5.0-gcc-4.9.3-x7xnsklmgwla3ubfgzppamtbqk5rwn7t
# expat@2.1.0%gcc@4.9.3=linux-x86_64
module load expat-2.1.0-gcc-4.9.3-6pkz2ucnk2e62imwakejjvbv6egncppd
# git@2.8.0-rc2%gcc@4.9.3+curl+expat=linux-x86_64
module load git-2.8.0-rc2-gcc-4.9.3-3bib4hqtnv5xjjoq5ugt3inblt4xrgkd
The script may be further edited by removing unnecessary modules.
^^^^^^^^^^^^^^^
Module Prefixes
^^^^^^^^^^^^^^^
On some systems, modules are automatically prefixed with a certain
string; ``spack module tcl loads`` needs to know about that prefix when it
issues ``module load`` commands. Add the ``--prefix`` option to your
``spack module tcl loads`` commands if this is necessary.
For example, consider the following on one system:
.. code-block:: console
$ module avail
linux-SuSE11-x86_64/antlr-2.7.7-gcc-5.3.0-bdpl46y
$ spack module tcl loads antlr # WRONG!
# antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64
module load antlr-2.7.7-gcc-5.3.0-bdpl46y
$ spack module tcl loads --prefix linux-SuSE11-x86_64/ antlr
# antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64
module load linux-SuSE11-x86_64/antlr-2.7.7-gcc-5.3.0-bdpl46y

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -10,8 +10,8 @@ Package List
============
This is a list of things you can install using Spack. It is
automatically generated based on the packages in this Spack
version.
automatically generated based on the packages in the latest Spack
release.
.. raw:: html
:file: package_list.html


View File

@@ -1,614 +0,0 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _pipelines:
=========
Pipelines
=========
Spack provides commands that support generating and running automated build
pipelines designed for Gitlab CI. At the highest level it works like this:
provide a spack environment describing the set of packages you care about,
and include within that environment file a description of how those packages
should be mapped to Gitlab runners. Spack can then generate a ``.gitlab-ci.yml``
file containing job descriptions for all your packages that can be run by a
properly configured Gitlab CI instance. When run, the generated pipeline will
build and deploy binaries, and it can optionally report to a CDash instance
regarding the health of the builds as they evolve over time.
------------------------------
Getting started with pipelines
------------------------------
It is fairly straightforward to get started with automated build pipelines. At
a minimum, you'll need to set up a Gitlab instance (more about Gitlab CI
`here <https://about.gitlab.com/product/continuous-integration/>`_) and configure
at least one `runner <https://docs.gitlab.com/runner/>`_. Then the basic steps
for setting up a build pipeline are as follows:
#. Create a repository on your gitlab instance
#. Add a ``spack.yaml`` at the root containing your pipeline environment (see
below for details)
#. Add a ``.gitlab-ci.yml`` at the root containing two jobs (one to generate
the pipeline dynamically, and one to run the generated jobs), similar to
this one:
.. code-block:: yaml
stages: [generate, build]
generate-pipeline:
stage: generate
tags:
- <custom-tag>
script:
- spack env activate --without-view .
- spack ci generate
--output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
artifacts:
paths:
- "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
build-jobs:
stage: build
trigger:
include:
- artifact: "jobs_scratch_dir/pipeline.yml"
job: generate-pipeline
strategy: depend
#. Add any secrets required by the CI process to environment variables using the
CI web ui
#. Push a commit containing the ``spack.yaml`` and ``.gitlab-ci.yml`` mentioned above
to the gitlab repository
The ``<custom-tag>``, above, is used to pick one of your configured runners to
run the pipeline generation phase (this is implemented in the ``spack ci generate``
command, which assumes the runner has an appropriate version of spack installed
and configured for use). Of course, there are many ways to customize the process.
You can configure CDash reporting on the progress of your builds, set up S3 buckets
to mirror binaries built by the pipeline, clone a custom spack repository/ref for
use by the pipeline, and more.
While it is possible to set up pipelines on gitlab.com, the builds there are
limited to 60 minutes and generic hardware. It is also possible to
`hook up <https://about.gitlab.com/blog/2018/04/24/getting-started-gitlab-ci-gcp>`_
Gitlab to Google Kubernetes Engine (`GKE <https://cloud.google.com/kubernetes-engine/>`_)
or Amazon Elastic Kubernetes Service (`EKS <https://aws.amazon.com/eks>`_), though those
topics are outside the scope of this document.
Spack's pipelines are now making use of the
`trigger <https://docs.gitlab.com/12.9/ee/ci/yaml/README.html#trigger>`_ syntax to run
dynamically generated
`child pipelines <https://docs.gitlab.com/12.9/ee/ci/parent_child_pipelines.html>`_.
Note that the use of dynamic child pipelines requires running Gitlab version
``>= 12.9``.
-----------------------------------
Spack commands supporting pipelines
-----------------------------------
Spack provides a command ``ci`` with two sub-commands: ``spack ci generate`` generates
a pipeline (a .gitlab-ci.yml file) from a spack environment, and ``spack ci rebuild``
checks a spec against a remote mirror and possibly rebuilds it from source and updates
the binary mirror with the latest built package. Both ``spack ci ...`` commands must
be run from within the same environment, as each one makes use of the environment for
different purposes. Additionally, some options to the commands (or conditions present
in the spack environment file) may require particular environment variables to be
set in order to function properly. Examples of these are typically secrets
needed for pipeline operation that should not be visible in a spack environment
file. These environment variables are described in more detail
:ref:`ci_environment_variables`.
.. _cmd-spack-ci:
^^^^^^^^^^^^^^^^^^
``spack ci``
^^^^^^^^^^^^^^^^^^
Super-command for functionality related to generating pipelines and executing
pipeline jobs.
.. _cmd-spack-ci-generate:
^^^^^^^^^^^^^^^^^^^^^
``spack ci generate``
^^^^^^^^^^^^^^^^^^^^^
Concretizes the specs in the active environment, stages them (as described in
:ref:`staging_algorithm`), and writes the resulting ``.gitlab-ci.yml`` to disk.
Using ``--prune-dag`` or ``--no-prune-dag`` configures whether or not jobs are
generated for specs that are already up to date on the mirror. If enabling
DAG pruning using ``--prune-dag``, more information may be required in your
``spack.yaml`` file, see the :ref:`noop_jobs` section below regarding
``service-job-attributes``.
The ``--optimize`` argument is experimental and runs the generated pipeline
document through a series of optimization passes designed to reduce the size
of the generated file.
The ``--dependencies`` argument is also experimental and disables what in Gitlab is
referred to as DAG scheduling, internally using the ``dependencies`` keyword
rather than ``needs`` to list dependency jobs. The drawback of using this option
is that before any job can begin, all jobs in previous stages must first
complete. The benefit is that Gitlab allows more dependencies to be listed
when using ``dependencies`` instead of ``needs``.
The optional ``--output-file`` argument should be an absolute path (including
file name) to the generated pipeline, and if not given, the default is
``./.gitlab-ci.yml``.
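For example (the output path is illustrative):

.. code-block:: console

   $ spack env activate --without-view .
   $ spack ci generate --prune-dag --output-file "${PWD}/generated-pipeline.yml"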
.. _cmd-spack-ci-rebuild:
^^^^^^^^^^^^^^^^^^^^
``spack ci rebuild``
^^^^^^^^^^^^^^^^^^^^
This sub-command is responsible for ensuring a single spec from the release
environment is up to date on the remote mirror configured in the environment,
and as such, corresponds to a single job in the ``.gitlab-ci.yml`` file.
Rather than taking command-line arguments, this sub-command expects information
to be communicated via environment variables, which will typically come via the
``.gitlab-ci.yml`` job as ``variables``.
------------------------------------
A pipeline-enabled spack environment
------------------------------------
Here's an example of a spack environment file that has been enhanced with
sections describing a build pipeline:
.. code-block:: yaml
spack:
definitions:
- pkgs:
- readline@7.0
- compilers:
- '%gcc@5.5.0'
- oses:
- os=ubuntu18.04
- os=centos7
specs:
- matrix:
- [$pkgs]
- [$compilers]
- [$oses]
mirrors:
cloud_gitlab: https://mirror.spack.io
gitlab-ci:
mappings:
- match:
- os=ubuntu18.04
runner-attributes:
tags:
- spack-kube
image: spack/ubuntu-bionic
- match:
- os=centos7
runner-attributes:
tags:
- spack-kube
image: spack/centos7
cdash:
build-group: Release Testing
url: https://cdash.spack.io
project: Spack
site: Spack AWS Gitlab Instance
Hopefully, the ``definitions``, ``specs``, ``mirrors``, etc. sections are already
familiar, as they are part of spack :ref:`environments`. So let's take a more
in-depth look at some of the pipeline-related sections in that environment file
that might not be as familiar.
The ``gitlab-ci`` section is used to configure how the pipeline workload should be
generated, mainly how the jobs for building specs should be assigned to the
configured runners on your instance. Each entry within the list of ``mappings``
corresponds to a known gitlab runner, where the ``match`` section is used
in assigning a release spec to one of the runners, and the ``runner-attributes``
section is used to configure the spec/job for that particular runner.
Both the top-level ``gitlab-ci`` section as well as each ``runner-attributes``
section can also contain the following keys: ``image``, ``tags``, ``variables``,
``before_script``, ``script``, and ``after_script``. If any of these keys are
provided at the ``gitlab-ci`` level, they will be used as the defaults for any
``runner-attributes``, unless they are overridden in those sections. Specifying
any of these keys at the ``runner-attributes`` level generally overrides the
keys specified at the higher level, with a couple exceptions. Any ``variables``
specified at both levels result in those dictionaries getting merged in the
resulting generated job, and any duplicate variable names get assigned the value
provided in the specific ``runner-attributes``. If ``tags`` are specified both
at the ``gitlab-ci`` level as well as the ``runner-attributes`` level, then the
lists of tags are combined, and any duplicates are removed.
See the section below on using a custom spack for an example of how these keys
could be used.
There are other pipeline options you can configure within the ``gitlab-ci`` section
as well.
The ``bootstrap`` section allows you to specify lists of specs from
your ``definitions`` that should be staged ahead of the environment's ``specs`` (this
section is described in more detail below). The ``enable-artifacts-buildcache`` key
takes a boolean and determines whether the pipeline uses artifacts to store and
pass along the buildcaches from one stage to the next (the default if you don't
provide this option is ``False``).
The optional ``cdash`` section provides information that will be used by the
``spack ci generate`` command (invoked by ``spack ci start``) for reporting
to CDash. All the jobs generated from this environment will belong to a
"build group" within CDash that can be tracked over time. As the release
progresses, this build group may have jobs added or removed. The url, project,
and site are used to specify the CDash instance to which build results should
be reported.
Take a look at the
`schema <https://github.com/spack/spack/blob/develop/lib/spack/spack/schema/gitlab_ci.py>`_
for the gitlab-ci section of the spack environment file, to see precisely what
syntax is allowed there.
.. _rebuild_index:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Note about rebuilding buildcache index
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
By default, while a pipeline job may rebuild a package, create a buildcache
entry, and push it to the mirror, it does not automatically re-generate the
mirror's buildcache index afterward. Because the index is not needed by the
default rebuild jobs in the pipeline, not updating the index at the end of
each job avoids possible race conditions between simultaneous jobs, and it
avoids the computational expense of regenerating the index. This potentially
saves minutes per job, depending on the number of binary packages in the
mirror. As a result, the default is that the mirror's buildcache index may
not correctly reflect the mirror's contents at the end of a pipeline.
To make sure the buildcache index is up to date at the end of your pipeline,
spack generates a job to update the buildcache index of the target mirror
at the end of each pipeline by default. You can disable this behavior by
adding ``rebuild-index: False`` inside the ``gitlab-ci`` section of your
spack environment. Spack will assign the job any runner attributes found
on the ``service-job-attributes``, if you have provided that in your
``spack.yaml``.
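A minimal sketch of disabling the index-rebuild job:

.. code-block:: yaml

   spack:
     gitlab-ci:
       rebuild-index: False
       # ... mappings, etc.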
.. _noop_jobs:
^^^^^^^^^^^^^^^^^^^^^^^
Note about "no-op" jobs
^^^^^^^^^^^^^^^^^^^^^^^
If no specs in an environment need to be rebuilt during a given pipeline run
(meaning all are already up to date on the mirror), a single successful job
(a NO-OP) is still generated to avoid an empty pipeline (which GitLab
considers to be an error). An optional ``service-job-attributes`` section
can be added to your ``spack.yaml`` where you can provide ``tags`` and
``image`` or ``variables`` for the generated NO-OP job. This section also
supports providing ``before_script``, ``script``, and ``after_script``, in
case you want to take some custom actions in the case of any empty pipeline.
Following is an example of this section added to a ``spack.yaml``:
.. code-block:: yaml
spack:
specs:
- openmpi
mirrors:
cloud_gitlab: https://mirror.spack.io
gitlab-ci:
mappings:
- match:
- os=centos8
runner-attributes:
tags:
- custom
- tag
image: spack/centos7
service-job-attributes:
tags: ['custom', 'tag']
image:
name: 'some.image.registry/custom-image:latest'
entrypoint: ['/bin/bash']
script:
- echo "Custom message in a custom script"
The example above illustrates how you can provide the attributes used to run
the NO-OP job in the case of an empty pipeline. The only field for the NO-OP
job that might be generated for you is ``script``, but that will only happen
if you do not provide one yourself.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Assignment of specs to runners
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``mappings`` section corresponds to a list of runners, and during assignment
of specs to runners, the list is traversed in order looking for matches; the
first runner that matches a release spec is assigned to build that spec. The
``match`` section within each runner mapping section is a list of specs, and
if any of those specs match the release spec (the ``spec.satisfies()`` method
is used), then that runner is considered a match.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configuration of specs/jobs for a runner
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Once a runner has been chosen to build a release spec, the ``runner-attributes``
section provides information determining details of the job in the context of
the runner. The ``runner-attributes`` section must have a ``tags`` key, which
is a list containing at least one tag used to select the runner from among the
runners known to the gitlab instance. For Docker executor type runners, the
``image`` key is used to specify the Docker image used to build the release spec
(and could also appear as a dictionary with a ``name`` specifying the image name,
as well as an ``entrypoint`` to override whatever the default for that image is).
For other types of runners the ``variables`` key will be useful to pass any
information on to the runner that it needs to do its work (e.g. scheduler
parameters, etc.). Any ``variables`` provided here will be added, verbatim, to
each job.
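Putting those keys together, a single ``runner-attributes`` entry might
look like this sketch (tag, image, and variable values are illustrative):

.. code-block:: yaml

   runner-attributes:
     tags:
       - my-runner-tag
     image:
       name: spack/centos7
       entrypoint: ['/bin/bash']
     variables:
       SLURM_PARTITION: debug   # hypothetical scheduler parameter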
The ``runner-attributes`` section also allows users to supply custom ``script``,
``before_script``, and ``after_script`` sections to be applied to every job
scheduled on that runner. This allows users to do any custom preparation or
cleanup tasks that fit their particular workflow, as well as completely
customize the rebuilding of a spec if they so choose. Spack will not generate
a ``before_script`` or ``after_script`` for jobs, but if you do not provide
a custom ``script``, spack will generate one for you that assumes your
``spack.yaml`` is at the root of the repository, activates that environment for
you, and invokes ``spack ci rebuild``.
.. _staging_algorithm:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Summary of ``.gitlab-ci.yml`` generation algorithm
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
All specs yielded by the matrix (or all the specs in the environment) have their
dependencies computed, and the entire resulting set of specs is staged together
before being run through the ``gitlab-ci/mappings`` entries, where each staged
spec is assigned a runner. "Staging" is the name given to the process of
figuring out in what order the specs should be built, taking into consideration
GitLab CI rules about jobs/stages. The goal of staging is to maximize the number
of jobs in any stage of the pipeline, while ensuring that the jobs in any stage
only depend on jobs in previous stages (since those jobs are guaranteed to have
completed already). As a runner is determined for a job, the information in the
``runner-attributes`` is used to populate various parts of the job description
that will be used by GitLab CI. Once all the jobs have been assigned a runner,
the ``.gitlab-ci.yml`` is written to disk.
The short example provided above would result in the ``readline``, ``ncurses``,
and ``pkgconf`` packages getting staged and built on the runner chosen by the
``spack-k8s`` tag. In this example, Spack assumes the runner is a Docker executor
type runner, and thus certain jobs will be run in the ``centos7`` container
and others in the ``ubuntu-18.04`` container. The resulting ``.gitlab-ci.yml``
will contain six jobs in three stages. Once the jobs have been generated, the
presence of a ``SPACK_CDASH_AUTH_TOKEN`` environment variable during the
``spack ci generate`` command would result in all of the jobs being put in a
build group on CDash called "Release Testing" (that group will be created if
it doesn't already exist).
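To make the stage ordering concrete, the generated ``.gitlab-ci.yml`` for that
example would have a shape roughly like the following sketch. Only the three
``centos7`` jobs are shown (a parallel set exists for the ``ubuntu-18.04``
container), and the job names, hashes, and fields are illustrative; the real
file contains more detail:

.. code-block:: yaml

   stages: [stage-0, stage-1, stage-2]

   pkgconf/abc1234:                 # no dependencies, so it builds first
     stage: stage-0
     tags: [spack-k8s]
     image: spack/centos7
     script: [spack ci rebuild]

   ncurses/def5678:                 # depends on pkgconf
     stage: stage-1
     tags: [spack-k8s]
     image: spack/centos7
     script: [spack ci rebuild]

   readline/9876fed:                # depends on ncurses
     stage: stage-2
     tags: [spack-k8s]
     image: spack/centos7
     script: [spack ci rebuild]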
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Optional compiler bootstrapping
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Spack pipelines also have support for bootstrapping compilers on systems that
may not already have the desired compilers installed. The idea here is that
you can specify a list of things to bootstrap in your ``definitions``, and
Spack will guarantee those will be installed in a phase of the pipeline before
your release specs, so that you can rely on those packages being available in
the binary mirror when you need them later on in the pipeline. At the moment,
the only viable use case for bootstrapping is to install compilers.
Here's an example of what bootstrapping some compilers might look like:
.. code-block:: yaml
spack:
definitions:
- compiler-pkgs:
- 'llvm+clang@6.0.1 os=centos7'
- 'gcc@6.5.0 os=centos7'
- 'llvm+clang@6.0.1 os=ubuntu18.04'
- 'gcc@6.5.0 os=ubuntu18.04'
- pkgs:
- readline@7.0
- compilers:
- '%gcc@5.5.0'
- '%gcc@6.5.0'
- '%gcc@7.3.0'
- '%clang@6.0.0'
- '%clang@6.0.1'
- oses:
- os=ubuntu18.04
- os=centos7
specs:
- matrix:
- [$pkgs]
- [$compilers]
- [$oses]
exclude:
- '%gcc@7.3.0 os=centos7'
- '%gcc@5.5.0 os=ubuntu18.04'
gitlab-ci:
bootstrap:
- name: compiler-pkgs
compiler-agnostic: true
mappings:
# mappings similar to the example higher up in this description
...
The example above adds a list to the ``definitions`` called ``compiler-pkgs``
(you can add any number of these), which lists compiler packages that should
be staged ahead of the full matrix of release specs (in this example, only
readline). Then, within the ``gitlab-ci`` section, note the addition of a
``bootstrap`` section, which can contain a list of items, each referring to
a list in the ``definitions`` section. These items can either
be a dictionary or a string. If you supply a dictionary, it must have a
``name`` key whose value must match one of the lists in ``definitions``, and
it can have a ``compiler-agnostic`` key whose value is a boolean. If you supply
a string, then it needs to match one of the lists provided in ``definitions``.
You can think of the bootstrap list as an ordered list of pipeline "phases"
that will be staged before your actual release specs. While this introduces
another potential bottleneck in the pipeline (all jobs in all stages of one
phase must complete before any jobs in the next phase can begin), it also means
you are guaranteed your bootstrapped compilers will be available when you need
them. The ``compiler-agnostic`` key can be provided with each item in the
bootstrap list. It tells the ``spack ci generate`` command that any jobs staged
from that particular list should have the compiler removed from the spec, so
that any compiler available on the runner where the job is run can be used to
build the package.
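The two item forms look like the following sketch (``other-bootstrap-pkgs`` is
a hypothetical second list; in practice you would use whichever form fits each
list):

.. code-block:: yaml

   gitlab-ci:
     bootstrap:
       # string form: just the name of a list from the definitions section
       - compiler-pkgs
       # dictionary form: a required name key plus optional settings
       - name: other-bootstrap-pkgs
         compiler-agnostic: true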
When including a bootstrapping phase as in the example above, the result is that
the bootstrapped compiler packages will be pushed to the binary mirror (and the
local artifacts mirror) before the actual release specs are built. In this case,
the jobs corresponding to subsequent release specs are configured with
``install_missing_compilers`` enabled, so that if Spack is asked to install a
package with a compiler it doesn't know about, that compiler can first be
installed quickly from the binary mirror.
Since bootstrapping compilers is optional, those items can be left out of the
environment/stack file, in which case no bootstrapping will be done (only the
specs will be staged for building) and the runners will be expected to already
have all needed compilers installed and configured for Spack to use.
-------------------------------------
Using a custom spack in your pipeline
-------------------------------------
If your runners will not have a version of Spack ready to invoke, or if for some
other reason you want to use a custom version of Spack to run your pipelines,
this section provides an example of how you could take advantage of
user-provided pipeline scripts to accomplish this fairly simply. First, you
could use the GitLab user interface to create CI environment variables
containing the URL and branch or tag you want to use (calling them, for
example, ``SPACK_REPO`` and ``SPACK_REF``), then refer to those in a custom shell
script invoked both from your pipeline generation job and in your rebuild
jobs. Here's the ``generate-pipeline`` job from the top of this document,
updated to invoke a custom shell script that will clone and source a custom
Spack:
.. code-block:: yaml
generate-pipeline:
tags:
- <some-other-tag>
before_script:
- ./cloneSpack.sh
script:
- spack env activate --without-view .
- spack ci generate
--output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
after_script:
- rm -rf ./spack
artifacts:
paths:
- "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
And the ``cloneSpack.sh`` script could contain:
.. code-block:: bash
#!/bin/bash

# Fail fast if the clone or checkout fails
set -e

# SPACK_REPO and SPACK_REF are CI variables set in the GitLab UI
git clone "${SPACK_REPO}"
pushd ./spack
git checkout "${SPACK_REF}"
popd

. "./spack/share/spack/setup-env.sh"
spack --version
Finally, you would also want your generated rebuild jobs to clone that version
of Spack, so you would update your ``spack.yaml`` from above as follows:
.. code-block:: yaml
spack:
...
gitlab-ci:
mappings:
- match:
- os=ubuntu18.04
runner-attributes:
tags:
- spack-kube
image: spack/ubuntu-bionic
before_script:
- ./cloneSpack.sh
script:
- spack env activate --without-view .
- spack -d ci rebuild
after_script:
- rm -rf ./spack
Now all of the generated rebuild jobs will use the same shell script to clone
Spack before running their actual workload. Note the custom ``script`` section
in the example above; it runs ``spack ci rebuild`` in debug mode to get more
information when builds fail.
Now imagine you have long pipelines with many specs to be built, and you
are pointing to a Spack repository and branch that has a tendency to change
frequently, such as the main repo and its ``develop`` branch. If each child
job checks out the ``develop`` branch, some jobs could run with one SHA of
Spack while later jobs run with another. To help avoid this issue, the
pipeline generation process saves global variables called ``SPACK_VERSION``
and ``SPACK_CHECKOUT_VERSION`` that capture the version of Spack used to
generate the pipeline. While the ``SPACK_VERSION`` variable simply contains
the human-readable value produced by ``spack -V`` at pipeline generation
time, the ``SPACK_CHECKOUT_VERSION`` variable can be used in a
``git checkout`` command to make sure all child jobs check out the same
version of Spack used to generate the pipeline. To take advantage of this,
you could simply replace ``git checkout ${SPACK_REF}`` in the example
``cloneSpack.sh`` script above with ``git checkout ${SPACK_CHECKOUT_VERSION}``.
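Equivalently, if you prefer not to maintain a separate script, the clone could
be inlined in the generated jobs' ``before_script``; a sketch (the tag and
image are carried over from the earlier example):

.. code-block:: yaml

   runner-attributes:
     tags: [spack-kube]
     image: spack/ubuntu-bionic
     before_script:
       - git clone "${SPACK_REPO}"
       # pin child jobs to the exact commit that generated the pipeline
       - git -C ./spack checkout "${SPACK_CHECKOUT_VERSION}"
       - . "./spack/share/spack/setup-env.sh"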
On the other hand, if you're pointing to a Spack repository and branch under
your control, there may be no benefit to using the captured
``SPACK_CHECKOUT_VERSION``; instead, you can simply clone using the project CI
variables you set (in the earlier example, these were ``SPACK_REPO`` and
``SPACK_REF``).
.. _ci_environment_variables:
--------------------------------------------------
Environment variables affecting pipeline operation
--------------------------------------------------
Certain secrets and some other information should be provided to the pipeline
infrastructure via environment variables, usually for reasons of security, but
in some cases to support other pipeline use cases such as PR testing. The
environment variables used by the pipeline infrastructure are described here.
^^^^^^^^^^^^^^^^^
AWS_ACCESS_KEY_ID
^^^^^^^^^^^^^^^^^
Needed when the binary mirror is an S3 bucket.
^^^^^^^^^^^^^^^^^^^^^
AWS_SECRET_ACCESS_KEY
^^^^^^^^^^^^^^^^^^^^^
Needed when the binary mirror is an S3 bucket.
^^^^^^^^^^^^^^^
S3_ENDPOINT_URL
^^^^^^^^^^^^^^^
Needed when the binary mirror is an S3 bucket that is *not* on AWS.
^^^^^^^^^^^^^^^^^^^^^^
SPACK_CDASH_AUTH_TOKEN
^^^^^^^^^^^^^^^^^^^^^^
Needed in order to report build groups to CDash.
^^^^^^^^^^^^^^^^^
SPACK_SIGNING_KEY
^^^^^^^^^^^^^^^^^
Needed to sign/verify binary packages from the remote binary mirror.
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -9,7 +9,7 @@
Package Repositories
=============================
Spack comes with thousands of built-in package recipes in
Spack comes with over 1,000 built-in package recipes in
``var/spack/repos/builtin/``. This is a **package repository** -- a
directory that Spack searches when it needs to find a package by name.
You may need to maintain packages for restricted, proprietary or
@@ -280,16 +280,16 @@ you install it, you can use ``spack spec -N``:
Concretized
--------------------------------
builtin.hdf5@1.10.0-patch1%apple-clang@7.0.2+cxx~debug+fortran+mpi+shared~szip~threadsafe arch=darwin-elcapitan-x86_64
^builtin.openmpi@2.0.1%apple-clang@7.0.2~mxm~pmi~psm~psm2~slurm~sqlite3~thread_multiple~tm~verbs+vt arch=darwin-elcapitan-x86_64
^builtin.hwloc@1.11.4%apple-clang@7.0.2 arch=darwin-elcapitan-x86_64
^builtin.libpciaccess@0.13.4%apple-clang@7.0.2 arch=darwin-elcapitan-x86_64
^builtin.libtool@2.4.6%apple-clang@7.0.2 arch=darwin-elcapitan-x86_64
^builtin.m4@1.4.17%apple-clang@7.0.2+sigsegv arch=darwin-elcapitan-x86_64
^builtin.libsigsegv@2.10%apple-clang@7.0.2 arch=darwin-elcapitan-x86_64
^builtin.pkg-config@0.29.1%apple-clang@7.0.2+internal_glib arch=darwin-elcapitan-x86_64
^builtin.util-macros@1.19.0%apple-clang@7.0.2 arch=darwin-elcapitan-x86_64
^builtin.zlib@1.2.8%apple-clang@7.0.2+pic arch=darwin-elcapitan-x86_64
builtin.hdf5@1.10.0-patch1%clang@7.0.2-apple+cxx~debug+fortran+mpi+shared~szip~threadsafe arch=darwin-elcapitan-x86_64
^builtin.openmpi@2.0.1%clang@7.0.2-apple~mxm~pmi~psm~psm2~slurm~sqlite3~thread_multiple~tm~verbs+vt arch=darwin-elcapitan-x86_64
^builtin.hwloc@1.11.4%clang@7.0.2-apple arch=darwin-elcapitan-x86_64
^builtin.libpciaccess@0.13.4%clang@7.0.2-apple arch=darwin-elcapitan-x86_64
^builtin.libtool@2.4.6%clang@7.0.2-apple arch=darwin-elcapitan-x86_64
^builtin.m4@1.4.17%clang@7.0.2-apple+sigsegv arch=darwin-elcapitan-x86_64
^builtin.libsigsegv@2.10%clang@7.0.2-apple arch=darwin-elcapitan-x86_64
^builtin.pkg-config@0.29.1%clang@7.0.2-apple+internal_glib arch=darwin-elcapitan-x86_64
^builtin.util-macros@1.19.0%clang@7.0.2-apple arch=darwin-elcapitan-x86_64
^builtin.zlib@1.2.8%clang@7.0.2-apple+pic arch=darwin-elcapitan-x86_64
.. warning::
View File
@@ -1,7 +1,6 @@
# These dependencies should be installed using pip in order
# to build the documentation.
sphinx
sphinx==1.7.0
sphinxcontrib-programoutput
sphinx-rtd-theme
python-levenshtein
View File
@@ -1,19 +0,0 @@
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
# These are requirements for building the documentation. You can run
# these commands in this directory to install Sphinx and its plugins,
# then build the docs:
#
# spack install
# spack env activate .
# make
#
spack:
specs:
- py-sphinx
- py-sphinxcontrib-programoutput
- py-sphinx-rtd-theme
View File
@@ -0,0 +1,60 @@
.. Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _spack-101:
=============================
Tutorial: Spack 101
=============================
This is a full-day introduction to Spack with lectures and live demos. It
was presented as a tutorial at `Supercomputing 2018
<http://sc18.supercomputing.org>`_. You can use these materials to teach
a course on Spack at your own site, or you can just skip ahead and read
the live demo scripts to see how Spack is used in practice.
.. _sc16-slides:
.. rubric:: Slides
.. figure:: tutorial/sc16-tutorial-slide-preview.png
:target: http://spack.io/slides/Spack-SC18-Tutorial.pdf
:height: 72px
:align: left
:alt: Slide Preview
`Download Slides <http://spack.io/slides/Spack-SC18-Tutorial.pdf>`_.
**Full citation:** Todd Gamblin, Gregory Becker, Massimiliano Culpo, Matt
Legendre, Mario Melara, Peter Scheibel, and Adam Stewart.
`Managing HPC Software Complexity with Spack
<https://sc18.supercomputing.org/presentation/?id=tut165&sess=sess252>`_.
Tutorial presented at Supercomputing 2018. November 12, 2018, Dallas, TX, USA.
.. _sc16-live-demos:
.. rubric:: Live Demos
These scripts will take you step-by-step through basic Spack tasks. They
correspond to sections in the slides above.
1. :ref:`basics-tutorial`
2. :ref:`configs-tutorial`
3. :ref:`packaging-tutorial`
4. :ref:`environments-tutorial`
5. :ref:`modules-tutorial`
6. :ref:`build-systems-tutorial`
7. :ref:`advanced-packaging-tutorial`
Full contents:
.. toctree::
tutorial_basics
tutorial_configuration
tutorial_packaging
tutorial_environments
tutorial_modules
tutorial_buildsystems
tutorial_advanced_packaging
View File
@@ -0,0 +1,39 @@
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
# This is a template package file for Spack. We've put "FIXME"
# next to all the things you'll want to change. Once you've handled
# them, you can save this file and test your package like this:
#
# spack install mpileaks
#
# You can edit this file again by typing:
#
# spack edit mpileaks
#
# See the Spack documentation for more information on packaging.
# If you submit this package back to Spack as a pull request,
# please first remove this boilerplate and all FIXME comments.
#
from spack import *
class Mpileaks(Package):
"""FIXME: Put a proper description of your package here."""
# FIXME: Add a proper url for your package's homepage here.
homepage = "http://www.example.com"
url = "https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz"
version('1.0', '8838c574b39202a57d7c2d68692718aa')
# FIXME: Add dependencies if required.
# depends_on('foo')
def install(self, spec, prefix):
# FIXME: Unknown build system
make()
make('install')
View File
@@ -0,0 +1,23 @@
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mpileaks(Package):
"""Tool to detect and report MPI objects like MPI_Requests and
MPI_Datatypes."""
homepage = "https://github.com/hpc/mpileaks"
url = "https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz" # NOQA
version('1.0', '8838c574b39202a57d7c2d68692718aa')
# FIXME: Add dependencies if required.
# depends_on('foo')
def install(self, spec, prefix):
# FIXME: Unknown build system
make()
make('install')
View File
@@ -0,0 +1,25 @@
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mpileaks(Package):
"""Tool to detect and report MPI objects like MPI_Requests and
MPI_Datatypes."""
homepage = "https://github.com/hpc/mpileaks"
url = "https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz"
version('1.0', '8838c574b39202a57d7c2d68692718aa')
depends_on('mpi')
depends_on('adept-utils')
depends_on('callpath')
def install(self, spec, prefix):
# FIXME: Unknown build system
make()
make('install')
View File
@@ -0,0 +1,25 @@
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mpileaks(Package):
"""Tool to detect and report MPI objects like MPI_Requests and
MPI_Datatypes."""
homepage = "https://github.com/hpc/mpileaks"
url = "https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz"
version('1.0', '8838c574b39202a57d7c2d68692718aa')
depends_on('mpi')
depends_on('adept-utils')
depends_on('callpath')
def install(self, spec, prefix):
configure()
make()
make('install')
View File
@@ -0,0 +1,27 @@
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mpileaks(Package):
"""Tool to detect and report MPI objects like MPI_Requests and
MPI_Datatypes."""
homepage = "https://github.com/hpc/mpileaks"
url = "https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz"
version('1.0', '8838c574b39202a57d7c2d68692718aa')
depends_on('mpi')
depends_on('adept-utils')
depends_on('callpath')
def install(self, spec, prefix):
configure('--with-adept-utils=%s' % self.spec['adept-utils'].prefix,
'--with-callpath=%s' % self.spec['callpath'].prefix,
'--prefix=%s' % self.spec.prefix)
make()
make('install')
View File
@@ -0,0 +1,34 @@
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mpileaks(Package):
"""Tool to detect and report MPI objects like MPI_Requests and
MPI_Datatypes."""
homepage = "https://github.com/hpc/mpileaks"
url = "https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz"
version('1.0', '8838c574b39202a57d7c2d68692718aa')
variant('stackstart', values=int, default=0, description='Specify the number of stack frames to truncate.')
depends_on('mpi')
depends_on('adept-utils')
depends_on('callpath')
def install(self, spec, prefix):
stackstart = int(self.spec.variants['stackstart'].value)
confargs = ['--with-adept-utils=%s' % self.spec['adept-utils'].prefix,
'--with-callpath=%s' % self.spec['callpath'].prefix,
'--prefix=%s' % self.spec.prefix]
if stackstart:
confargs.extend(['--with-stack-start-c=%s' % stackstart,
'--with-stack-start-fortran=%s' % stackstart])
configure(*confargs)
make()
make('install')
View File
@@ -0,0 +1,27 @@
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mpileaks(AutotoolsPackage):
"""Tool to detect and report leaked MPI objects like MPI_Requests and
MPI_Datatypes."""
homepage = "https://github.com/hpc/mpileaks"
url = "https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz"
version('1.0', '8838c574b39202a57d7c2d68692718aa')
depends_on("mpi")
depends_on("adept-utils")
depends_on("callpath")
def install(self, spec, prefix):
configure("--prefix=" + prefix,
"--with-adept-utils=" + spec['adept-utils'].prefix,
"--with-callpath=" + spec['callpath'].prefix)
make()
make("install")
View File
@@ -0,0 +1,32 @@
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mpileaks(AutotoolsPackage):
"""Tool to detect and report leaked MPI objects like MPI_Requests and
MPI_Datatypes."""
homepage = "https://github.com/hpc/mpileaks"
url = "https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz"
version('1.0', '8838c574b39202a57d7c2d68692718aa')
variant("stackstart", values=int, default=0,
description="Specify the number of stack frames to truncate")
depends_on("mpi")
depends_on("adept-utils")
depends_on("callpath")
def configure_args(self):
stackstart = int(self.spec.variants['stackstart'].value)
args = ["--with-adept-utils=" + spec['adept-utils'].prefix,
"--with-callpath=" + spec['callpath'].prefix]
if stackstart:
args.extend(['--with-stack-start-c=%s' % stackstart,
'--with-stack-start-fortran=%s' % stackstart])
return args
Some files were not shown because too many files have changed in this diff.