Compare commits


6 Commits

Author            SHA1        Message                                                       Date
Harmen Stoppels   614714fc01  delete dead code                                              2025-01-06 15:07:48 +01:00
Wouter Deconinck  b328b5d169  PythonPackage: call python and pass env mods                  2025-01-05 09:47:58 -06:00
Wouter Deconinck  cda1a2fd91  PythonPackage: fix style                                      2025-01-03 19:43:21 -06:00
Wouter Deconinck  4181237bcd  PythonPackage: fix missing import                             2025-01-03 19:40:57 -06:00
Wouter Deconinck  7a4859721a  EnvironmentModifications.set_env(self, env) context manager  2025-01-03 19:35:53 -06:00
Wouter Deconinck  d4a28b4e22  PythonPackage: run test_imports in run env context            2025-01-03 19:30:48 -06:00
20578 changed files with 491122 additions and 528646 deletions


@@ -28,7 +28,7 @@ max-line-length = 99
# - F821: undefined name `name`
#
per-file-ignores =
var/spack/*/package.py:F403,F405,F821
var/spack/repos/*/package.py:F403,F405,F821
*-ci-package.py:F403,F405,F821
# exclude things we usually do not want linting for.

.gitattributes vendored

@@ -1,3 +1,4 @@
*.py diff=python
*.lp linguist-language=Prolog
lib/spack/external/* linguist-vendored
*.bat text eol=crlf
*.bat text eol=crlf


@@ -59,6 +59,7 @@ jobs:
- name: Package audits (without coverage)
if: ${{ runner.os == 'Windows' }}
run: |
. share/spack/setup-env.sh
spack -d audit packages
./share/spack/qa/validate_last_exit.ps1
spack -d audit configs


@@ -26,7 +26,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gzip \
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static gawk
cmake bison bison-devel libstdc++-static
- name: Setup OpenSUSE
if: ${{ matrix.image == 'opensuse/leap:latest' }}
run: |


@@ -40,17 +40,17 @@ jobs:
# 1: Platforms to build for
# 2: Base image (e.g. ubuntu:22.04)
dockerfile: [[amazon-linux, 'linux/amd64,linux/arm64', 'amazonlinux:2'],
[centos-stream9, 'linux/amd64,linux/arm64', 'centos:stream9'],
[leap15, 'linux/amd64,linux/arm64', 'opensuse/leap:15'],
[ubuntu-focal, 'linux/amd64,linux/arm64', 'ubuntu:20.04'],
[ubuntu-jammy, 'linux/amd64,linux/arm64', 'ubuntu:22.04'],
[ubuntu-noble, 'linux/amd64,linux/arm64', 'ubuntu:24.04'],
[almalinux8, 'linux/amd64,linux/arm64', 'almalinux:8'],
[almalinux9, 'linux/amd64,linux/arm64', 'almalinux:9'],
[centos-stream9, 'linux/amd64,linux/arm64,linux/ppc64le', 'centos:stream9'],
[leap15, 'linux/amd64,linux/arm64,linux/ppc64le', 'opensuse/leap:15'],
[ubuntu-focal, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:20.04'],
[ubuntu-jammy, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:22.04'],
[ubuntu-noble, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:24.04'],
[almalinux8, 'linux/amd64,linux/arm64,linux/ppc64le', 'almalinux:8'],
[almalinux9, 'linux/amd64,linux/arm64,linux/ppc64le', 'almalinux:9'],
[rockylinux8, 'linux/amd64,linux/arm64', 'rockylinux:8'],
[rockylinux9, 'linux/amd64,linux/arm64', 'rockylinux:9'],
[fedora39, 'linux/amd64,linux/arm64', 'fedora:39'],
[fedora40, 'linux/amd64,linux/arm64', 'fedora:40']]
[fedora39, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:39'],
[fedora40, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:40']]
name: Build ${{ matrix.dockerfile[0] }}
if: github.repository == 'spack/spack'
steps:


@@ -9,7 +9,6 @@ on:
branches:
- develop
- releases/**
merge_group:
concurrency:
group: ci-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
@@ -26,33 +25,29 @@ jobs:
packages: ${{ steps.filter.outputs.packages }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
if: ${{ github.event_name == 'push' || github.event_name == 'merge_group' }}
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36
id: filter
with:
# For merge group events, compare against the target branch (main)
base: ${{ github.event_name == 'merge_group' && github.event.merge_group.base_ref || '' }}
# For merge group events, use the merge group head ref
ref: ${{ github.event_name == 'merge_group' && github.event.merge_group.head_sha || github.ref }}
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
# Don't run if we only modified packages in the
# built-in repository or documentation
filters: |
bootstrap:
- 'var/spack/repos/spack_repo/builtin/packages/clingo-bootstrap/**'
- 'var/spack/repos/spack_repo/builtin/packages/clingo/**'
- 'var/spack/repos/spack_repo/builtin/packages/python/**'
- 'var/spack/repos/spack_repo/builtin/packages/re2c/**'
- 'var/spack/repos/spack_repo/builtin/packages/gnupg/**'
- 'var/spack/repos/spack_repo/builtin/packages/libassuan/**'
- 'var/spack/repos/spack_repo/builtin/packages/libgcrypt/**'
- 'var/spack/repos/spack_repo/builtin/packages/libgpg-error/**'
- 'var/spack/repos/spack_repo/builtin/packages/libksba/**'
- 'var/spack/repos/spack_repo/builtin/packages/npth/**'
- 'var/spack/repos/spack_repo/builtin/packages/pinentry/**'
- 'var/spack/repos/builtin/packages/clingo-bootstrap/**'
- 'var/spack/repos/builtin/packages/clingo/**'
- 'var/spack/repos/builtin/packages/python/**'
- 'var/spack/repos/builtin/packages/re2c/**'
- 'var/spack/repos/builtin/packages/gnupg/**'
- 'var/spack/repos/builtin/packages/libassuan/**'
- 'var/spack/repos/builtin/packages/libgcrypt/**'
- 'var/spack/repos/builtin/packages/libgpg-error/**'
- 'var/spack/repos/builtin/packages/libksba/**'
- 'var/spack/repos/builtin/packages/npth/**'
- 'var/spack/repos/builtin/packages/pinentry/**'
- 'lib/spack/**'
- 'share/spack/**'
- '.github/workflows/bootstrap.yml'
@@ -81,15 +76,10 @@ jobs:
prechecks:
needs: [ changes ]
uses: ./.github/workflows/prechecks.yml
uses: ./.github/workflows/valid-style.yml
secrets: inherit
with:
with_coverage: ${{ needs.changes.outputs.core }}
with_packages: ${{ needs.changes.outputs.packages }}
import-check:
needs: [ changes ]
uses: ./.github/workflows/import-check.yaml
all-prechecks:
needs: [ prechecks ]
@@ -99,7 +89,7 @@ jobs:
- name: Success
run: |
if [ "${{ needs.prechecks.result }}" == "failure" ] || [ "${{ needs.prechecks.result }}" == "canceled" ]; then
echo "Unit tests failed."
echo "Unit tests failed."
exit 1
else
exit 0
@@ -107,7 +97,6 @@ jobs:
coverage:
needs: [ unit-tests, prechecks ]
if: ${{ needs.changes.outputs.core }}
uses: ./.github/workflows/coverage.yml
secrets: inherit
@@ -120,10 +109,10 @@ jobs:
- name: Status summary
run: |
if [ "${{ needs.unit-tests.result }}" == "failure" ] || [ "${{ needs.unit-tests.result }}" == "canceled" ]; then
echo "Unit tests failed."
echo "Unit tests failed."
exit 1
elif [ "${{ needs.bootstrap.result }}" == "failure" ] || [ "${{ needs.bootstrap.result }}" == "canceled" ]; then
echo "Bootstrap tests failed."
echo "Bootstrap tests failed."
exit 1
else
exit 0


@@ -29,8 +29,7 @@ jobs:
- run: coverage xml
- name: "Upload coverage report to CodeCov"
uses: codecov/codecov-action@1e68e06f1dbfde0e4cefc87efeba9e4643565303
uses: codecov/codecov-action@05f5a9cfad807516dbbef9929c4a42df3eb78766
with:
verbose: true
fail_ci_if_error: false
token: ${{ secrets.CODECOV_TOKEN }}


@@ -1,50 +0,0 @@
name: import-check
on:
workflow_call:
jobs:
# Check we don't make the situation with circular imports worse
import-check:
continue-on-error: true
runs-on: ubuntu-latest
steps:
- uses: julia-actions/setup-julia@v2
with:
version: '1.10'
- uses: julia-actions/cache@v2
# PR: use the base of the PR as the old commit
- name: Checkout PR base commit
if: github.event_name == 'pull_request'
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
ref: ${{ github.event.pull_request.base.sha }}
path: old
# not a PR: use the previous commit as the old commit
- name: Checkout previous commit
if: github.event_name != 'pull_request'
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
fetch-depth: 2
path: old
- name: Checkout previous commit
if: github.event_name != 'pull_request'
run: git -C old reset --hard HEAD^
- name: Checkout new commit
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
path: new
- name: Install circular import checker
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
repository: haampie/circular-import-fighter
ref: 4cdb0bf15f04ab6b49041d5ef1bfd9644cce7f33
path: circular-import-fighter
- name: Install dependencies
working-directory: circular-import-fighter
run: make -j dependencies
- name: Circular import check
working-directory: circular-import-fighter
run: make -j compare "SPACK_ROOT=../old ../new"


@@ -1,108 +0,0 @@
name: prechecks
on:
workflow_call:
inputs:
with_coverage:
required: true
type: string
with_packages:
required: true
type: string
concurrency:
group: style-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
jobs:
# Validate that the code can be run on all the Python versions supported by Spack
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
with:
python-version: '3.13'
cache: 'pip'
cache-dependency-path: '.github/workflows/requirements/style/requirements.txt'
- name: Install Python Packages
run: |
pip install -r .github/workflows/requirements/style/requirements.txt
- name: vermin (Spack's Core)
run: |
vermin --backport importlib --backport argparse --violations --backport typing -t=3.6- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
- name: vermin (Repositories)
run: |
vermin --backport importlib --backport argparse --violations --backport typing -t=3.6- -vvv var/spack/repos var/spack/test_repos
# Run style checks on the files that have been changed
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
fetch-depth: 2
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
with:
python-version: '3.13'
cache: 'pip'
cache-dependency-path: '.github/workflows/requirements/style/requirements.txt'
- name: Install Python packages
run: |
pip install -r .github/workflows/requirements/style/requirements.txt
- name: Run style tests
run: |
bin/spack style --base HEAD^1
bin/spack license verify
pylint -j $(nproc) --disable=all --enable=unspecified-encoding --ignore-paths=lib/spack/external lib
audit:
uses: ./.github/workflows/audit.yaml
secrets: inherit
with:
with_coverage: ${{ inputs.with_coverage }}
python_version: '3.13'
verify-checksums:
# do not run if the commit message or PR description contains [skip-verify-checksums]
if: >-
${{ inputs.with_packages == 'true' &&
!contains(github.event.pull_request.body, '[skip-verify-checksums]') &&
!contains(github.event.head_commit.message, '[skip-verify-checksums]') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29
with:
fetch-depth: 2
- name: Verify Added Checksums
run: |
bin/spack ci verify-versions HEAD^1 HEAD
# Check that spack can bootstrap the development environment on Python 3.6 - RHEL8
bootstrap-dev-rhel8:
runs-on: ubuntu-latest
container: registry.access.redhat.com/ubi8/ubi
steps:
- name: Install dependencies
run: |
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: Setup repo and non-root user
run: |
git --version
git config --global --add safe.directory '*'
git fetch --unshallow
. .github/workflows/bin/setup_git.sh
useradd spack-test
chown -R spack-test .
- name: Bootstrap Spack development environment
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack debug report
spack -d bootstrap now --dev
spack -d style -t black
spack unit-test -V


@@ -1,8 +1,7 @@
black==25.1.0
clingo==5.8.0
flake8==7.2.0
isort==6.0.1
mypy==1.15.0
types-six==1.17.0.20250403
black==24.10.0
clingo==5.7.1
flake8==7.1.1
isort==5.13.2
mypy==1.8.0
types-six==1.17.0.20241205
vermin==1.6.0
pylint==3.3.7


@@ -1,37 +0,0 @@
name: sync with spack/spack-packages
on:
push:
branches:
- develop
jobs:
sync:
if: github.repository == 'spack/spack'
runs-on: ubuntu-latest
steps:
- name: Checkout spack/spack
run: git clone https://github.com/spack/spack.git
- name: Checkout spack/spack-packages
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
ssh-key: ${{ secrets.SYNC_PACKAGES_KEY }}
path: spack-packages
repository: spack/spack-packages
- name: Install git-filter-repo
run: |
curl -LfsO https://raw.githubusercontent.com/newren/git-filter-repo/refs/tags/v2.47.0/git-filter-repo
echo "67447413e273fc76809289111748870b6f6072f08b17efe94863a92d810b7d94 git-filter-repo" | sha256sum -c -
chmod +x git-filter-repo
sudo mv git-filter-repo /usr/local/bin/
- name: Sync spack/spack-packages with spack/spack
run: |
cd spack-packages
git-filter-repo --quiet --source ../spack \
--path var/spack/repos/ --path-rename var/spack/repos/:python/ \
--path share/spack/gitlab/cloud_pipelines/ --path-rename share/spack/gitlab/cloud_pipelines/:.ci/gitlab/ \
--refs develop
- name: Push
run: |
cd spack-packages
git push git@github.com:spack/spack-packages.git develop:develop --force


@@ -19,6 +19,9 @@ jobs:
on_develop:
- ${{ github.ref == 'refs/heads/develop' }}
include:
- python-version: '3.6'
os: ubuntu-20.04
on_develop: ${{ github.ref == 'refs/heads/develop' }}
- python-version: '3.7'
os: ubuntu-22.04
on_develop: ${{ github.ref == 'refs/heads/develop' }}

.github/workflows/valid-style.yml vendored Normal file

@@ -0,0 +1,166 @@
name: style
on:
workflow_call:
inputs:
with_coverage:
required: true
type: string
concurrency:
group: style-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
cancel-in-progress: true
jobs:
# Validate that the code can be run on all the Python versions supported by Spack
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
with:
python-version: '3.11'
cache: 'pip'
- name: Install Python Packages
run: |
pip install --upgrade pip setuptools
pip install -r .github/workflows/requirements/style/requirements.txt
- name: vermin (Spack's Core)
run: vermin --backport importlib --backport argparse --violations --backport typing -t=3.6- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
- name: vermin (Repositories)
run: vermin --backport importlib --backport argparse --violations --backport typing -t=3.6- -vvv var/spack/repos
# Run style checks on the files that have been changed
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
fetch-depth: 0
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
with:
python-version: '3.11'
cache: 'pip'
- name: Install Python packages
run: |
pip install --upgrade pip setuptools
pip install -r .github/workflows/requirements/style/requirements.txt
- name: Setup git configuration
run: |
# Need this for the git tests to succeed.
git --version
. .github/workflows/bin/setup_git.sh
- name: Run style tests
run: |
share/spack/qa/run-style-tests
audit:
uses: ./.github/workflows/audit.yaml
secrets: inherit
with:
with_coverage: ${{ inputs.with_coverage }}
python_version: '3.11'
# Check that spack can bootstrap the development environment on Python 3.6 - RHEL8
bootstrap-dev-rhel8:
runs-on: ubuntu-latest
container: registry.access.redhat.com/ubi8/ubi
steps:
- name: Install dependencies
run: |
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: Setup repo and non-root user
run: |
git --version
git config --global --add safe.directory '*'
git fetch --unshallow
. .github/workflows/bin/setup_git.sh
useradd spack-test
chown -R spack-test .
- name: Bootstrap Spack development environment
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack debug report
spack -d bootstrap now --dev
spack -d style -t black
spack unit-test -V
# Check we don't make the situation with circular imports worse
import-check:
runs-on: ubuntu-latest
steps:
- uses: julia-actions/setup-julia@v2
with:
version: '1.10'
- uses: julia-actions/cache@v2
# PR: use the base of the PR as the old commit
- name: Checkout PR base commit
if: github.event_name == 'pull_request'
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
ref: ${{ github.event.pull_request.base.sha }}
path: old
# not a PR: use the previous commit as the old commit
- name: Checkout previous commit
if: github.event_name != 'pull_request'
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
fetch-depth: 2
path: old
- name: Checkout previous commit
if: github.event_name != 'pull_request'
run: git -C old reset --hard HEAD^
- name: Checkout new commit
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
path: new
- name: Install circular import checker
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
repository: haampie/circular-import-fighter
ref: b5d6ce9be35f602cca7d5a6aa0259fca10639cca
path: circular-import-fighter
- name: Install dependencies
working-directory: circular-import-fighter
run: make -j dependencies
- name: Problematic imports before
working-directory: circular-import-fighter
run: make SPACK_ROOT=../old SUFFIX=.old
- name: Problematic imports after
working-directory: circular-import-fighter
run: make SPACK_ROOT=../new SUFFIX=.new
- name: Compare import cycles
working-directory: circular-import-fighter
run: |
edges_before="$(head -n1 solution.old)"
edges_after="$(head -n1 solution.new)"
if [ "$edges_after" -gt "$edges_before" ]; then
printf '\033[1;31mImport check failed: %s imports need to be deleted, ' "$edges_after"
printf 'previously this was %s\033[0m\n' "$edges_before"
printf 'Compare \033[1;97m"Problematic imports before"\033[0m and '
printf '\033[1;97m"Problematic imports after"\033[0m.\n'
exit 1
else
printf '\033[1;32mImport check passed: %s <= %s\033[0m\n' "$edges_after" "$edges_before"
fi
# Further style checks from pylint
pylint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
fetch-depth: 0
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
with:
python-version: '3.13'
cache: 'pip'
- name: Install Python packages
run: |
pip install --upgrade pip setuptools pylint
- name: Pylint (Spack Core)
run: |
pylint -j 4 --disable=all --enable=unspecified-encoding --ignore-paths=lib/spack/external lib

.gitignore vendored

@@ -201,6 +201,7 @@ tramp
# Org-mode
.org-id-locations
*_archive
# flymake-mode
*_flymake.*


@@ -46,42 +46,18 @@ See the
[Feature Overview](https://spack.readthedocs.io/en/latest/features.html)
for examples and highlights.
Installation
----------------
To install spack, first make sure you have Python & Git.
To install spack and your first package, make sure you have Python & Git.
Then:
```bash
git clone -c feature.manyFiles=true --depth=2 https://github.com/spack/spack.git
```
<details>
<summary>What are <code>manyFiles=true</code> and <code>--depth=2</code>?</summary>
<br>
$ git clone -c feature.manyFiles=true --depth=2 https://github.com/spack/spack.git
$ cd spack/bin
$ ./spack install zlib
> [!TIP]
> `-c feature.manyFiles=true` improves git's performance on repositories with 1,000+ files.
>
> `--depth=2` prunes the git history to reduce the size of the Spack installation.
</details>
```bash
# For bash/zsh/sh
. spack/share/spack/setup-env.sh
# For tcsh/csh
source spack/share/spack/setup-env.csh
# For fish
. spack/share/spack/setup-env.fish
```
```bash
# Now you're ready to install a package!
spack install zlib-ng
```
Documentation
----------------


@@ -25,6 +25,7 @@ exit 1
# The code above runs this file with our preferred python interpreter.
import os
import os.path
import sys
min_python3 = (3, 6)


@@ -43,28 +43,6 @@ concretizer:
# (e.g. py-setuptools, cmake etc.)
# "full" (experimental): allows separation of the entire build-tool stack (e.g. the entire "cmake" subDAG)
strategy: minimal
# Maximum number of duplicates in a DAG, when using a strategy that allows duplicates. "default" is the
# number used if there isn't a more specific alternative
max_dupes:
default: 1
# Virtuals
c: 2
cxx: 2
fortran: 1
# Regular packages
cmake: 2
gmake: 2
python: 2
python-venv: 2
py-cython: 2
py-flit-core: 2
py-pip: 2
py-setuptools: 2
py-wheel: 2
xcb-proto: 2
# Compilers
gcc: 2
llvm: 2
# Option to specify compatibility between operating systems for reuse of compilers and packages
# Specified as a key: [list] where the key is the os that is being targeted, and the list contains the OS's
# it can reuse. Note this is a directional compatibility so mutual compatibility between two OS's
@@ -85,7 +63,3 @@ concretizer:
# Setting this to false yields unreproducible results, so we advise to use that value only
# for debugging purposes (e.g. check which constraints can help Spack concretize faster).
error_on_timeout: true
# Static analysis may reduce the concretization time by generating smaller ASP problems, in
# cases where there are requirements that prevent part of the search space to be explored.
static_analysis: false


@@ -19,7 +19,7 @@ config:
install_tree:
root: $spack/opt/spack
projections:
all: "{architecture.platform}-{architecture.target}/{name}-{version}-{hash}"
all: "{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}"
# install_tree can include an optional padded length (int or boolean)
# default is False (do not pad)
# if padded_length is True, Spack will pad as close to the system max path
@@ -90,9 +90,10 @@ config:
misc_cache: $user_cache_path/cache
# Abort downloads after this many seconds if not data is received.
# Setting this to 0 will disable the timeout.
connect_timeout: 30
# Timeout in seconds used for downloading sources etc. This only applies
# to the connection phase and can be increased for slow connections or
# servers. 0 means no timeout.
connect_timeout: 10
# If this is false, tools like curl that use SSL will not verify


@@ -15,18 +15,17 @@
# -------------------------------------------------------------------------
packages:
all:
compiler:
- apple-clang
- clang
- gcc
providers:
c: [apple-clang, llvm, gcc]
cxx: [apple-clang, llvm, gcc]
elf: [libelf]
fortran: [gcc]
fuse: [macfuse]
gl: [apple-gl]
glu: [apple-glu]
unwind: [apple-libunwind]
uuid: [apple-libuuid]
apple-clang:
buildable: false
apple-gl:
buildable: false
externals:
@@ -51,12 +50,3 @@ packages:
# although the version number used here isn't critical
- spec: apple-libuuid@1353.100.2
prefix: /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk
c:
prefer:
- apple-clang
cxx:
prefer:
- apple-clang
fortran:
prefer:
- gcc


@@ -15,18 +15,19 @@
# -------------------------------------------------------------------------
packages:
all:
compiler: [gcc, clang, oneapi, xl, nag, fj, aocc]
providers:
awk: [gawk]
armci: [armcimpi]
blas: [openblas, amdblis]
c: [gcc, llvm, intel-oneapi-compilers]
cxx: [gcc, llvm, intel-oneapi-compilers]
c: [gcc]
cxx: [gcc]
D: [ldc]
daal: [intel-oneapi-daal]
elf: [elfutils]
fftw-api: [fftw, amdfftw]
flame: [libflame, amdlibflame]
fortran: [gcc, llvm, intel-oneapi-compilers]
fortran: [gcc]
fortran-rt: [gcc-runtime, intel-oneapi-runtime]
fuse: [libfuse]
gl: [glx, osmesa]
@@ -35,7 +36,7 @@ packages:
go-or-gccgo-bootstrap: [go-bootstrap, gcc]
iconv: [libiconv]
ipp: [intel-oneapi-ipp]
java: [openjdk, jdk]
java: [openjdk, jdk, ibm-java]
jpeg: [libjpeg-turbo, libjpeg]
lapack: [openblas, amdlibflame]
libc: [glibc, musl]
@@ -64,7 +65,6 @@ packages:
unwind: [libunwind]
uuid: [util-linux-uuid, libuuid]
wasi-sdk: [wasi-sdk-prebuilt]
xkbdata-api: [xkeyboard-config, xkbdata]
xxd: [xxd-standalone, vim]
yacc: [bison, byacc]
ziglang: [zig]
@@ -72,39 +72,15 @@ packages:
permissions:
read: world
write: user
cce:
buildable: false
cray-fftw:
buildable: false
cray-libsci:
buildable: false
cray-mpich:
buildable: false
cray-mvapich2:
buildable: false
cray-pmi:
buildable: false
egl:
buildable: false
essl:
buildable: false
fj:
buildable: false
fujitsu-mpi:
buildable: false
fujitsu-ssl2:
buildable: false
glibc:
buildable: false
hpcx-mpi:
buildable: false
iconv:
prefer: [libiconv]
mpt:
buildable: false
musl:
buildable: false
spectrum-mpi:
buildable: false
xl:
buildable: false


@@ -11,4 +11,4 @@
# ~/.spack/repos.yaml
# -------------------------------------------------------------------------
repos:
- $spack/var/spack/repos/spack_repo/builtin
- $spack/var/spack/repos/builtin


@@ -1,5 +1,5 @@
config:
locks: false
build_stage::
- '$user_cache_path/stage'
- '$spack/.staging'
stage_name: '{name}-{version}-{hash:7}'


@@ -15,13 +15,8 @@
# -------------------------------------------------------------------------
packages:
all:
compiler:
- msvc
providers:
c : [msvc]
cxx: [msvc]
mpi: [msmpi]
gl: [wgl]
mpi:
require:
- one_of: [msmpi]
msvc:
buildable: false


@@ -276,7 +276,7 @@ remove dependent packages *before* removing their dependencies or use the
Garbage collection
^^^^^^^^^^^^^^^^^^
When Spack builds software from sources, it often installs tools that are needed
When Spack builds software from sources, if often installs tools that are needed
just to build or test other software. These are not necessary at runtime.
To support cases where removing these tools can be a benefit Spack provides
the ``spack gc`` ("garbage collector") command, which will uninstall all unneeded packages:
@@ -1291,61 +1291,55 @@ based on site policies.
Variants
^^^^^^^^
Variants are named options associated with a particular package and are
typically used to enable or disable certain features at build time. They
are optional, as each package must provide default values for each variant
it makes available.
The names of variants available for a particular package depend on
Variants are named options associated with a particular package. They are
optional, as each package must provide default values for each variant it
makes available. Variants can be specified using
a flexible parameter syntax ``name=<value>``. For example,
``spack install mercury debug=True`` will install mercury built with debug
flags. The names of particular variants available for a package depend on
what was provided by the package author. ``spack info <package>`` will
provide information on what build variants are available.
There are different types of variants:
For compatibility with earlier versions, variants which happen to be
boolean in nature can be specified by a syntax that represents turning
options on and off. For example, in the previous spec we could have
supplied ``mercury +debug`` with the same effect of enabling the debug
compile time option for the libelf package.
1. Boolean variants. Typically used to enable or disable a feature at
compile time. For example, a package might have a ``debug`` variant that
can be explicitly enabled with ``+debug`` and disabled with ``~debug``.
2. Single-valued variants. Often used to set defaults. For example, a package
might have a ``compression`` variant that determines the default
compression algorithm, which users could set to ``compression=gzip`` or
``compression=zstd``.
3. Multi-valued variants. A package might have a ``fabrics`` variant that
determines which network fabrics to support. Users could set this to
``fabrics=verbs,ofi`` to enable both InfiniBand verbs and OpenFabrics
interfaces. The values are separated by commas.
Depending on the package a variant may have any default value. For
``mercury`` here, ``debug`` is ``False`` by default, and we turned it on
with ``debug=True`` or ``+debug``. If a variant is ``True`` by default
you can turn it off by either adding ``-name`` or ``~name`` to the spec.
The meaning of ``fabrics=verbs,ofi`` is to enable *at least* the specified
fabrics, but other fabrics may be enabled as well. If the intent is to
enable *only* the specified fabrics, then the ``fabrics:=verbs,ofi``
syntax should be used with the ``:=`` operator.
There are two syntaxes here because, depending on context, ``~`` and
``-`` may mean different things. In most shells, the following will
result in the shell performing home directory substitution:
.. note::
.. code-block:: sh
In certain shells, the the ``~`` character is expanded to the home
directory. To avoid these issues, avoid whitespace between the package
name and the variant:
mpileaks ~debug # shell may try to substitute this!
mpileaks~debug # use this instead
.. code-block:: sh
If there is a user called ``debug``, the ``~`` will be incorrectly
expanded. In this situation, you would want to write ``libelf
-debug``. However, ``-`` can be ambiguous when included after a
package name without spaces:
mpileaks ~debug # shell may try to substitute this!
mpileaks~debug # use this instead
.. code-block:: sh
Alternatively, you can use the ``-`` character to disable a variant,
but be aware that this requires a space between the package name and
the variant:
mpileaks-debug # wrong!
mpileaks -debug # right
.. code-block:: sh
Spack allows the ``-`` character to be part of package names, so the
above will be interpreted as a request for the ``mpileaks-debug``
package, not a request for ``mpileaks`` built without ``debug``
options. In this scenario, you should write ``mpileaks~debug`` to
avoid ambiguity.
mpileaks-debug # wrong: refers to a package named "mpileaks-debug"
mpileaks -debug # right: refers to a package named mpileaks with debug disabled
As a last resort, ``debug=False`` can also be used to disable a boolean variant.
"""""""""""""""""""""""""""""""""""
Variant propagation to dependencies
"""""""""""""""""""""""""""""""""""
When spack normalizes specs, it prints them out with no spaces boolean
variants using the backwards compatibility syntax and uses only ``~``
for disabled boolean variants. The ``-`` and spaces on the command
line are provided for convenience and legibility.
Spack allows variants to propagate their value to the package's
dependency by using ``++``, ``--``, and ``~~`` for boolean variants.
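To make the preceding syntax concrete, here is a brief, illustrative sketch of spec strings using the variant forms described in this section. The ``mercury`` and ``mpileaks`` names come from the examples above; the ``fabrics`` lines use a placeholder package, and the ``++debug`` line assumes the boolean propagation syntax just mentioned:
.. code-block:: console
   $ spack install mercury +debug                # boolean variant enabled
   $ spack install mercury debug=True            # equivalent parameter form
   $ spack install mpileaks~debug                # boolean variant disabled (no space before ~)
   $ spack install <package> fabrics=verbs,ofi   # enable at least these values
   $ spack install <package> fabrics:=verbs,ofi  # enable exactly these values
   $ spack install mpileaks ++debug              # propagate +debug to dependencies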
@@ -1415,29 +1409,27 @@ that executables will run without the need to set ``LD_LIBRARY_PATH``.
.. code-block:: yaml
packages:
gcc:
externals:
- spec: gcc@4.9.3
prefix: /opt/gcc
extra_attributes:
compilers:
c: /opt/gcc/bin/gcc
cxx: /opt/gcc/bin/g++
fortran: /opt/gcc/bin/gfortran
environment:
unset:
- BAD_VARIABLE
set:
GOOD_VARIABLE_NUM: 1
GOOD_VARIABLE_STR: good
prepend_path:
PATH: /path/to/binutils
append_path:
LD_LIBRARY_PATH: /opt/gcc/lib
extra_rpaths:
- /path/to/some/compiler/runtime/directory
- /path/to/some/other/compiler/runtime/directory
compilers:
- compiler:
spec: gcc@4.9.3
paths:
cc: /opt/gcc/bin/gcc
c++: /opt/gcc/bin/g++
f77: /opt/gcc/bin/gfortran
fc: /opt/gcc/bin/gfortran
environment:
unset:
- BAD_VARIABLE
set:
GOOD_VARIABLE_NUM: 1
GOOD_VARIABLE_STR: good
prepend_path:
PATH: /path/to/binutils
append_path:
LD_LIBRARY_PATH: /opt/gcc/lib
extra_rpaths:
- /path/to/some/compiler/runtime/directory
- /path/to/some/other/compiler/runtime/directory
^^^^^^^^^^^^^^^^^^^^^^^
@@ -1769,24 +1761,19 @@ Verifying installations
The ``spack verify`` command can be used to verify the validity of
Spack-installed packages any time after installation.
^^^^^^^^^^^^^^^^^^^^^^^^^
``spack verify manifest``
^^^^^^^^^^^^^^^^^^^^^^^^^
At installation time, Spack creates a manifest of every file in the
installation prefix. For links, Spack tracks the mode, ownership, and
destination. For directories, Spack tracks the mode, and
ownership. For files, Spack tracks the mode, ownership, modification
time, hash, and size. The ``spack verify manifest`` command will check,
for every file in each package, whether any of those attributes have
changed. It will also check for newly added files or deleted files from
the installation prefix. Spack can either check all installed packages
time, hash, and size. The Spack verify command will check, for every
file in each package, whether any of those attributes have changed. It
will also check for newly added files or deleted files from the
installation prefix. Spack can either check all installed packages
using the `-a,--all` or accept specs listed on the command line to
verify.
The ``spack verify manifest`` command can also verify for individual files
that they haven't been altered since installation time. If the given file
The ``spack verify`` command can also verify for individual files that
they haven't been altered since installation time. If the given file
is not in a Spack installation prefix, Spack will report that it is
not owned by any package. To check individual files instead of specs,
use the ``-f,--files`` option.
@@ -1801,22 +1788,6 @@ check only local packages (as opposed to those used transparently from
``upstream`` spack instances) and the ``-j,--json`` option to output
machine-readable json data for any errors.
^^^^^^^^^^^^^^^^^^^^^^^^^^
``spack verify libraries``
^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``spack verify libraries`` command can be used to verify that packages
do not have accidental system dependencies. This command scans the install
prefixes of packages for executables and shared libraries, and resolves
their needed libraries in their RPATHs. When needed libraries cannot be
located, an error is reported. This typically indicates that a package
was linked against a system library, instead of a library provided by
a Spack package.
This verification can also be enabled as a post-install hook by setting
``config:shared_linking:missing_library_policy`` to ``error`` or ``warn``
in :ref:`config.yaml <config-yaml>`.
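As a quick, hedged illustration of the two subcommands above, using only the flags documented in this section (``<file>`` is a placeholder; argument handling for ``spack verify libraries`` beyond a plain invocation is not covered here):
.. code-block:: console
   $ spack verify manifest --all       # re-check the recorded manifest of every installed package
   $ spack verify manifest -f <file>   # check individual files instead of specs
   $ spack verify libraries            # report needed libraries that cannot be resolved via RPATHs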
-----------------------
Filesystem requirements
-----------------------
@@ -1916,7 +1887,7 @@ diagnostics. Issues, if found, are reported to stdout:
PKG-DIRECTIVES: 1 issue found
1. lammps: wrong variant in "conflicts" directive
the variant 'adios' does not exist
in /home/spack/spack/var/spack/repos/spack_repo/builtin/packages/lammps/package.py
in /home/spack/spack/var/spack/repos/builtin/packages/lammps/package.py
------------


@@ -45,14 +45,10 @@ provided binary cache, which can be a local directory or a remote URL.
Here is an example where a build cache is created in a local directory named
"spack-cache", to which we push the "ninja" spec:
ninja-1.12.1-vmvycib6vmiofkdqgrblo7zsvp7odwut
.. code-block:: console
$ spack buildcache push ./spack-cache ninja
==> Selected 30 specs to push to file:///home/spackuser/spack/spack-cache
...
==> [30/30] Pushed ninja@1.12.1/ngldn2k
==> Pushing binary packages to file:///home/spackuser/spack/spack-cache/build_cache
Note that ``ninja`` must be installed locally for this to work.
@@ -89,7 +85,7 @@ You can see that the mirror is added with ``spack mirror list`` as follows:
spack-public https://spack-llnl-mirror.s3-us-west-2.amazonaws.com/
At this point, you've created a buildcache, but Spack hasn't indexed it, so if
At this point, you've create a buildcache, but spack hasn't indexed it, so if
you run ``spack buildcache list`` you won't see any results. You need to index
this new build cache as follows:
@@ -102,10 +98,9 @@ Now you can use list:
.. code-block:: console
$ spack buildcache list
==> 24 cached builds.
-- linux-ubuntu22.04-sapphirerapids / gcc@12.3.0 ----------------
[ ... ]
ninja@1.12.1
==> 1 cached build.
-- linux-ubuntu20.04-skylake / gcc@9.3.0 ------------------------
ninja@1.10.2
With ``mymirror`` configured and an index available, Spack will automatically
use it during concretization and installation. That means that you can expect
@@ -116,17 +111,17 @@ verify by re-installing ninja:
$ spack uninstall ninja
$ spack install ninja
[ ... ]
==> Installing ninja-1.12.1-ngldn2kpvb6lqc44oqhhow7fzg7xu7lh [24/24]
gpg: Signature made Thu 06 Mar 2025 10:03:38 AM MST
gpg: using RSA key 75BC0528114909C076E2607418010FFAD73C9B07
==> Installing ninja-1.11.1-yxferyhmrjkosgta5ei6b4lqf6bxbscz
==> Fetching file:///home/spackuser/spack/spack-cache/build_cache/linux-ubuntu20.04-skylake-gcc-9.3.0-ninja-1.10.2-yxferyhmrjkosgta5ei6b4lqf6bxbscz.spec.json.sig
gpg: Signature made Do 12 Jan 2023 16:01:04 CET
gpg: using RSA key 61B82B2B2350E171BD17A1744E3A689061D57BF6
gpg: Good signature from "example (GPG created for Spack) <example@example.com>" [ultimate]
==> Fetching file:///home/spackuser/spack/spack-cache/blobs/sha256/f0/f08eb62661ad159d2d258890127fc6053f5302a2f490c1c7f7bd677721010ee0
==> Fetching file:///home/spackuser/spack/spack-cache/blobs/sha256/c7/c79ac6e40dfdd01ac499b020e52e57aa91151febaea3ad183f90c0f78b64a31a
==> Extracting ninja-1.12.1-ngldn2kpvb6lqc44oqhhow7fzg7xu7lh from binary cache
==> ninja: Successfully installed ninja-1.12.1-ngldn2kpvb6lqc44oqhhow7fzg7xu7lh
Search: 0.00s. Fetch: 0.11s. Install: 0.11s. Extract: 0.10s. Relocate: 0.00s. Total: 0.22s
[+] /home/spackuser/spack/opt/spack/linux-ubuntu22.04-sapphirerapids/gcc-12.3.0/ninja-1.12.1-ngldn2kpvb6lqc44oqhhow7fzg7xu7lh
==> Fetching file:///home/spackuser/spack/spack-cache/build_cache/linux-ubuntu20.04-skylake/gcc-9.3.0/ninja-1.10.2/linux-ubuntu20.04-skylake-gcc-9.3.0-ninja-1.10.2-yxferyhmrjkosgta5ei6b4lqf6bxbscz.spack
==> Extracting ninja-1.10.2-yxferyhmrjkosgta5ei6b4lqf6bxbscz from binary cache
==> ninja: Successfully installed ninja-1.11.1-yxferyhmrjkosgta5ei6b4lqf6bxbscz
Search: 0.00s. Fetch: 0.17s. Install: 0.12s. Total: 0.29s
[+] /home/harmen/spack/opt/spack/linux-ubuntu20.04-skylake/gcc-9.3.0/ninja-1.11.1-yxferyhmrjkosgta5ei6b4lqf6bxbscz
It worked! You've just completed a full example of creating a build cache with
a spec of interest, adding it as a mirror, updating its index, listing the contents,
@@ -318,7 +313,7 @@ other system dependencies. However, they are still compatible with tools like
``skopeo``, ``podman``, and ``docker`` for pulling and pushing.
.. note::
The Docker ``overlayfs2`` storage driver is limited to 128 layers, above which a
The docker ``overlayfs2`` storage driver is limited to 128 layers, above which a
``max depth exceeded`` error may be produced when pulling the image. There
are `alternative drivers <https://docs.docker.com/storage/storagedriver/>`_.
@@ -349,18 +344,19 @@ which lets you get started quickly. See the following resources for more informa
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Create tarball of installed Spack package and all dependencies.
Tarballs and specfiles are compressed and checksummed, manifests are signed if gpg2 is available.
Commands like ``spack buildcache install`` will search Spack mirrors to get the list of build caches.
Tarballs are checksummed and signed if gpg2 is available.
Places them in a directory ``build_cache`` that can be copied to a mirror.
Commands like ``spack buildcache install`` will search Spack mirrors for build_cache to get the list of build caches.
============== ========================================================================================================================
Arguments Description
============== ========================================================================================================================
``<specs>`` list of partial specs or hashes with a leading ``/`` to match from installed packages and used for creating build caches
``-d <path>`` directory in which ``v3`` and ``blobs`` directories are created, defaults to ``.``
``-f`` overwrite compressed tarball and spec metadata files if they already exist
``-d <path>`` directory in which ``build_cache`` directory is created, defaults to ``.``
``-f`` overwrite ``.spack`` file in ``build_cache`` directory if it exists
``-k <key>`` the key to sign package with. In the case where multiple keys exist, the package will be unsigned unless ``-k`` is used.
``-r`` make paths in binaries relative before creating tarball
``-y`` answer yes to all questions about creating unsigned build caches
``-y`` answer yes to all create unsigned ``build_cache`` questions
============== ========================================================================================================================
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -401,165 +397,6 @@ List public keys available on Spack mirror.
========= ==============================================
Arguments Description
========= ==============================================
``-it`` trust the keys downloaded with prompt for each
``-i`` trust the keys downloaded with prompt for each
``-y`` answer yes to all trust all keys downloaded
========= ==============================================
.. _build_cache_layout:
------------------
Build Cache Layout
------------------
This section describes the structure and content of URL-style build caches, as
distinguished from OCI-style build caches.
The entry point for a binary package is a manifest json file that points to at
least two other files stored as content-addressed blobs. These files include a spec
metadata file, as well as the installation directory of the package stored as
a compressed archive file. Binary package manifest files are named to indicate
the package name and version, as well as the hash of the concrete spec. For
example::
gcc-runtime-12.3.0-qyu2lvgt3nxh7izxycugdbgf5gsdpkjt.spec.manifest.json
would contain the manifest for a binary package of ``gcc-runtime@12.3.0``.
The id of the built package is defined to be the DAG hash of the concrete spec,
and exists in the name of the file as well. The id distinguishes a particular
binary package from all other binary packages with the same package name and
version. Below is an example binary package manifest file. Such a file would
live in the versioned spec manifests directory of a binary mirror, for example
``v3/manifests/spec/``::
{
"version": 3,
"data": [
{
"contentLength": 10731083,
"mediaType": "application/vnd.spack.install.v2.tar+gzip",
"compression": "gzip",
"checksumAlgorithm": "sha256",
"checksum": "0f24aa6b5dd7150067349865217acd3f6a383083f9eca111d2d2fed726c88210"
},
{
"contentLength": 1000,
"mediaType": "application/vnd.spack.spec.v5+json",
"compression": "gzip",
"checksumAlgorithm": "sha256",
"checksum": "fba751c4796536737c9acbb718dad7429be1fa485f5585d450ab8b25d12ae041"
}
]
}
The manifest points to both the compressed tar file as well as the compressed
spec metadata file, and contains the checksum of each. This checksum
is also used as the address of the associated file, and hence, must be
known in order to locate the tarball or spec file within the mirror. Once the
tarball or spec metadata file is downloaded, the checksum should be computed locally
and compared to the checksum in the manifest to ensure the contents have not changed
since the binary package was pushed. Spack stores all data files (including compressed
tar files, spec metadata, indices, public keys, etc) within a ``blobs/<hash-algorithm>/``
directory, using the first two characters of the checksum as a sub-directory
to reduce the number files in a single folder. Here is a depiction of the
organization of binary mirror contents::
mirror_directory/
v3/
layout.json
manifests/
spec/
gcc-runtime/
gcc-runtime-12.3.0-s2nqujezsce4x6uhtvxscu7jhewqzztx.spec.manifest.json
gmake/
gmake-4.4.1-lpr4j77rcgkg5536tmiuzwzlcjsiomph.spec.manifest.json
compiler-wrapper/
compiler-wrapper-1.0-s7ieuyievp57vwhthczhaq2ogowf3ohe.spec.manifest.json
index/
index.manifest.json
key/
75BC0528114909C076E2607418010FFAD73C9B07.key.manifest.json
keys.manifest.json
blobs/
sha256/
0f/
0f24aa6b5dd7150067349865217acd3f6a383083f9eca111d2d2fed726c88210
fb/
fba751c4796536737c9acbb718dad7429be1fa485f5585d450ab8b25d12ae041
2a/
2a21836d206ccf0df780ab0be63fdf76d24501375306a35daa6683c409b7922f
...
Files within the ``manifests`` directory are organized into subdirectories by
the type of entity they represent. Binary package manifests live in the ``spec/``
directory, binary cache index manifests live in the ``index/`` directory, and
manifests for public keys and their indices live in the ``key/`` subdirectory.
Regardless of the type of entity they represent, all manifest files are named
with an extension ``.manifest.json``.
Every manifest contains a ``data`` array, each element of which refers to an
associated file stored a content-addressed blob. Considering the example spec
manifest shown above, the compressed installation archive can be found by
picking out the data blob with the appropriate ``mediaType``, which in this
case would be ``application/vnd.spack.install.v1.tar+gzip``. The associated
file is found by looking in the blobs directory under ``blobs/sha256/fb/`` for
the file named with the complete checksum value.
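To make the content-addressed layout concrete, here is a minimal sketch using the checksum from the example manifest above; it shows how a blob's location in the mirror follows directly from its ``checksumAlgorithm`` and ``checksum`` (the shell variable name is illustrative):
.. code-block:: console
   $ checksum=0f24aa6b5dd7150067349865217acd3f6a383083f9eca111d2d2fed726c88210
   $ echo "blobs/sha256/${checksum:0:2}/${checksum}"
   blobs/sha256/0f/0f24aa6b5dd7150067349865217acd3f6a383083f9eca111d2d2fed726c88210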
As mentioned above, every entity in a binary mirror (aka build cache) is stored
as a content-addressed blob pointed to by a manifest. While an example spec
manifest (i.e. a manifest for a binary package) is shown above, here is what
the manifest of a build cache index looks like::
{
"version": 3,
"data": [
{
"contentLength": 6411,
"mediaType": "application/vnd.spack.db.v8+json",
"compression": "none",
"checksumAlgorithm": "sha256",
"checksum": "225a3e9da24d201fdf9d8247d66217f5b3f4d0fc160db1498afd998bfd115234"
}
]
}
Some things to note about this manifest are that it points to a blob that is not
compressed (``compression: "none"``), and that the ``mediaType`` is one we have
not seen yet, ``application/vnd.spack.db.v8+json``. The decision not to compress
build cache indices stems from the fact that spack does not yet sign build cache
index manifests. Once that changes, you may start to see these indices stored as
compressed blobs.
For completeness, here are examples of manifests for the other two types of entities
you might find in a spack build cache. First a public key manifest::
{
"version": 3,
"data": [
{
"contentLength": 2472,
"mediaType": "application/pgp-keys",
"compression": "none",
"checksumAlgorithm": "sha256",
"checksum": "9fc18374aebc84deb2f27898da77d4d4410e5fb44c60c6238cb57fb36147e5c7"
}
]
}
Note the ``mediaType`` of ``application/pgp-keys``. Finally, a public key index manifest::
{
"version": 3,
"data": [
{
"contentLength": 56,
"mediaType": "application/vnd.spack.keyindex.v1+json",
"compression": "none",
"checksumAlgorithm": "sha256",
"checksum": "29b3a0eb6064fd588543bc43ac7d42d708a69058dafe4be0859e3200091a9a1c"
}
]
}
Again note the ``mediaType`` of ``application/vnd.spack.keyindex.v1+json``. Also note
that both the above manifest examples refer to uncompressed blobs, this is for the same
reason spack does not yet compress build cache index blobs.


@@ -14,7 +14,7 @@ is an entire command dedicated to the management of every aspect of bootstrappin
.. command-output:: spack bootstrap --help
Spack is configured to bootstrap its dependencies lazily by default; i.e., the first time they are needed and
Spack is configured to bootstrap its dependencies lazily by default; i.e. the first time they are needed and
can't be found. You can readily check if any prerequisite for using Spack is missing by running:
.. code-block:: console
@@ -36,8 +36,8 @@ can't be found. You can readily check if any prerequisite for using Spack is mis
In the case of the output shown above Spack detected that both ``clingo`` and ``gnupg``
are missing and it's giving detailed information on why they are needed and whether
they can be bootstrapped. The return code of this command summarizes the results; if any
dependencies are missing, the return code is ``1``, otherwise ``0``. Running a command that
they can be bootstrapped. The return code of this command summarizes the results, if any
dependencies are missing the return code is ``1``, otherwise ``0``. Running a command that
concretizes a spec, like:
.. code-block:: console
@@ -170,7 +170,7 @@ bootstrapping.
To register the mirror on the platform where it's supposed to be used run the following command(s):
% spack bootstrap add --trust local-sources /opt/bootstrap/metadata/sources
% spack bootstrap add --trust local-binaries /opt/bootstrap/metadata/binaries
% spack buildcache update-index /opt/bootstrap/bootstrap_cache
This command needs to be run on a machine with internet access and the resulting folder
has to be moved over to the air-gapped system. Once the local sources are added using the


@@ -63,10 +63,11 @@ on these ideas for each distinct build system that Spack supports:
build_systems/cudapackage
build_systems/custompackage
build_systems/inteloneapipackage
build_systems/intelpackage
build_systems/rocmpackage
build_systems/sourceforgepackage
For reference, the :py:mod:`Build System API docs <spack_repo.builtin.build_systems>`
For reference, the :py:mod:`Build System API docs <spack.build_systems>`
provide a list of build systems and methods/attributes that can be
overridden. If you are curious about the implementation of a particular
build system, you can view the source code by running:
@@ -83,14 +84,14 @@ packages. You can quickly find examples by running:
.. code-block:: console
$ cd var/spack/repos/spack_repo/builtin/packages
$ cd var/spack/repos/builtin/packages
$ grep -l QMakePackage */package.py
You can then view these packages with ``spack edit``.
This guide is intended to supplement the
:py:mod:`Build System API docs <spack_repo.builtin.build_systems>` with examples of
:py:mod:`Build System API docs <spack.build_systems>` with examples of
how to override commonly used methods. It also provides rules of thumb
and suggestions for package developers who are unfamiliar with a
particular build system.


@@ -272,9 +272,9 @@ often lists dependencies and the flags needed to locate them. The
"environment variables" section lists environment variables that the
build system uses to pass flags to the compiler and linker.
^^^^^^^^^^^^^^^^^^^^^^^^^
Adding flags to configure
^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^
Addings flags to configure
^^^^^^^^^^^^^^^^^^^^^^^^^^
For most of the flags you encounter, you will want a variant to
optionally enable/disable them. You can then optionally pass these
@@ -285,7 +285,7 @@ function like so:
def configure_args(self):
args = []
...
if self.spec.satisfies("+mpi"):
args.append("--enable-mpi")
else:
@@ -299,10 +299,7 @@ Alternatively, you can use the :ref:`enable_or_disable <autotools_enable_or_dis
.. code-block:: python
def configure_args(self):
args = []
...
args.extend(self.enable_or_disable("mpi"))
return args
return [self.enable_or_disable("mpi")]
Note that we are explicitly disabling MPI support if it is not
@@ -347,14 +344,7 @@ typically used to enable or disable some feature within the package.
default=False,
description="Memchecker support for debugging [degrades performance]"
)
...
def configure_args(self):
args = []
...
args.extend(self.enable_or_disable("memchecker"))
return args
config_args.extend(self.enable_or_disable("memchecker"))
In this example, specifying the variant ``+memchecker`` will generate
the following configuration options:


@@ -27,10 +27,10 @@ it could use the ``require`` directive as follows:
Spack has a number of built-in bundle packages, such as:
* `AmdAocl <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/amd_aocl/package.py>`_
* `EcpProxyApps <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/ecp_proxy_apps/package.py>`_
* `Libc <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/libc/package.py>`_
* `Xsdk <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/xsdk/package.py>`_
* `AmdAocl <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/amd-aocl/package.py>`_
* `EcpProxyApps <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/ecp-proxy-apps/package.py>`_
* `Libc <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/libc/package.py>`_
* `Xsdk <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/xsdk/package.py>`_
where ``Xsdk`` also inherits from ``CudaPackage`` and ``RocmPackage`` and
``Libc`` is a virtual bundle package for the C standard library.


@@ -129,8 +129,8 @@ Adding flags to cmake
To add additional flags to the ``cmake`` call, simply override the
``cmake_args`` function. The following example defines values for the flags
``WHATEVER``, ``ENABLE_BROKEN_FEATURE``, ``DETECT_HDF5``, and ``THREADS`` with
and without the :meth:`~spack_repo.builtin.build_systems.cmake.CMakeBuilder.define` and
:meth:`~spack_repo.builtin.build_systems.cmake.CMakeBuilder.define_from_variant` helper functions:
and without the :meth:`~spack.build_systems.cmake.CMakeBuilder.define` and
:meth:`~spack.build_systems.cmake.CMakeBuilder.define_from_variant` helper functions:
.. code-block:: python
@@ -199,7 +199,7 @@ a variant to control this:
However, not every CMake package accepts all four of these options.
Grep the ``CMakeLists.txt`` file to see if the default values are
missing or replaced. For example, the
`dealii <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/dealii/package.py>`_
`dealii <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/dealii/package.py>`_
package overrides the default variant with:
.. code-block:: python


@@ -20,8 +20,8 @@ start is to look at the definitions of other build systems. This guide
focuses mostly on how Spack's build systems work.
In this guide, we will be using the
`perl <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/perl/package.py>`_ and
`cmake <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/cmake/package.py>`_
`perl <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/perl/package.py>`_ and
`cmake <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/cmake/package.py>`_
packages as examples. ``perl``'s build system is a hand-written
``Configure`` shell script, while ``cmake`` bootstraps itself during
installation. Both of these packages require custom build systems.
@@ -56,13 +56,13 @@ If you look at the ``perl`` package, you'll see:
.. code-block:: python
phases = ("configure", "build", "install")
phases = ["configure", "build", "install"]
Similarly, ``cmake`` defines:
.. code-block:: python
phases = ("bootstrap", "build", "install")
phases = ["bootstrap", "build", "install"]
If we look at the ``cmake`` example, this tells Spack's ``PackageBase``
class to run the ``bootstrap``, ``build``, and ``install`` functions


@@ -33,6 +33,9 @@ For more information on a specific package, do::
spack info --all <package-name>
Intel no longer releases new versions of Parallel Studio, which can be
used in Spack via the :ref:`intelpackage`. All of its components can
now be found in oneAPI.
Examples
========
@@ -47,8 +50,34 @@ Install the oneAPI compilers::
spack install intel-oneapi-compilers
Add the compilers to your ``compilers.yaml`` so spack can use them::
To build the ``patchelf`` Spack package with ``icx``, do::
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/bin
Verify that the compilers are available::
spack compiler list
Note that 2024 and later releases do not include ``icc``. Before 2024,
the package layout was different::
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin/intel64
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin
The ``intel-oneapi-compilers`` package includes 2 families of
compilers:
* ``intel``: ``icc``, ``icpc``, ``ifort``. Intel's *classic*
compilers. 2024 and later releases contain ``ifort``, but not
``icc`` and ``icpc``.
* ``oneapi``: ``icx``, ``icpx``, ``ifx``. Intel's new generation of
compilers based on LLVM.
To build the ``patchelf`` Spack package with ``icc``, do::
spack install patchelf%intel
To build with with ``icx``, do ::
spack install patchelf%oneapi
@@ -63,6 +92,15 @@ Install the oneAPI compilers::
spack install intel-oneapi-compilers
Add the compilers to your ``compilers.yaml`` so Spack can use them::
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/bin
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/bin
Verify that the compilers are available::
spack compiler list
Clone `spack-configs <https://github.com/spack/spack-configs>`_ repo and activate Intel oneAPI CPU environment::
git clone https://github.com/spack/spack-configs
@@ -111,7 +149,7 @@ Compilers
---------
To use the compilers, add some information about the installation to
``packages.yaml``. For most users, it is sufficient to do::
``compilers.yaml``. For most users, it is sufficient to do::
spack compiler add /opt/intel/oneapi/compiler/latest/bin
@@ -119,7 +157,7 @@ Adapt the paths above if you did not install the tools in the default
location. After adding the compilers, using them is the same
as if you had installed the ``intel-oneapi-compilers`` package.
Another option is to manually add the configuration to
``packages.yaml`` as described in :ref:`Compiler configuration
``compilers.yaml`` as described in :ref:`Compiler configuration
<compiler-config>`.
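A manual entry of this kind might look roughly like the following sketch (the version and prefix are illustrative and must match your installation):

.. code-block:: yaml

   packages:
     intel-oneapi-compilers:
       externals:
       - spec: intel-oneapi-compilers@2025.1.0
         prefix: /opt/intel/oneapi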
Before 2024, the directory structure was different::
@@ -162,5 +200,15 @@ You can also use Spack-installed libraries. For example::
Will update your environment CPATH, LIBRARY_PATH, and other
environment variables for building an application with oneMKL.
More information
================
This section describes basic use of oneAPI, especially if it has
changed compared to Parallel Studio. See :ref:`intelpackage` for more
information on :ref:`intel-virtual-packages`,
:ref:`intel-unrelated-packages`,
:ref:`intel-integrating-external-libraries`, and
:ref:`using-mkl-tips`.
.. _`Intel installers`: https://software.intel.com/content/www/us/en/develop/documentation/installation-guide-for-intel-oneapi-toolkits-linux/top.html
File diff suppressed because it is too large
View File
@@ -91,14 +91,14 @@ there are any other variables you need to set, you can do this in the
.. code-block:: python
def setup_build_environment(self, env: EnvironmentModifications) -> None:
def setup_build_environment(self, env):
env.set("PREFIX", prefix)
env.set("BLASLIB", spec["blas"].libs.ld_flags)
`cbench <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/cbench/package.py>`_
`cbench <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/cbench/package.py>`_
is a good example of a simple package that does this, while
`esmf <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/esmf/package.py>`_
`esmf <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/esmf/package.py>`_
is a good example of a more complex package.
""""""""""""""""""""""
@@ -129,7 +129,7 @@ If you do need access to the spec, you can create a property like so:
]
`cloverleaf <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/cloverleaf/package.py>`_
`cloverleaf <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/cloverleaf/package.py>`_
is a good example of a package that uses this strategy.
"""""""""""""
@@ -152,7 +152,7 @@ and a ``filter`` method to help with this. For example:
makefile.filter(r"^\s*FC\s*=.*", f"FC = {spack_fc}")
`stream <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/stream/package.py>`_
`stream <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/stream/package.py>`_
is a good example of a package that involves editing a Makefile to set
the appropriate variables.
@@ -192,7 +192,7 @@ well for storing variables:
inc.write(f"{key} = {config[key]}\n")
`elk <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/elk/package.py>`_
`elk <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/elk/package.py>`_
is a good example of a package that uses a dictionary to store
configuration variables.
@@ -213,7 +213,7 @@ them in a list:
inc.write(f"{var}\n")
`hpl <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/hpl/package.py>`_
`hpl <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/hpl/package.py>`_
is a good example of a package that uses a list to store
configuration variables.
View File
@@ -12,7 +12,8 @@ The ``ROCmPackage`` is not a build system but a helper package. Like ``CudaPacka
it provides standard variants, dependencies, and conflicts to facilitate building
packages using GPUs though for AMD in this case.
You can find the source for this package (and suggestions for setting up your ``packages.yaml`` file) at
You can find the source for this package (and suggestions for setting up your
``compilers.yaml`` and ``packages.yaml`` files) at
`<https://github.com/spack/spack/blob/develop/lib/spack/spack/build_systems/rocm.py>`__.
^^^^^^^^
View File
@@ -39,7 +39,7 @@ for "CRAN <package-name>" and you should quickly find what you want.
If it isn't on CRAN, try Bioconductor, another common R repository.
For the purposes of this tutorial, we will be walking through
`r-caret <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/r_caret/package.py>`_
`r-caret <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/r-caret/package.py>`_
as an example. If you search for "CRAN caret", you will quickly find what
you are looking for at https://cran.r-project.org/package=caret.
https://cran.r-project.org is the main CRAN website. However, CRAN also
@@ -337,7 +337,7 @@ Non-R dependencies
^^^^^^^^^^^^^^^^^^
Some packages depend on non-R libraries for linking. Check out the
`r-stringi <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/r_stringi/package.py>`_
`r-stringi <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/r-stringi/package.py>`_
package for an example: https://cloud.r-project.org/package=stringi.
If you search for the text "SystemRequirements", you will see:
@@ -352,7 +352,7 @@ Passing arguments to the installation
Some R packages provide additional flags that can be passed to
``R CMD INSTALL``, often to locate non-R dependencies.
`r-rmpi <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/r_rmpi/package.py>`_
`r-rmpi <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/r-rmpi/package.py>`_
is an example of this, and flags for linking to an MPI library. To pass
these to the installation command, you can override ``configure_args``
like so:
View File
@@ -104,10 +104,10 @@ Finding available options
The first place to start when looking for a list of valid options to
build a package is ``scons --help``. Some packages like
`kahip <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/kahip/package.py>`_
`kahip <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/kahip/package.py>`_
don't bother overwriting the default SCons help message, so this isn't
very useful, but other packages like
`serf <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/serf/package.py>`_
`serf <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/serf/package.py>`_
print a list of valid command-line variables:
.. code-block:: console
@@ -177,7 +177,7 @@ print a list of valid command-line variables:
More advanced packages like
`cantera <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/cantera/package.py>`_
`cantera <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/cantera/package.py>`_
use ``scons --help`` to print a list of subcommands:
.. code-block:: console
View File
@@ -35,8 +35,8 @@
if not os.path.exists(link_name):
os.symlink(os.path.abspath("../../.."), link_name, target_is_directory=True)
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external"))
sys.path.insert(0, os.path.abspath("_spack_root/lib/spack/external/_vendoring"))
sys.path.append(os.path.abspath("_spack_root/lib/spack/"))
sys.path.append(os.path.abspath("_spack_root/var/spack/repos/"))
# Add the Spack bin directory to the path so that we can use its output in docs.
os.environ["SPACK_ROOT"] = os.path.abspath("_spack_root")
@@ -76,20 +76,11 @@
apidoc_args
+ [
"_spack_root/lib/spack/spack",
"_spack_root/lib/spack/spack/package.py", # sphinx struggles with os.chdir re-export.
"_spack_root/lib/spack/spack/test/*.py",
"_spack_root/lib/spack/spack/test/cmd/*.py",
]
)
sphinx_apidoc(apidoc_args + ["_spack_root/lib/spack/llnl"])
sphinx_apidoc(
apidoc_args
+ [
"--implicit-namespaces",
"_spack_root/var/spack/repos/spack_repo",
"_spack_root/var/spack/repos/spack_repo/builtin/packages",
]
)
# Enable todo items
todo_include_todos = True
@@ -218,7 +209,7 @@ def setup(sphinx):
# Spack classes that are private and we don't want to expose
("py:class", "spack.provider_index._IndexBase"),
("py:class", "spack.repo._PrependFileLoader"),
("py:class", "spack_repo.builtin.build_systems._checks.BuilderWithDefaults"),
("py:class", "spack.build_systems._checks.BuilderWithDefaults"),
# Spack classes that intersphinx is unable to resolve
("py:class", "spack.version.StandardVersion"),
("py:class", "spack.spec.DependencySpec"),
@@ -228,20 +219,10 @@ def setup(sphinx):
("py:class", "spack.install_test.Pb"),
("py:class", "spack.filesystem_view.SimpleFilesystemView"),
("py:class", "spack.traverse.EdgeAndDepth"),
("py:class", "_vendoring.archspec.cpu.microarchitecture.Microarchitecture"),
("py:class", "archspec.cpu.microarchitecture.Microarchitecture"),
("py:class", "spack.compiler.CompilerCache"),
# TypeVar that is not handled correctly
("py:class", "llnl.util.lang.T"),
("py:class", "llnl.util.lang.KT"),
("py:class", "llnl.util.lang.VT"),
("py:class", "llnl.util.lang.K"),
("py:class", "llnl.util.lang.V"),
("py:class", "llnl.util.lang.ClassPropertyType"),
("py:obj", "llnl.util.lang.KT"),
("py:obj", "llnl.util.lang.VT"),
("py:obj", "llnl.util.lang.ClassPropertyType"),
("py:obj", "llnl.util.lang.K"),
("py:obj", "llnl.util.lang.V"),
]
# The reST default role (used for this markup: `text`) to use for all documents.
View File
@@ -25,23 +25,14 @@ These settings can be overridden in ``etc/spack/config.yaml`` or
The location where Spack will install packages and their dependencies.
Default is ``$spack/opt/spack``.
---------------
``projections``
---------------
---------------------------------------------------
``install_hash_length`` and ``install_path_scheme``
---------------------------------------------------
.. warning::
Modifying projections of the install tree is strongly discouraged.
By default Spack installs all packages into a unique directory relative to the install
tree root with the following layout:
.. code-block::
{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}
In very rare cases, it may be necessary to reduce the length of this path. For example,
very old versions of the Intel compiler are known to segfault when input paths are too long:
The default Spack installation path can be very long and can create problems
for scripts with hardcoded shebangs. Additionally, when using the Intel
compiler, and if there is also a long list of dependencies, the compiler may
segfault. If you see the following:
.. code-block:: console
@@ -49,25 +40,36 @@ very old versions of the Intel compiler are known to segfault when input paths a
** Segmentation violation signal raised. **
Access violation or stack overflow. Please contact Intel Support for assistance.
Another case is Python and R packages with many runtime dependencies, which can result
in very large ``PYTHONPATH`` and ``R_LIBS`` environment variables. This can cause the
``execve`` system call to fail with ``E2BIG``, preventing processes from starting.
it may be because variables containing dependency specs may be too long. There
are two parameters to help with long path names. Firstly, the
``install_hash_length`` parameter can set the length of the hash in the
installation path from 1 to 32. The default path uses the full 32 characters.
For this reason, Spack allows users to modify the installation layout through custom
projections. For example
Secondly, it is also possible to modify the entire installation
scheme. By default Spack uses
``{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}``
where the tokens that are available for use in this directive are the
same as those understood by the :meth:`~spack.spec.Spec.format`
method. Using this parameter it is possible to use a different package
layout or reduce the depth of the installation paths. For example
.. code-block:: yaml
config:
install_tree:
root: $spack/opt/spack
projections:
all: "{name}/{version}/{hash:16}"
install_path_scheme: '{name}/{version}/{hash:7}'
would install packages into sub-directories using only the package name, version and a
hash length of 16 characters.
would install packages into sub-directories using only the package
name, version and a hash length of 7 characters.
Notice that reducing the hash length increases the likelihood of hash collisions.
When using either parameter to set the hash length it only affects the
representation of the hash in the installation directory. You
should be aware that the smaller the hash length the more likely
naming conflicts will occur. These parameters are independent of those
used to configure module names.
.. warning:: Modifying the installation hash length or path scheme after
packages have been installed will prevent Spack from being
able to find the old installation directories.
--------------------
``build_stage``
@@ -125,8 +127,6 @@ are stored in ``$spack/var/spack/cache``. These are stored indefinitely
by default. Can be purged with :ref:`spack clean --downloads
<cmd-spack-clean>`.
.. _Misc Cache:
--------------------
``misc_cache``
--------------------
@@ -148,16 +148,15 @@ this can expose you to attacks. Use at your own risk.
``ssl_certs``
--------------------
Path to custom certificates for SSL verification. The value can be a
filesystem path, or an environment variable that expands to an absolute file path.
Path to custom certificates for SSL verification. The value can be a
filesystem path, or an environment variable that expands to an absolute file path.
The default value is set to the environment variable ``SSL_CERT_FILE``
to use the same syntax used by many other applications that automatically
detect custom certificates.
When ``url_fetch_method:curl`` the ``config:ssl_certs`` should resolve to
a single file. Spack will then set the environment variable ``CURL_CA_BUNDLE``
in the subprocess calling ``curl``. If additional ``curl`` arguments are required,
they can be set in the config, e.g. ``url_fetch_method:'curl -k -q'``.
If ``url_fetch_method:urllib`` then files and directories are supported i.e.
in the subprocess calling ``curl``.
If ``url_fetch_method:urllib`` then files and directories are supported i.e.
``config:ssl_certs:$SSL_CERT_FILE`` or ``config:ssl_certs:$SSL_CERT_DIR``
will work.
In all cases the expanded path must be absolute for Spack to use the certificates.
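As a minimal sketch, assuming your certificates are pointed to by ``SSL_CERT_FILE``, the relevant ``config.yaml`` entries could look like:

.. code-block:: yaml

   config:
     url_fetch_method: urllib
     ssl_certs: $SSL_CERT_FILE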
@@ -337,52 +336,3 @@ create a new alias called ``inst`` that will always call ``install -v``:
aliases:
inst: install -v
-------------------------------
``concretization_cache:enable``
-------------------------------
When set to ``true``, Spack will utilize a cache of solver outputs from
successful concretization runs. When enabled, Spack will check the concretization
cache prior to running the solver. If a previous request to solve a given
problem is present in the cache, Spack will load the concrete specs and other
solver data from the cache rather than running the solver. Specs not previously
concretized will be added to the cache on a successful solve. The cache additionally
holds solver statistics, so commands like ``spack solve`` will still return information
about the run that produced a given solver result.
This cache is a subcache of the :ref:`Misc Cache` and as such will be cleaned when the Misc
Cache is cleaned.
When ``false`` or omitted, all concretization requests will be performed from scratch.
----------------------------
``concretization_cache:url``
----------------------------
Path to the location where Spack will root the concretization cache. Currently this only supports
paths on the local filesystem.
Default location is under the :ref:`Misc Cache` at: ``$misc_cache/concretization``
------------------------------------
``concretization_cache:entry_limit``
------------------------------------
Sets a limit on the number of concretization results that Spack will cache. The limit is evaluated
after each concretization run; if Spack has stored more results than the limit allows, the
oldest concretization results are pruned until 10% of the limit has been removed.
Setting this value to 0 disables the automatic pruning. It is expected users will be
responsible for maintaining this cache.
-----------------------------------
``concretization_cache:size_limit``
-----------------------------------
Sets a limit on the size of the concretization cache in bytes. The limit is evaluated
after each concretization run; if Spack has stored more results than the limit allows, the
oldest concretization results are pruned until 10% of the limit has been removed.
Setting this value to 0 disables the automatic pruning. It is expected users will be
responsible for maintaining this cache.
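Taken together, a sketch of these settings in ``config.yaml`` might look as follows (the path and limits are illustrative values, not recommendations):

.. code-block:: yaml

   config:
     concretization_cache:
       enable: true
       url: /path/to/concretization-cache
       entry_limit: 1000
       size_limit: 1073741824  # roughly 1 GiB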
View File
@@ -11,10 +11,9 @@ Configuration Files
Spack has many configuration files. Here is a quick list of them, in
case you want to skip directly to specific docs:
* :ref:`packages.yaml <compiler-config>`
* :ref:`compilers.yaml <compiler-config>`
* :ref:`concretizer.yaml <concretizer-options>`
* :ref:`config.yaml <config-yaml>`
* :ref:`include.yaml <include-yaml>`
* :ref:`mirrors.yaml <mirrors>`
* :ref:`modules.yaml <modules>`
* :ref:`packages.yaml <packages-config>`
@@ -46,12 +45,6 @@ Each Spack configuration file is nested under a top-level section
corresponding to its name. So, ``config.yaml`` starts with ``config:``,
``mirrors.yaml`` starts with ``mirrors:``, etc.
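For example, a minimal ``config.yaml`` might contain nothing more than the documented default install tree under its ``config:`` section:

.. code-block:: yaml

   config:
     install_tree:
       root: $spack/opt/spack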
.. tip::
Validation and autocompletion of Spack config files can be enabled in
your editor with the YAML language server. See `spack/schemas
<https://github.com/spack/schemas>`_ for more information.
.. _configuration-scopes:
--------------------
@@ -101,7 +94,7 @@ are six configuration scopes. From lowest to highest:
precedence over all other scopes.
Each configuration directory may contain several configuration files,
such as ``config.yaml``, ``packages.yaml``, or ``mirrors.yaml``. When
such as ``config.yaml``, ``compilers.yaml``, or ``mirrors.yaml``. When
configurations conflict, settings from higher-precedence scopes override
lower-precedence settings.
View File
@@ -11,7 +11,7 @@ Container Images
Spack :ref:`environments` can easily be turned into container images. This page
outlines two ways in which this can be done:
1. By installing the environment on the host system and copying the installations
1. By installing the environment on the host system, and copying the installations
into the container image. This approach does not require any tools like Docker
or Singularity to be installed.
2. By generating a Docker or Singularity recipe that can be used to build the
@@ -56,8 +56,8 @@ environment roots and its runtime dependencies.
.. note::
When using registries like GHCR and Docker Hub, the ``--oci-password`` flag specifies not
the password for your account, but rather a personal access token you need to generate separately.
When using registries like GHCR and Docker Hub, the ``--oci-password`` flag is not
the password for your account, but a personal access token you need to generate separately.
The specified ``--base-image`` should have a libc that is compatible with the host system.
For example if your host system is Ubuntu 20.04, you can use ``ubuntu:20.04``, ``ubuntu:22.04``
View File
@@ -226,9 +226,9 @@ If all is well, you'll see something like this:
Modified files:
var/spack/repos/spack_repo/builtin/packages/hdf5/package.py
var/spack/repos/spack_repo/builtin/packages/hdf/package.py
var/spack/repos/spack_repo/builtin/packages/netcdf/package.py
var/spack/repos/builtin/packages/hdf5/package.py
var/spack/repos/builtin/packages/hdf/package.py
var/spack/repos/builtin/packages/netcdf/package.py
=======================================================
Flake8 checks were clean.
@@ -236,9 +236,9 @@ However, if you aren't compliant with PEP 8, flake8 will complain:
.. code-block:: console
var/spack/repos/spack_repo/builtin/packages/netcdf/package.py:26: [F401] 'os' imported but unused
var/spack/repos/spack_repo/builtin/packages/netcdf/package.py:61: [E303] too many blank lines (2)
var/spack/repos/spack_repo/builtin/packages/netcdf/package.py:106: [E501] line too long (92 > 79 characters)
var/spack/repos/builtin/packages/netcdf/package.py:26: [F401] 'os' imported but unused
var/spack/repos/builtin/packages/netcdf/package.py:61: [E303] too many blank lines (2)
var/spack/repos/builtin/packages/netcdf/package.py:106: [E501] line too long (92 > 79 characters)
Flake8 found errors.
Most of the error messages are straightforward, but if you don't understand what
@@ -280,7 +280,7 @@ All of these can be installed with Spack, e.g.
.. warning::
Sphinx has `several required dependencies <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/py-sphinx/package.py>`_.
Sphinx has `several required dependencies <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/py-sphinx/package.py>`_.
If you're using a ``python`` from Spack and you installed
``py-sphinx`` and friends, you need to make them available to your
``python``. The easiest way to do this is to run:
@@ -361,6 +361,7 @@ and the tags associated with the class of runners to build on.
* ``.linux_neoverse_n1``
* ``.linux_neoverse_v1``
* ``.linux_neoverse_v2``
* ``.linux_power``
* ``.linux_skylake``
* ``.linux_x86_64``
* ``.linux_x86_64_v4``
View File
@@ -154,7 +154,9 @@ Package-related modules
:mod:`spack.util.naming`
Contains functions for mapping between Spack package names,
Python module names, and Python class names.
Python module names, and Python class names. Functions like
:func:`~spack.util.naming.mod_to_class` handle mapping package
module names to class names.
:mod:`spack.directives`
*Directives* are functions that can be called inside a package definition
@@ -541,10 +543,10 @@ With either interpreter you can run a single command:
.. code-block:: console
$ spack python -c 'from spack.concretize import concretize_one; concretize_one("python")'
$ spack python -c 'from spack.spec import Spec; Spec("python").concretized()'
...
$ spack python -i ipython -c 'from spack.concretize import concretize_one; concretize_one("python")'
$ spack python -i ipython -c 'from spack.spec import Spec; Spec("python").concretized()'
Out[1]: ...
or a file:
View File
@@ -1,34 +0,0 @@
.. Copyright Spack Project Developers. See COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _env-vars-yaml:
=============================================
Environment Variable Settings (env_vars.yaml)
=============================================
Spack allows you to include shell environment variable modifications
for a Spack environment by including an ``env_vars.yaml``. Environment
variables can be modified by setting, unsetting, appending, and prepending
variables in the shell environment.
The changes to the shell environment will take effect when the Spack
environment is activated.
For example:
.. code-block:: yaml
env_vars:
set:
ENVAR_TO_SET_IN_ENV_LOAD: "FOO"
unset:
ENVAR_TO_UNSET_IN_ENV_LOAD:
prepend_path:
PATH_LIST: "path/to/prepend"
append_path:
PATH_LIST: "path/to/append"
remove_path:
PATH_LIST: "path/to/remove"
View File
@@ -112,19 +112,6 @@ the original but may concretize differently in the presence of different
explicit or default configuration settings (e.g., a different version of
Spack or for a different user account).
Environments created from a manifest will copy any included configs
from relative paths inside the environment. Relative paths from
outside the environment will cause errors, and absolute paths will be
kept absolute. For example, if ``spack.yaml`` includes:
.. code-block:: yaml
spack:
include: [./config.yaml]
then the created environment will have its own copy of the file
``config.yaml`` copied from the location in the original environment.
Create an environment from a ``spack.lock`` file using:
.. code-block:: console
@@ -173,7 +160,7 @@ accepts. If an environment already exists then spack will simply activate it
and ignore the create-specific flags.
.. code-block:: console
$ spack env activate --create -p myenv
# ...
# [creates if myenv does not exist yet]
@@ -437,8 +424,8 @@ Developing Packages in a Spack Environment
The ``spack develop`` command allows one to develop Spack packages in
an environment. It requires a spec containing a concrete version, and
will configure Spack to install the package from local source.
If a version is not provided from the command line interface then spack
will configure Spack to install the package from local source.
If a version is not provided from the command line interface then spack
will automatically pick the highest version the package has defined.
This means any infinity versions (``develop``, ``main``, ``stable``) will be
preferred in this selection process.
@@ -448,22 +435,15 @@ set, and Spack will ensure the package and its dependents are rebuilt
any time the environment is installed if the package's local source
code has been modified. Spack's native implementation to check for modifications
is to check if ``mtime`` is newer than the installation.
A custom check can be created by overriding the ``detect_dev_src_change`` method
in your package class. This is particularly useful for projects that use custom Spack repos
to drive development and want to optimize performance.
A custom check can be created by overriding the ``detect_dev_src_change`` method
in your package class. This is particularly useful for projects that use custom Spack repos
to drive development and want to optimize performance.
Spack ensures that all instances of a
developed package in the environment are concretized to match the
version (and other constraints) passed as the spec argument to the
``spack develop`` command.
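For reference, running ``spack develop`` records the developed spec in the environment's ``spack.yaml``; a sketch of the resulting entry (the package name and source path here are hypothetical) looks like:

.. code-block:: yaml

   spack:
     specs:
     - foo@main
     develop:
       foo:
         spec: foo@main
         path: /path/to/local/foo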
When working deep in the graph it is often desirable to have multiple specs marked
as ``develop`` so you don't have to restage and/or do full rebuilds each time you
call ``spack install``. The ``--recursive`` flag can be used in these scenarios
to ensure that all the dependents of the initial spec you provide are also marked
as develop specs. The ``--recursive`` flag requires a pre-concretized environment
so the graph can be traversed from the supplied spec all the way to the root specs.
For packages with ``git`` attributes, git branches, tags, and commits can
also be used as valid concrete versions (see :ref:`version-specifier`).
This means that for a package ``foo``, ``spack develop foo@git.main`` will clone
@@ -473,7 +453,7 @@ Further development on ``foo`` can be tested by re-installing the environment,
and eventually committed and pushed to the upstream git repo.
If the package being developed supports out-of-source builds then users can use the
``--build_directory`` flag to control the location and name of the build directory.
``--build_directory`` flag to control the location and name of the build directory.
This is a shortcut to set the ``package_attributes:build_directory`` in the
``packages`` configuration (see :ref:`assigning-package-attributes`).
The supplied location will become the build-directory for that package in all future builds.
@@ -539,9 +519,7 @@ from the command line.
You can also include an environment directly in the ``spack.yaml`` file. It
involves adding the ``include_concrete`` heading in the yaml followed by the
absolute path to the independent environments. Note, that you may use Spack
config variables such as ``$spack`` or environment variables as long as the
expression expands to an absolute path.
absolute path to the independent environments.
.. code-block:: yaml
@@ -551,7 +529,7 @@ expression expands to an absolute path.
unify: true
include_concrete:
- /absolute/path/to/environment1
- $spack/../path/to/environment2
- /absolute/path/to/environment2
Once the ``spack.yaml`` has been updated you must concretize the environment to
@@ -669,56 +647,34 @@ a ``packages.yaml`` file) could contain:
# ...
packages:
all:
providers:
mpi: [openmpi]
compiler: [intel]
# ...
This configuration sets the default mpi provider to be openmpi.
This configuration sets the default compiler for all packages to
``intel``.
^^^^^^^^^^^^^^^^^^^^^^^
Included configurations
^^^^^^^^^^^^^^^^^^^^^^^
Spack environments allow an ``include`` heading in their yaml schema.
This heading pulls in external configuration files and applies them to
the environment.
Spack environments allow an ``include`` heading in their yaml
schema. This heading pulls in external configuration files and applies
them to the environment.
.. code-block:: yaml
spack:
include:
- environment/relative/path/to/config.yaml
- path: https://github.com/path/to/raw/config/compilers.yaml
sha256: 26e871804a92cd07bb3d611b31b4156ae93d35b6a6d6e0ef3a67871fcb1d258b
- relative/path/to/config.yaml
- https://github.com/path/to/raw/config/compilers.yaml
- /absolute/path/to/packages.yaml
- path: /path/to/$os/$target/environment
optional: true
- path: /path/to/os-specific/config-dir
when: os == "ventura"
Included configuration files are required *unless* they are explicitly optional
or the entry's condition evaluates to ``false``. Optional includes are specified
with the ``optional`` clause and conditional with the ``when`` clause. (See
:ref:`include-yaml` for more information on optional and conditional entries.)
Files are listed using paths to individual files or directories containing them.
Path entries may be absolute or relative to the environment or specified as
URLs. URLs to individual files must link to the **raw** form of the file's
contents (e.g., `GitHub
<https://docs.github.com/en/repositories/working-with-files/using-files/viewing-and-understanding-files#viewing-or-copying-the-raw-file-content>`_
or `GitLab
<https://docs.gitlab.com/ee/api/repository_files.html#get-raw-file-from-repository>`_) **and** include a valid sha256 for the file.
Only the ``file``, ``ftp``, ``http`` and ``https`` protocols (or schemes) are
supported. Spack-specific, environment and user path variables can be used.
(See :ref:`config-file-variables` for more information.)
.. warning::
Recursive includes are not currently processed in a breadth-first manner
so the value of a configuration option that is altered by multiple included
files may not be what you expect. This will be addressed in a future
update.
Environments can include files or URLs. File paths can be relative or
absolute. URLs include the path to the text for individual files or
can be the path to a directory containing configuration files.
Spack supports ``file``, ``http``, ``https`` and ``ftp`` protocols (or
schemes). Spack-specific, environment and user path variables may be
used in these paths. See :ref:`config-file-variables` for more information.
^^^^^^^^^^^^^^^^^^^^^^^^
Configuration precedence
@@ -1002,28 +958,6 @@ For example, the following environment has three root packages:
This allows for a much-needed reduction in redundancy between packages
and constraints.
-------------------------------
Modifying Environment Variables
-------------------------------
Spack Environments can modify the active shell's environment variables when activated. The environment can be
configured to set, unset, prepend, or append using ``env_vars`` configuration in the ``spack.yaml`` or through config scopes
file:
.. code-block:: yaml
spack:
env_vars:
set:
ENVAR_TO_SET_IN_ENV_LOAD: "FOO"
unset:
ENVAR_TO_UNSET_IN_ENV_LOAD:
prepend_path:
PATH_LIST: "path/to/prepend"
append_path:
PATH_LIST: "path/to/append"
remove_path:
PATH_LIST: "path/to/remove"
-----------------
Environment Views
View File
@@ -0,0 +1,161 @@
spack:
definitions:
- compiler-pkgs:
- 'llvm+clang@6.0.1 os=centos7'
- 'gcc@6.5.0 os=centos7'
- 'llvm+clang@6.0.1 os=ubuntu18.04'
- 'gcc@6.5.0 os=ubuntu18.04'
- pkgs:
- readline@7.0
# - xsdk@0.4.0
- compilers:
- '%gcc@5.5.0'
- '%gcc@6.5.0'
- '%gcc@7.3.0'
- '%clang@6.0.0'
- '%clang@6.0.1'
- oses:
- os=ubuntu18.04
- os=centos7
specs:
- matrix:
- [$pkgs]
- [$compilers]
- [$oses]
exclude:
- '%gcc@7.3.0 os=centos7'
- '%gcc@5.5.0 os=ubuntu18.04'
mirrors:
cloud_gitlab: https://mirror.spack.io
compilers:
# The .gitlab-ci.yml for this project picks a Docker container which does
# not have any compilers pre-built and ready to use, so we need to fake the
# existence of those here.
- compiler:
operating_system: centos7
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: gcc@5.5.0
target: x86_64
- compiler:
operating_system: centos7
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: gcc@6.5.0
target: x86_64
- compiler:
operating_system: centos7
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: clang@6.0.0
target: x86_64
- compiler:
operating_system: centos7
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: clang@6.0.1
target: x86_64
- compiler:
operating_system: ubuntu18.04
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: clang@6.0.0
target: x86_64
- compiler:
operating_system: ubuntu18.04
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: clang@6.0.1
target: x86_64
- compiler:
operating_system: ubuntu18.04
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: gcc@6.5.0
target: x86_64
- compiler:
operating_system: ubuntu18.04
modules: []
paths:
cc: /not/used
cxx: /not/used
f77: /not/used
fc: /not/used
spec: gcc@7.3.0
target: x86_64
gitlab-ci:
bootstrap:
- name: compiler-pkgs
compiler-agnostic: true
mappings:
- # spack-cloud-ubuntu
match:
# these are specs, if *any* match the spec under consideration, this
# 'mapping' will be used to generate the CI job
- os=ubuntu18.04
runner-attributes:
# 'tags' and 'image' go directly onto the job, 'variables' will
# be added to what we already necessarily create for the job as
# a part of the CI workflow
tags:
- spack-k8s
image:
name: scottwittenburg/spack_builder_ubuntu_18.04
entrypoint: [""]
- # spack-cloud-centos
match:
# these are specs, if *any* match the spec under consideration, this
# 'mapping' will be used to generate the CI job
- 'os=centos7'
runner-attributes:
tags:
- spack-k8s
image:
name: scottwittenburg/spack_builder_centos_7
entrypoint: [""]
cdash:
build-group: Release Testing
url: http://cdash
project: Spack Testing
site: Spack Docker-Compose Workflow
repos: []
upstreams: {}
modules:
enable: []
packages: {}
config: {}
View File
@@ -131,7 +131,7 @@ creates a simple python file:
It doesn't take much python coding to get from there to a working
package:
.. literalinclude:: _spack_root/var/spack/repos/spack_repo/builtin/packages/libelf/package.py
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/libelf/package.py
:lines: 5-
Spack also provides wrapper functions around common commands like
View File
@@ -30,7 +30,7 @@ than always choosing the latest versions or default variants.
.. note::
As a rule of thumb: requirements + constraints > strong preferences > reuse > preferences > defaults.
As a rule of thumb: requirements + constraints > reuse > preferences > defaults.
The following set of criteria (from lowest to highest precedence) explain
common cases where concretization output may seem surprising at first.
@@ -56,19 +56,7 @@ common cases where concretization output may seem surprising at first.
concretizer:
reuse: dependencies # other options are 'true' and 'false'
3. :ref:`Strong preferences <package-strong-preferences>` configured in ``packages.yaml``
are higher priority than reuse, and can be used to strongly prefer a specific version
or variant, without erroring out if it's not possible. Strong preferences are specified
as follows:
.. code-block:: yaml
packages:
foo:
prefer:
- "@1.1: ~mpi"
4. :ref:`Package requirements <package-requirements>` configured in ``packages.yaml``,
3. :ref:`Package requirements <package-requirements>` configured in ``packages.yaml``,
and constraints from the command line as well as ``package.py`` files override all
of the above. Requirements are specified as follows:
@@ -78,8 +66,6 @@ common cases where concretization output may seem surprising at first.
foo:
require:
- "@1.2: +mpi"
conflicts:
- "@1.4"
Requirements and constraints restrict the set of possible solutions, while reuse
behavior and preferences influence what an optimal solution looks like.
View File
@@ -20,7 +20,7 @@ be present on the machine where Spack is run:
:header-rows: 1
These requirements can be easily installed on most modern Linux systems;
on macOS, the Command Line Tools package is required, and a full Xcode suite
on macOS, the Command Line Tools package is required, and a full XCode suite
may be necessary for some packages such as Qt and apple-gl. Spack is designed
to run on HPC platforms like Cray. Not all packages should be expected
to work on all platforms.
@@ -254,11 +254,12 @@ directory.
Compiler configuration
----------------------
Spack has the ability to build packages with multiple compilers and compiler versions.
Compilers can be made available to Spack by specifying them manually in ``packages.yaml``,
or automatically by running ``spack compiler find``.
For convenience, Spack will automatically detect compilers the first time it needs them,
if none is available.
Spack has the ability to build packages with multiple compilers and
compiler versions. Compilers can be made available to Spack by
specifying them manually in ``compilers.yaml`` or ``packages.yaml``,
or automatically by running ``spack compiler find``, but for
convenience Spack will automatically detect compilers the first time
it needs them.
.. _cmd-spack-compilers:
@@ -273,11 +274,16 @@ compilers`` or ``spack compiler list``:
$ spack compilers
==> Available compilers
-- gcc ubuntu20.04-x86_64 ---------------------------------------
gcc@9.4.0 gcc@8.4.0 gcc@10.5.0
-- llvm ubuntu20.04-x86_64 --------------------------------------
llvm@12.0.0 llvm@11.0.0 llvm@10.0.0
-- gcc ---------------------------------------------------------
gcc@4.9.0 gcc@4.8.0 gcc@4.7.0 gcc@4.6.2 gcc@4.4.7
gcc@4.8.2 gcc@4.7.1 gcc@4.6.3 gcc@4.6.1 gcc@4.1.2
-- intel -------------------------------------------------------
intel@15.0.0 intel@14.0.0 intel@13.0.0 intel@12.1.0 intel@10.0
intel@14.0.3 intel@13.1.1 intel@12.1.5 intel@12.0.4 intel@9.1
intel@14.0.2 intel@13.1.0 intel@12.1.3 intel@11.1
intel@14.0.1 intel@13.0.1 intel@12.1.2 intel@10.1
-- clang -------------------------------------------------------
clang@3.4 clang@3.3 clang@3.2 clang@3.1
Any of these compilers can be used to build Spack packages. More on
how this is done is in :ref:`sec-specs`.
@@ -296,22 +302,16 @@ An alias for ``spack compiler find``.
``spack compiler find``
^^^^^^^^^^^^^^^^^^^^^^^
If you do not see a compiler in the list shown by:
Lists the compilers currently available to Spack. If you do not see
a compiler in this list, but you want to use it with Spack, you can
simply run ``spack compiler find`` with the path to where the
compiler is installed. For example:
.. code-block:: console
$ spack compiler list
but you want to use it with Spack, you can simply run ``spack compiler find`` with the
path to where the compiler is installed. For example:
.. code-block:: console
$ spack compiler find /opt/intel/oneapi/compiler/2025.1/bin/
==> Added 1 new compiler to /home/user/.spack/packages.yaml
intel-oneapi-compilers@2025.1.0
==> Compilers are defined in the following files:
/home/user/.spack/packages.yaml
$ spack compiler find /usr/local/tools/ic-13.0.079
==> Added 1 new compiler to ~/.spack/linux/compilers.yaml
intel@13.0.079
Or you can run ``spack compiler find`` with no arguments to force
auto-detection. This is useful if you do not know where compilers are
@@ -322,7 +322,7 @@ installed, but you know that new compilers have been added to your
$ module load gcc/4.9.0
$ spack compiler find
==> Added 1 new compiler to /home/user/.spack/packages.yaml
==> Added 1 new compiler to ~/.spack/linux/compilers.yaml
gcc@4.9.0
This loads the environment module for gcc-4.9.0 to add it to
@@ -331,7 +331,7 @@ This loads the environment module for gcc-4.9.0 to add it to
.. note::
By default, spack does not fill in the ``modules:`` field in the
``packages.yaml`` file. If you are using a compiler from a
``compilers.yaml`` file. If you are using a compiler from a
module, then you should add this field manually.
See the section on :ref:`compilers-requiring-modules`.
@@ -341,82 +341,91 @@ This loads the environment module for gcc-4.9.0 to add it to
``spack compiler info``
^^^^^^^^^^^^^^^^^^^^^^^
If you want to see additional information on some specific compilers, you can run ``spack compiler info`` on it:
If you want to see specifics on a particular compiler, you can run
``spack compiler info`` on it:
.. code-block:: console
$ spack compiler info gcc
gcc@=8.4.0 languages='c,c++,fortran' arch=linux-ubuntu20.04-x86_64:
prefix: /usr
compilers:
c: /usr/bin/gcc-8
cxx: /usr/bin/g++-8
fortran: /usr/bin/gfortran-8
$ spack compiler info intel@15
intel@15.0.0:
paths:
cc = /usr/local/bin/icc-15.0.090
cxx = /usr/local/bin/icpc-15.0.090
f77 = /usr/local/bin/ifort-15.0.090
fc = /usr/local/bin/ifort-15.0.090
modules = []
operating_system = centos6
...
gcc@=9.4.0 languages='c,c++,fortran' arch=linux-ubuntu20.04-x86_64:
prefix: /usr
compilers:
c: /usr/bin/gcc
cxx: /usr/bin/g++
fortran: /usr/bin/gfortran
gcc@=10.5.0 languages='c,c++,fortran' arch=linux-ubuntu20.04-x86_64:
prefix: /usr
compilers:
c: /usr/bin/gcc-10
cxx: /usr/bin/g++-10
fortran: /usr/bin/gfortran-10
This shows the details of the compilers that were detected by Spack.
Notice also that we didn't have to be too specific about the version. We just said ``gcc``, and we got information
about all the matching compilers.
This shows which C, C++, and Fortran compilers were detected by Spack.
Notice also that we didn't have to be too specific about the
version. We just said ``intel@15``, and information about the only
matching Intel compiler was displayed.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Manual compiler configuration
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If auto-detection fails, you can manually configure a compiler by editing your ``~/.spack/packages.yaml`` file.
You can do this by running ``spack config edit packages``, which will open the file in
If auto-detection fails, you can manually configure a compiler by
editing your ``~/.spack/<platform>/compilers.yaml`` file. You can do this by running
``spack config edit compilers``, which will open the file in
:ref:`your favorite editor <controlling-the-editor>`.
Each compiler has an "external" entry in the file with some ``extra_attributes``:
Each compiler configuration in the file looks like this:
.. code-block:: yaml
packages:
gcc:
externals:
- spec: gcc@10.5.0 languages='c,c++,fortran'
prefix: /usr
extra_attributes:
compilers:
c: /usr/bin/gcc-10
cxx: /usr/bin/g++-10
fortran: /usr/bin/gfortran-10
compilers:
- compiler:
modules: []
operating_system: centos6
paths:
cc: /usr/local/bin/icc-15.0.024-beta
cxx: /usr/local/bin/icpc-15.0.024-beta
f77: /usr/local/bin/ifort-15.0.024-beta
fc: /usr/local/bin/ifort-15.0.024-beta
spec: intel@15.0.0
The compiler executables are listed under ``extra_attributes:compilers``, and are keyed by language.
Once you save the file, the configured compilers will show up in the list displayed by ``spack compilers``.
For compilers that do not support Fortran (like ``clang``), put
``None`` for ``f77`` and ``fc``:
You can also add compiler flags to manually configured compilers. These flags should be specified in the
``flags`` section of the compiler specification. The valid flags are ``cflags``, ``cxxflags``, ``fflags``,
.. code-block:: yaml
compilers:
- compiler:
modules: []
operating_system: centos6
paths:
cc: /usr/bin/clang
cxx: /usr/bin/clang++
f77: None
fc: None
spec: clang@3.3svn
Once you save the file, the configured compilers will show up in the
list displayed by ``spack compilers``.
You can also add compiler flags to manually configured compilers. These
flags should be specified in the ``flags`` section of the compiler
specification. The valid flags are ``cflags``, ``cxxflags``, ``fflags``,
``cppflags``, ``ldflags``, and ``ldlibs``. For example:
.. code-block:: yaml
packages:
gcc:
externals:
- spec: gcc@10.5.0 languages='c,c++,fortran'
prefix: /usr
extra_attributes:
compilers:
c: /usr/bin/gcc-10
cxx: /usr/bin/g++-10
fortran: /usr/bin/gfortran-10
flags:
cflags: -O3 -fPIC
cxxflags: -O3 -fPIC
cppflags: -O3 -fPIC
compilers:
- compiler:
modules: []
operating_system: centos6
paths:
cc: /usr/bin/gcc
cxx: /usr/bin/g++
f77: /usr/bin/gfortran
fc: /usr/bin/gfortran
flags:
cflags: -O3 -fPIC
cxxflags: -O3 -fPIC
cppflags: -O3 -fPIC
spec: gcc@4.7.2
These flags will be treated by spack as if they were entered from
the command line each time this compiler is used. The compiler wrappers
@@ -431,44 +440,95 @@ These variables should be specified in the ``environment`` section of the compil
specification. The operations available to modify the environment are ``set``, ``unset``,
``prepend_path``, ``append_path``, and ``remove_path``. For example:
.. code-block:: yaml
compilers:
- compiler:
modules: []
operating_system: centos6
paths:
cc: /opt/intel/oneapi/compiler/latest/linux/bin/icx
cxx: /opt/intel/oneapi/compiler/latest/linux/bin/icpx
f77: /opt/intel/oneapi/compiler/latest/linux/bin/ifx
fc: /opt/intel/oneapi/compiler/latest/linux/bin/ifx
spec: oneapi@latest
environment:
set:
MKL_ROOT: "/path/to/mkl/root"
unset: # A list of environment variables to unset
- CC
prepend_path: # Similar for append|remove_path
LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh
.. note::
Spack is in the process of moving compilers from a separate
attribute to be handled like all other packages. As part of this
process, the ``compilers.yaml`` section will eventually be replaced
by configuration in the ``packages.yaml`` section. This new
configuration is now available, although it is not yet the default
behavior.
Compilers can also be configured as external packages in the
``packages.yaml`` config file. Any external package for a compiler
(e.g. ``gcc`` or ``llvm``) will be treated as a configured compiler
assuming the paths to the compiler executables are determinable from
the prefix.
If the paths to the compiler executable are not determinable from the
prefix, you can add them to the ``extra_attributes`` field. Similarly,
all other fields from the compilers config can be added to the
``extra_attributes`` field for an external representing a compiler.
Note that the format for the ``paths`` field in the
``extra_attributes`` section is different than in the ``compilers``
config. For compilers configured as external packages, the section is
named ``compilers`` and the dictionary maps language names (``c``,
``cxx``, ``fortran``) to paths, rather than using the names ``cc``,
``fc``, and ``f77``.
.. code-block:: yaml
packages:
intel-oneapi-compilers:
externals:
- spec: intel-oneapi-compilers@2025.1.0
prefix: /opt/intel/oneapi
gcc:
external:
- spec: gcc@12.2.0 arch=linux-rhel8-skylake
prefix: /usr
extra_attributes:
compilers:
c: /opt/intel/oneapi/compiler/2025.1/bin/icx
cxx: /opt/intel/oneapi/compiler/2025.1/bin/icpx
fortran: /opt/intel/oneapi/compiler/2025.1/bin/ifx
environment:
set:
MKL_ROOT: "/path/to/mkl/root"
unset: # A list of environment variables to unset
- CC
prepend_path: # Similar for append|remove_path
LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh
GCC_ROOT: /usr
external:
- spec: llvm+clang@15.0.0 arch=linux-rhel8-skylake
prefix: /usr
extra_attributes:
compilers:
c: /usr/bin/clang-with-suffix
cxx: /usr/bin/clang++-with-extra-info
fortran: /usr/bin/gfortran
extra_rpaths:
- /usr/lib/llvm/
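For reference, a minimal external-compiler entry of this form reduces to something like the following sketch (the prefix and executable paths are illustrative):

.. code-block:: yaml

   packages:
     gcc:
       externals:
       - spec: gcc@12.2.0
         prefix: /usr
         extra_attributes:
           compilers:
             c: /usr/bin/gcc
             cxx: /usr/bin/g++
             fortran: /usr/bin/gfortran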
^^^^^^^^^^^^^^^^^^^^^^^
Build Your Own Compiler
^^^^^^^^^^^^^^^^^^^^^^^
If you are particular about which compiler/version you use, you might wish to have Spack build it for you.
For example:
If you are particular about which compiler/version you use, you might
wish to have Spack build it for you. For example:
.. code-block:: console
$ spack install gcc@14+binutils
$ spack install gcc@4.9.3
Once the compiler is installed, you can start using it without additional configuration:
Once that has finished, you will need to add it to your
``compilers.yaml`` file. You can then set Spack to use it by default
by adding the following to your ``packages.yaml`` file:
.. code-block:: console
.. code-block:: yaml
$ spack install hdf5~mpi %gcc@14
The same holds true for compilers that are made available from buildcaches, when reusing them is allowed.
packages:
all:
compiler: [gcc@4.9.3]
.. _compilers-requiring-modules:
@@ -476,26 +536,30 @@ The same holds true for compilers that are made available from buildcaches, when
Compilers Requiring Modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Many installed compilers will work regardless of the environment they are called with.
However, some installed compilers require environment variables to be set in order to run;
this is typical for Intel and other proprietary compilers.
Many installed compilers will work regardless of the environment they
are called with. However, some installed compilers require
``$LD_LIBRARY_PATH`` or other environment variables to be set in order
to run; this is typical for Intel and other proprietary compilers.
On typical HPC clusters, these environment modifications are usually delegated to some "module" system.
In such a case, you should tell Spack which module(s) to load in order to run the chosen compiler:
In such a case, you should tell Spack which module(s) to load in order
to run the chosen compiler (If the compiler does not come with a
module file, you might consider making one by hand). Spack will load
this module into the environment ONLY when the compiler is run, and
NOT in general for a package's ``install()`` method. See, for
example, this ``compilers.yaml`` file:
.. code-block:: yaml
packages:
gcc:
externals:
- spec: gcc@10.5.0 languages='c,c++,fortran'
prefix: /opt/compilers
extra_attributes:
compilers:
c: /opt/compilers/bin/gcc-10
cxx: /opt/compilers/bin/g++-10
fortran: /opt/compilers/bin/gfortran-10
modules: [gcc/10.5.0]
compilers:
- compiler:
modules: [other/comp/gcc-5.3-sp3]
operating_system: SuSE11
paths:
cc: /usr/local/other/SLES11.3/gcc/5.3.0/bin/gcc
cxx: /usr/local/other/SLES11.3/gcc/5.3.0/bin/g++
f77: /usr/local/other/SLES11.3/gcc/5.3.0/bin/gfortran
fc: /usr/local/other/SLES11.3/gcc/5.3.0/bin/gfortran
spec: gcc@5.3.0
Some compilers require special environment settings to be loaded not just
to run, but also to execute the code they build, breaking packages that
@@ -516,7 +580,7 @@ Licensed Compilers
^^^^^^^^^^^^^^^^^^
Some proprietary compilers require licensing to use. If you need to
use a licensed compiler, the process is similar to a mix of
use a licensed compiler (eg, PGI), the process is similar to a mix of
build your own, plus modules:
#. Create a Spack package (if it doesn't exist already) to install
@@ -526,21 +590,24 @@ build your own, plus modules:
using Spack to load the module it just created, and running simple
builds (eg: ``cc helloWorld.c && ./a.out``)
#. Add the newly-installed compiler to ``packages.yaml`` as shown above.
#. Add the newly-installed compiler to ``compilers.yaml`` as shown
above.
.. _mixed-toolchains:
^^^^^^^^^^^^^^^^^^^^^^^^^^
Fortran compilers on macOS
^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^
Mixed Toolchains
^^^^^^^^^^^^^^^^
Modern compilers typically come with related compilers for C, C++ and
Fortran bundled together. When possible, results are best if the same
compiler is used for all languages.
In some cases, this is not possible. For example, Xcode on macOS provides no Fortran compilers.
The user is therefore forced to use a mixed toolchain: Xcode-provided Clang for C/C++ and, e.g.,
GNU ``gfortran`` for Fortran.
In some cases, this is not possible. For example, starting with macOS El
Capitan (10.11), many packages no longer build with GCC, but XCode
provides no Fortran compilers. The user is therefore forced to use a
mixed toolchain: XCode-provided Clang for C/C++ and GNU ``gfortran`` for
Fortran.
#. You need to make sure that Xcode is installed. Run the following command:
@@ -593,25 +660,45 @@ GNU ``gfortran`` for Fortran.
Note: the flag is ``-license``, not ``--license``.
#. Run ``spack compiler find`` to locate Clang.
#. There are different ways to get ``gfortran`` on macOS. For example, you can
install GCC with Spack (``spack install gcc``), with Homebrew (``brew install
gcc``), or from a `DMG installer
<https://github.com/fxcoudert/gfortran-for-macOS/releases>`_.
#. Run ``spack compiler find`` to locate both Apple-Clang and GCC.
#. The only thing left to do is to edit ``~/.spack/darwin/compilers.yaml`` to provide
the path to ``gfortran``:
Since languages in Spack are modeled as virtual packages, ``apple-clang`` will be used to provide
C and C++, while GCC will be used for Fortran.
.. code-block:: yaml
compilers:
- compiler:
# ...
paths:
cc: /usr/bin/clang
cxx: /usr/bin/clang++
f77: /path/to/bin/gfortran
fc: /path/to/bin/gfortran
spec: apple-clang@11.0.0
If you used Spack to install GCC, you can get the installation prefix by
``spack location -i gcc`` (this will only work if you have a single version
of GCC installed). Whereas for Homebrew, GCC is installed in
``/usr/local/Cellar/gcc/x.y.z``. With the DMG installer, the correct path
will be ``/usr/local/gfortran``.
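Under the newer ``packages.yaml`` layout, the same ``gfortran`` can instead be recorded as an external ``gcc`` entry; a sketch, assuming the DMG-installer prefix (the version is illustrative):

.. code-block:: yaml

   packages:
     gcc:
       externals:
       - spec: gcc@13.2.0 languages='fortran'
         prefix: /usr/local/gfortran
         extra_attributes:
           compilers:
             fortran: /usr/local/gfortran/bin/gfortran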
^^^^^^^^^^^^^^^^^^^^^
Compiler Verification
^^^^^^^^^^^^^^^^^^^^^
You can verify that your compilers are configured properly by installing a simple package. For example:
You can verify that your compilers are configured properly by installing a
simple package. For example:
.. code-block:: console
$ spack install zlib-ng%gcc@5.3.0
$ spack install zlib%gcc@5.3.0
.. _vendor-specific-compiler-configuration:
@@ -620,7 +707,9 @@ You can verify that your compilers are configured properly by installing a simpl
Vendor-Specific Compiler Configuration
--------------------------------------
This section provides details on how to get vendor-specific compilers working.
With Spack, things usually "just work" with GCC. Not so for other
compilers. This section provides details on how to get specific
compilers working.
^^^^^^^^^^^^^^^
Intel Compilers
@@ -642,8 +731,8 @@ compilers:
you have installed from the ``PATH`` environment variable.
If you want use a version of ``gcc`` or ``g++`` other than the default
version on your system, you need to use either the ``--gcc-install-dir``
or ``--gcc-toolchain`` compiler option to specify the path to the version of
version on your system, you need to use either the ``-gcc-name``
or ``-gxx-name`` compiler option to specify the path to the version of
``gcc`` or ``g++`` that you want to use."
-- `Intel Reference Guide <https://software.intel.com/en-us/node/522750>`_
@@ -651,12 +740,76 @@ compilers:
Intel compilers may therefore be configured in one of two ways with
Spack: using modules, or using compiler flags.
""""""""""""""""""""""""""
Configuration with Modules
""""""""""""""""""""""""""
One can control which GCC is seen by the Intel compiler with modules.
A module must be loaded both for the Intel Compiler (so it will run)
and GCC (so the compiler can find the intended GCC). The following
configuration in ``compilers.yaml`` illustrates this technique:
.. code-block:: yaml
compilers:
- compiler:
modules: [gcc-4.9.3, intel-15.0.24]
operating_system: centos7
paths:
cc: /opt/intel-15.0.24/bin/icc-15.0.24-beta
cxx: /opt/intel-15.0.24/bin/icpc-15.0.24-beta
f77: /opt/intel-15.0.24/bin/ifort-15.0.24-beta
fc: /opt/intel-15.0.24/bin/ifort-15.0.24-beta
spec: intel@15.0.24.4.9.3
.. note::
The version number on the Intel compiler is a combination of
the "native" Intel version number and the GNU compiler it is
targeting.
""""""""""""""""""""""""""
Command Line Configuration
""""""""""""""""""""""""""
One can also control which GCC is seen by the Intel compiler by adding
flags to the ``icc`` command:
#. Identify the location of the compiler you just installed:
.. code-block:: console
$ spack location --install-dir gcc
~/spack/opt/spack/linux-centos7-x86_64/gcc-4.9.3-iy4rw...
#. Set up ``compilers.yaml``, for example:
.. code-block:: yaml
compilers:
- compiler:
modules: [intel-15.0.24]
operating_system: centos7
paths:
cc: /opt/intel-15.0.24/bin/icc-15.0.24-beta
cxx: /opt/intel-15.0.24/bin/icpc-15.0.24-beta
f77: /opt/intel-15.0.24/bin/ifort-15.0.24-beta
fc: /opt/intel-15.0.24/bin/ifort-15.0.24-beta
flags:
cflags: -gcc-name ~/spack/opt/spack/linux-centos7-x86_64/gcc-4.9.3-iy4rw.../bin/gcc
cxxflags: -gxx-name ~/spack/opt/spack/linux-centos7-x86_64/gcc-4.9.3-iy4rw.../bin/g++
fflags: -gcc-name ~/spack/opt/spack/linux-centos7-x86_64/gcc-4.9.3-iy4rw.../bin/gcc
spec: intel@15.0.24.4.9.3
^^^
NAG
^^^
The Numerical Algorithms Group provides a licensed Fortran compiler.
It is recommended to use GCC for your C/C++ compilers.
The Numerical Algorithms Group provides a licensed Fortran compiler. Like Clang,
this requires you to set up a :ref:`mixed-toolchains`. It is recommended to use
GCC for your C/C++ compilers.
The NAG Fortran compilers are a bit more strict than other compilers, and many
packages will fail to install with error messages like:
@@ -673,40 +826,44 @@ the command line:
$ spack install openmpi fflags="-mismatch"
Or it can be set permanently in your ``packages.yaml``:
Or it can be set permanently in your ``compilers.yaml``:
.. code-block:: yaml
packages:
nag:
externals:
- spec: nag@6.1
prefix: /opt/nag/bin
extra_attributes:
compilers:
fortran: /opt/nag/bin/nagfor
flags:
fflags: -mismatch
- compiler:
modules: []
operating_system: centos6
paths:
cc: /soft/spack/opt/spack/linux-x86_64/gcc-5.3.0/gcc-6.1.0-q2zosj3igepi3pjnqt74bwazmptr5gpj/bin/gcc
cxx: /soft/spack/opt/spack/linux-x86_64/gcc-5.3.0/gcc-6.1.0-q2zosj3igepi3pjnqt74bwazmptr5gpj/bin/g++
f77: /soft/spack/opt/spack/linux-x86_64/gcc-4.4.7/nag-6.1-jt3h5hwt5myezgqguhfsan52zcskqene/bin/nagfor
fc: /soft/spack/opt/spack/linux-x86_64/gcc-4.4.7/nag-6.1-jt3h5hwt5myezgqguhfsan52zcskqene/bin/nagfor
flags:
fflags: -mismatch
spec: nag@6.1
---------------
System Packages
---------------
Once compilers are configured, one needs to determine which pre-installed system packages,
if any, to use in builds. These are also configured in the ``~/.spack/packages.yaml`` file.
For example, to use an OpenMPI installed in /opt/local, one would use:
Once compilers are configured, one needs to determine which
pre-installed system packages, if any, to use in builds. This is
configured in the file ``~/.spack/packages.yaml``. For example, to use
an OpenMPI installed in /opt/local, one would use:
.. code-block:: yaml
packages:
openmpi:
buildable: False
externals:
- spec: openmpi@1.10.1
prefix: /opt/local
packages:
openmpi:
externals:
- spec: openmpi@1.10.1
prefix: /opt/local
buildable: False
In general, *Spack is easier to use and more reliable if it builds all of its own dependencies*.
However, there are several packages for which one commonly needs to use system versions:
In general, Spack is easier to use and more reliable if it builds all of
its own dependencies. However, there are several packages for which one
commonly needs to use system versions:
^^^
MPI
@@ -719,7 +876,8 @@ you are unlikely to get a working MPI from Spack. Instead, use an
appropriate pre-installed MPI.
If you choose a pre-installed MPI, you should consider using the
pre-installed compiler used to build that MPI.
pre-installed compiler used to build that MPI; see above on
``compilers.yaml``.
^^^^^^^
OpenSSL
@@ -1283,9 +1441,9 @@ To configure Spack, first run the following command inside the Spack console:
spack compiler find
This creates a ``.staging`` directory in our Spack prefix, along with a ``windows`` subdirectory
containing a ``packages.yaml`` file. On a fresh Windows install with the above packages
containing a ``compilers.yaml`` file. On a fresh Windows install with the above packages
installed, this command should only detect Microsoft Visual Studio and the Intel Fortran
compiler will be integrated within the first version of MSVC present in the ``packages.yaml``
compiler will be integrated within the first version of MSVC present in the ``compilers.yaml``
output.
Spack provides a default ``config.yaml`` file for Windows that it will use unless overridden.

View File

@@ -23,6 +23,7 @@ components for use by dependent packages:
packages:
all:
compiler: [rocmcc@=5.3.0]
variants: amdgpu_target=gfx90a
hip:
buildable: false
@@ -69,15 +70,16 @@ This is in combination with the following compiler definition:
.. code-block:: yaml
packages:
llvm-amdgpu:
externals:
- spec: llvm-amdgpu@=5.3.0
prefix: /opt/rocm-5.3.0
compilers:
c: /opt/rocm-5.3.0/bin/amdclang
cxx: /opt/rocm-5.3.0/bin/amdclang++
fortran: null
compilers:
- compiler:
spec: rocmcc@=5.3.0
paths:
cc: /opt/rocm-5.3.0/bin/amdclang
cxx: /opt/rocm-5.3.0/bin/amdclang++
f77: null
fc: /opt/rocm-5.3.0/bin/amdflang
operating_system: rhel8
target: x86_64
This includes the following considerations:

View File

@@ -1,65 +0,0 @@
.. Copyright Spack Project Developers. See COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _include-yaml:
===============================
Include Settings (include.yaml)
===============================
Spack allows you to include configuration files through ``include.yaml``.
Using the ``include:`` heading results in pulling in external configuration
information to be used by any Spack command.
Included configuration files are required *unless* they are explicitly optional
or the entry's condition evaluates to ``false``. Optional includes are specified
with the ``optional`` clause and conditional with the ``when`` clause. For
example,
.. code-block:: yaml
include:
- /path/to/a/required/config.yaml
- path: /path/to/$os/$target/config
optional: true
- path: /path/to/os-specific/config-dir
when: os == "ventura"
shows all three. The first entry, ``/path/to/a/required/config.yaml``,
indicates that the included ``config.yaml`` file is required (so it must exist).
Use of ``optional: true`` for ``/path/to/$os/$target/config`` means
the path is only included if it exists. The condition ``os == "ventura"``
in the ``when`` clause for ``/path/to/os-specific/config-dir`` means the
path is only included when the operating system (``os``) is ``ventura``.
The same conditions and variables in `Spec List References
<https://spack.readthedocs.io/en/latest/environments.html#spec-list-references>`_
can be used for conditional activation in the ``when`` clauses.
Included files can be specified by path or by their parent directory.
Paths may be absolute, relative (to the configuration file including the path),
or specified as URLs. Only the ``file``, ``ftp``, ``http`` and ``https`` protocols (or
schemes) are supported. Spack-specific, environment and user path variables
can be used. (See :ref:`config-file-variables` for more information.)
A ``sha256`` is required for remote file URLs and must be specified as follows:
.. code-block:: yaml
include:
- path: https://github.com/path/to/raw/config/compilers.yaml
sha256: 26e871804a92cd07bb3d611b31b4156ae93d35b6a6d6e0ef3a67871fcb1d258b
Additionally, remote file URLs must link to the **raw** form of the file's
contents (e.g., `GitHub
<https://docs.github.com/en/repositories/working-with-files/using-files/viewing-and-understanding-files#viewing-or-copying-the-raw-file-content>`_
or `GitLab
<https://docs.gitlab.com/ee/api/repository_files.html#get-raw-file-from-repository>`_).
.. warning::
Recursive includes are not currently processed in a breadth-first manner
so the value of a configuration option that is altered by multiple included
files may not be what you expect. This will be addressed in a future
update.

View File

@@ -71,11 +71,9 @@ or refer to the full manual below.
configuration
config_yaml
include_yaml
packages_yaml
build_settings
environments
env_vars_yaml
containers
mirrors
module_file_support
@@ -103,7 +101,6 @@ or refer to the full manual below.
:caption: API Docs
Spack API Docs <spack>
Spack Builtin Repo <spack_repo>
LLNL API Docs <llnl>
==================

View File

@@ -8,7 +8,7 @@
Modules (modules.yaml)
======================
The use of module systems to manage user environments in a controlled way
The use of module systems to manage user environment in a controlled way
is a common practice at HPC centers that is sometimes embraced also by
individual programmers on their development machines. To support this
common practice Spack integrates with `Environment Modules
@@ -128,7 +128,7 @@ depend on the spec:
.. code-block:: python
def setup_run_environment(self, env: EnvironmentModifications) -> None:
def setup_run_environment(self, env):
if self.spec.satisfies("+foo"):
env.set("FOO", "bar")
@@ -142,7 +142,7 @@ For example, a simplified version of the ``python`` package could look like this
.. code-block:: python
def setup_dependent_run_environment(self, env: EnvironmentModifications, dependent_spec: Spec) -> None:
def setup_dependent_run_environment(self, env, dependent_spec):
if dependent_spec.package.extends(self.spec):
env.prepend_path("PYTHONPATH", dependent_spec.prefix.lib.python)
@@ -456,13 +456,14 @@ For instance, the following config options,
tcl:
all:
suffixes:
^python@3: 'python{^python.version.up_to_2}'
^python@3: 'python{^python.version}'
^openblas: 'openblas'
will add a ``python3.12`` to module names of packages compiled with Python 3.12, and similarly for
all specs depending on ``python@3``. This is useful to know which version of Python a set of Python
extensions is associated with. Likewise, the ``openblas`` string is attached to any program that
has openblas in the spec, most likely via the ``+blas`` variant specification.
will add a ``python-3.12.1`` version string to any packages compiled with
Python matching the spec, ``python@3``. This is useful to know which
version of Python a set of Python extensions is associated with. Likewise, the
``openblas`` string is attached to any program that has openblas in the spec,
most likely via the ``+blas`` variant specification.
The most heavyweight solution to module naming is to change the entire
naming convention for module files. This uses the projections format
@@ -490,7 +491,7 @@ that are already in the Lmod hierarchy.
.. note::
Tcl and Lua modules also allow for explicit conflicts between module files.
Tcl and Lua modules also allow for explicit conflicts between modulefiles.
.. code-block:: yaml
@@ -513,7 +514,7 @@ that are already in the Lmod hierarchy.
:meth:`~spack.spec.Spec.format` method.
For Lmod and Environment Modules versions prior 4.2, it is important to
express the conflict on both module files conflicting with each other.
express the conflict on both modulefiles conflicting with each other.
.. note::
@@ -550,7 +551,7 @@ that are already in the Lmod hierarchy.
.. warning::
Consistency of Core packages
The user is responsible for maintaining consistency among core packages, as ``core_specs``
The user is responsible for maintining consistency among core packages, as ``core_specs``
bypasses the hierarchy that allows Lmod to safely switch between coherent software stacks.
.. warning::

View File

@@ -486,8 +486,6 @@ present. For instance with a configuration like:
you will use ``mvapich2~cuda %gcc`` as an ``mpi`` provider.
.. _package-strong-preferences:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Conflicts and strong preferences
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -557,13 +555,14 @@ preferences.
FAQ: :ref:`Why does Spack pick particular versions and variants? <faq-concretizer-precedence>`
The ``target`` and ``providers`` preferences
Most package preferences (``compilers``, ``target`` and ``providers``)
can only be set globally under the ``all`` section of ``packages.yaml``:
.. code-block:: yaml
packages:
all:
compiler: [gcc@12.2.0, clang@12:, oneapi@2023:]
target: [x86_64_v3]
providers:
mpi: [mvapich2, mpich, openmpi]

View File

@@ -69,7 +69,7 @@ An example for ``CMake`` is, for instance:
The predefined steps for each build system are called "phases".
In general, the name and order in which the phases will be executed can be
obtained by either reading the API docs at :py:mod:`~.spack_repo.builtin.build_systems`, or
obtained by either reading the API docs at :py:mod:`~.spack.build_systems`, or
using the ``spack info`` command:
.. code-block:: console
@@ -158,7 +158,7 @@ builder class explicitly. Using the same example as above, this reads:
url_fmt = "https://github.com/uclouvain/openjpeg/archive/version.{0}.tar.gz"
return url_fmt.format(version)
class CMakeBuilder(spack_repo.builtin.build_systems.cmake.CMakeBuilder):
class CMakeBuilder(spack.build_systems.cmake.CMakeBuilder):
def cmake_args(self):
args = [
self.define_from_variant("BUILD_CODEC", "codec"),
@@ -179,7 +179,7 @@ Spack can be found at :ref:`package_class_structure`.
.. code-block:: python
class Foo(CMakePackage):
class Foo(CmakePackage):
def cmake_args(self):
...
@@ -256,7 +256,7 @@ for details):
#
# See the Spack documentation for more information on packaging.
# ----------------------------------------------------------------------------
import spack_repo.builtin.build_systems.autotools
import spack.build_systems.autotools
from spack.package import *
@@ -369,9 +369,9 @@ If you have a collection of software expected to work well together with
no source code of its own, you can create a :ref:`BundlePackage <bundlepackage>`.
Examples where bundle packages can be useful include defining suites of
applications (e.g, `EcpProxyApps
<https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/ecp_proxy_apps/package.py>`_), commonly used libraries
(e.g., `AmdAocl <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/amd_aocl/package.py>`_),
and software development kits (e.g., `EcpDataVisSdk <https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/ecp_data_vis_sdk/package.py>`_).
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/ecp-proxy-apps/package.py>`_), commonly used libraries
(e.g., `AmdAocl <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/amd-aocl/package.py>`_),
and software development kits (e.g., `EcpDataVisSdk <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/ecp-data-vis-sdk/package.py>`_).
These versioned packages primarily consist of dependencies on the associated
software packages. They can include :ref:`variants <variants>` to ensure
@@ -443,7 +443,7 @@ lives in:
.. code-block:: console
$ spack location -p gmp
${SPACK_ROOT}/var/spack/repos/spack_repo/builtin/packages/gmp/package.py
${SPACK_ROOT}/var/spack/repos/builtin/packages/gmp/package.py
but ``spack edit`` provides a much simpler shortcut and saves you the
trouble of typing the full path.
@@ -457,19 +457,19 @@ live in Spack's directory structure. In general, :ref:`cmd-spack-create`
handles creating package files for you, so you can skip most of the
details here.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``var/spack/repos/spack_repo/builtin/packages``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``var/spack/repos/builtin/packages``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A Spack installation directory is structured like a standard UNIX
install prefix (``bin``, ``lib``, ``include``, ``var``, ``opt``,
etc.). Most of the code for Spack lives in ``$SPACK_ROOT/lib/spack``.
Packages themselves live in ``$SPACK_ROOT/var/spack/repos/spack_repo/builtin/packages``.
Packages themselves live in ``$SPACK_ROOT/var/spack/repos/builtin/packages``.
If you ``cd`` to that directory, you will see directories for each
package:
.. command-output:: cd $SPACK_ROOT/var/spack/repos/spack_repo/builtin/packages && ls
.. command-output:: cd $SPACK_ROOT/var/spack/repos/builtin/packages && ls
:shell:
:ellipsis: 10
@@ -479,7 +479,7 @@ package lives in:
.. code-block:: none
$SPACK_ROOT/var/spack/repos/spack_repo/builtin/packages/libelf/package.py
$SPACK_ROOT/var/spack/repos/builtin/packages/libelf/package.py
Alongside the ``package.py`` file, a package may contain extra
directories or files (like patches) that it needs to build.
@@ -492,12 +492,12 @@ Packages are named after the directory containing ``package.py``. So,
``libelf``'s ``package.py`` lives in a directory called ``libelf``.
The ``package.py`` file defines a class called ``Libelf``, which
extends Spack's ``Package`` class. For example, here is
``$SPACK_ROOT/var/spack/repos/spack_repo/builtin/packages/libelf/package.py``:
``$SPACK_ROOT/var/spack/repos/builtin/packages/libelf/package.py``:
.. code-block:: python
:linenos:
from spack.package import *
from spack import *
class Libelf(Package):
""" ... description ... """
@@ -520,7 +520,7 @@ these:
$ spack install libelf@0.8.13
Spack sees the package name in the spec and looks for
``libelf/package.py`` in ``var/spack/repos/spack_repo/builtin/packages``.
``libelf/package.py`` in ``var/spack/repos/builtin/packages``.
Likewise, if you run ``spack install py-numpy``, Spack looks for
``py-numpy/package.py``.
@@ -686,7 +686,7 @@ https://www.open-mpi.org/software/ompi/v2.1/downloads/openmpi-2.1.1.tar.bz2
In order to handle this, you can define a ``url_for_version()`` function
like so:
.. literalinclude:: _spack_root/var/spack/repos/spack_repo/builtin/packages/openmpi/package.py
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/openmpi/package.py
:pyobject: Openmpi.url_for_version
With the use of this ``url_for_version()``, Spack knows to download OpenMPI ``2.1.1``
@@ -787,7 +787,7 @@ of GNU. For that, Spack goes a step further and defines a mixin class that
takes care of all of the plumbing and requires packagers to just define a proper
``gnu_mirror_path`` attribute:
.. literalinclude:: _spack_root/var/spack/repos/spack_repo/builtin/packages/autoconf/package.py
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/autoconf/package.py
:lines: 9-18
^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1089,7 +1089,7 @@ You've already seen the ``homepage`` and ``url`` package attributes:
.. code-block:: python
:linenos:
from spack.package import *
from spack import *
class Mpich(Package):
@@ -1212,7 +1212,7 @@ class-level tarball URL and VCS. For example:
version("master", branch="master")
version("12.12.1", md5="ecd4606fa332212433c98bf950a69cc7")
version("12.10.1", md5="667333dbd7c0f031d47d7c5511fd0810")
version("12.8.1", md5="9f37f683ee2b427b5540db8a20ed6b15")
version("12.8.1", "9f37f683ee2b427b5540db8a20ed6b15")
If a package contains both a ``url`` and ``git`` class-level attribute,
Spack decides which to use based on the arguments to the ``version()``
@@ -1343,7 +1343,7 @@ Submodules
version("1.0.1", tag="v1.0.1", submodules=True)
If a package needs more fine-grained control over submodules, define
If a package has needs more fine-grained control over submodules, define
``submodules`` to be a callable function that takes the package instance as
its only argument. The function should return a list of submodules to be fetched.
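A minimal sketch of that callable form follows; the package, variant, and submodule
names are hypothetical and only illustrate the shape of the function:

.. code-block:: python

   from spack.package import *


   def submodules(package):
       # Hypothetical selector: always fetch "core", add "docs" when +docs is requested.
       submodules = ["core"]
       if package.spec.satisfies("+docs"):
           submodules.append("docs")
       return submodules


   class MyPkg(Package):
       """Hypothetical package illustrating a callable ``submodules`` argument."""

       git = "https://example.com/my-pkg.git"

       variant("docs", default=False, description="Also fetch the documentation submodule")

       version("1.0.1", tag="v1.0.1", submodules=submodules)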
@@ -1995,7 +1995,7 @@ structure like this:
.. code-block:: none
$SPACK_ROOT/var/spack/repos/spack_repo/builtin/packages/
$SPACK_ROOT/var/spack/repos/builtin/packages/
mvapich2/
package.py
ad_lustre_rwcontig_open_source.patch
@@ -2133,7 +2133,7 @@ handles ``RPATH``:
.. _pyside-patch:
.. literalinclude:: _spack_root/var/spack/repos/spack_repo/builtin/packages/py_pyside/package.py
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/py-pyside/package.py
:pyobject: PyPyside.patch
:linenos:
@@ -2201,7 +2201,7 @@ using the ``spack resource show`` command::
$ spack resource show 3877ab54
3877ab548f88597ab2327a2230ee048d2d07ace1062efe81fc92e91b7f39cd00
path: /home/spackuser/src/spack/var/spack/repos/spack_repo/builtin/packages/m4/gnulib-pgi.patch
path: /home/spackuser/src/spack/var/spack/repos/builtin/packages/m4/gnulib-pgi.patch
applies to: builtin.m4
``spack resource show`` looks up downloadable resources from package
@@ -2219,7 +2219,7 @@ wonder where the extra boost patches are coming from::
^boost@1.68.0%apple-clang@9.0.0+atomic+chrono~clanglibcpp cxxstd=default +date_time~debug+exception+filesystem+graph~icu+iostreams+locale+log+math~mpi+multithreaded~numpy patches=2ab6c72d03dec6a4ae20220a9dfd5c8c572c5294252155b85c6874d97c323199,b37164268f34f7133cbc9a4066ae98fda08adf51e1172223f6a969909216870f ~pic+program_options~python+random+regex+serialization+shared+signals~singlethreaded+system~taggedlayout+test+thread+timer~versionedlayout+wave arch=darwin-highsierra-x86_64
$ spack resource show b37164268
b37164268f34f7133cbc9a4066ae98fda08adf51e1172223f6a969909216870f
path: /home/spackuser/src/spack/var/spack/repos/spack_repo/builtin/packages/dealii/boost_1.68.0.patch
path: /home/spackuser/src/spack/var/spack/repos/builtin/packages/dealii/boost_1.68.0.patch
applies to: builtin.boost
patched by: builtin.dealii
@@ -2253,15 +2253,22 @@ RPATHs in Spack are handled in one of three ways:
set in standard variables like ``CC``, ``CXX``, ``F77``, and ``FC``,
so most build systems (autotools and many gmake systems) pick them
up and use them.
#. CMake has its own RPATH handling, and distinguishes between build and
install RPATHs. By default, during the build it registers RPATHs to
all libraries it links to, so that just-built executables can be run
during the build itself. Upon installation, these RPATHs are cleared,
unless the user defines the install RPATHs. When inheriting from
``CMakePackage``, Spack handles this automatically, and sets
``CMAKE_INSTALL_RPATH_USE_LINK_PATH`` and ``CMAKE_INSTALL_RPATH``,
so that libraries of dependencies and the package's own libraries
can be found at runtime.
#. CMake also respects Spack's compiler wrappers, but many CMake
builds have logic to overwrite RPATHs when binaries are
installed. Spack provides the ``std_cmake_args`` variable, which
includes parameters necessary for CMake build use the right
installation RPATH. It can be used like this when ``cmake`` is
invoked:
.. code-block:: python
class MyPackage(Package):
...
def install(self, spec, prefix):
cmake("..", *std_cmake_args)
make()
make("install")
#. If you need to modify the build to add your own RPATHs, you can
use the ``self.rpath`` property of your package, which will
return a list of all the RPATHs that Spack will use when it
@@ -2308,19 +2315,31 @@ looks like this:
parallel = False
You can also disable parallel builds only for specific make
invocation:
Similarly, you can disable parallel builds only for specific make
commands, as ``libdwarf`` does:
.. code-block:: python
:emphasize-lines: 5
:emphasize-lines: 9, 12
:linenos:
class Libelf(Package):
...
def install(self, spec, prefix):
configure("--prefix=" + prefix,
"--enable-shared",
"--disable-dependency-tracking",
"--disable-debug")
make()
# The mkdir commands in libelf's install can fail in parallel
make("install", parallel=False)
The first make will run in parallel here, but the second will not. If
you set ``parallel`` to ``False`` at the package level, then each call
to ``make()`` will be sequential by default, but packagers can call
``make(parallel=True)`` to override it.
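Putting the two pieces together, a minimal sketch (package name and configure
arguments are hypothetical) of a package that builds serially but installs in
parallel could look like:

.. code-block:: python

   from spack.package import *


   class SerialBuild(Package):
       """Hypothetical package that builds serially but installs in parallel."""

       # Calls to make() default to a single job because of this class attribute.
       parallel = False

       def install(self, spec, prefix):
           configure("--prefix=" + prefix)
           make()                           # sequential, per the class-level default
           make("install", parallel=True)   # explicitly re-enable parallelism here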
Note that the ``--jobs`` option works out of the box for all standard
build systems. If you are using a non-standard build system instead, you
can use the variable ``make_jobs`` to extract the number of jobs specified
@@ -2495,7 +2514,7 @@ necessary when there are breaking changes in the dependency that the
package cannot handle. In Spack we often add forward compatibility
bounds only at the time a new, breaking version of a dependency is
released. As with backward compatibility, it is typical to see a list
of forward compatibility bounds in a package file as separate lines:
of forward compatibility bounds in a package file as seperate lines:
.. code-block:: python
@@ -2911,7 +2930,7 @@ this, Spack provides four different methods that can be overridden in a package:
The Qt package, for instance, uses this call:
.. literalinclude:: _spack_root/var/spack/repos/spack_repo/builtin/packages/qt/package.py
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/qt/package.py
:pyobject: Qt.setup_dependent_build_environment
:linenos:
@@ -2939,7 +2958,7 @@ variables to be used by the dependent. This is done by implementing
:meth:`setup_dependent_package <spack.package_base.PackageBase.setup_dependent_package>`. An
example of this can be found in the ``Python`` package:
.. literalinclude:: _spack_root/var/spack/repos/spack_repo/builtin/packages/python/package.py
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/python/package.py
:pyobject: Python.setup_dependent_package
:linenos:
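Since the included file is not reproduced here, the following pared-down sketch only
illustrates the general idea; the attribute name is hypothetical and is not the actual
API set up by the ``Python`` package:

.. code-block:: python

   def setup_dependent_package(self, module, dependent_spec):
       # Hypothetical: expose the interpreter path to dependent packages as a
       # module-level convenience attribute they can use during their build.
       module.python_exe = self.prefix.bin.python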
@@ -3371,7 +3390,7 @@ the above attribute implementations:
"/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/lib/libFooBaz.so"
])
# baz library directories in the baz subdirectory of the foo prefix
# baz library directories in the baz subdirectory of the foo porefix
>>> spec["baz"].libs.directories
[
"/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/lib"
@@ -3685,57 +3704,60 @@ the build system. The build systems currently supported by Spack are:
+----------------------------------------------------------+----------------------------------+
| **API docs** | **Description** |
+==========================================================+==================================+
| :class:`~spack_repo.builtin.build_systems.generic` | Generic build system without any |
| :class:`~spack.build_systems.generic` | Generic build system without any |
| | base implementation |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.makefile` | Specialized build system for |
| :class:`~spack.build_systems.makefile` | Specialized build system for |
| | software built invoking |
| | hand-written Makefiles |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.autotools` | Specialized build system for |
| :class:`~spack.build_systems.autotools` | Specialized build system for |
| | software built using |
| | GNU Autotools |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.cmake` | Specialized build system for |
| :class:`~spack.build_systems.cmake` | Specialized build system for |
| | software built using CMake |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.maven` | Specialized build system for |
| :class:`~spack.build_systems.maven` | Specialized build system for |
| | software built using Maven |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.meson` | Specialized build system for |
| :class:`~spack.build_systems.meson` | Specialized build system for |
| | software built using Meson |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.nmake` | Specialized build system for |
| :class:`~spack.build_systems.nmake` | Specialized build system for |
| | software built using NMake |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.qmake` | Specialized build system for |
| :class:`~spack.build_systems.qmake` | Specialized build system for |
| | software built using QMake |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.scons` | Specialized build system for |
| :class:`~spack.build_systems.scons` | Specialized build system for |
| | software built using SCons |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.waf` | Specialized build system for |
| :class:`~spack.build_systems.waf` | Specialized build system for |
| | software built using Waf |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.r` | Specialized build system for |
| :class:`~spack.build_systems.r` | Specialized build system for |
| | R extensions |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.octave` | Specialized build system for |
| :class:`~spack.build_systems.octave` | Specialized build system for |
| | Octave packages |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.python` | Specialized build system for |
| :class:`~spack.build_systems.python` | Specialized build system for |
| | Python extensions |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.perl` | Specialized build system for |
| :class:`~spack.build_systems.perl` | Specialized build system for |
| | Perl extensions |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.ruby` | Specialized build system for |
| :class:`~spack.build_systems.ruby` | Specialized build system for |
| | Ruby extensions |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.oneapi` | Specialized build system for |
| :class:`~spack.build_systems.intel` | Specialized build system for |
| | licensed Intel software |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.oneapi` | Specialized build system for |
| | Intel oneAPI software |
+----------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.aspell_dict` | Specialized build system for |
| :class:`~spack.build_systems.aspell_dict` | Specialized build system for |
| | Aspell dictionaries |
+----------------------------------------------------------+----------------------------------+
@@ -3747,7 +3769,7 @@ the build system. The build systems currently supported by Spack are:
rare cases where manual intervention is needed we need to stress that a
package base class depends on the *build system* being used, not the language of the package.
For example, a Python extension installed with CMake would ``extends("python")`` and
subclass from :class:`~spack_repo.builtin.build_systems.cmake.CMakePackage`.
subclass from :class:`~spack.build_systems.cmake.CMakePackage`.
^^^^^^^^^^^^^^^^^^^^^^^^^^
Overriding builder methods
@@ -3755,7 +3777,7 @@ Overriding builder methods
Build-system "phases" have default implementations that fit most of the common cases:
.. literalinclude:: _spack_root/var/spack/repos/spack_repo/builtin/build_systems/autotools.py
.. literalinclude:: _spack_root/lib/spack/spack/build_systems/autotools.py
:pyobject: AutotoolsBuilder.configure
:linenos:
@@ -3763,13 +3785,13 @@ It is usually sufficient for a packager to override a few
build system specific helper methods or attributes to provide, for instance,
configure arguments:
.. literalinclude:: _spack_root/var/spack/repos/spack_repo/builtin/packages/m4/package.py
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/m4/package.py
:pyobject: M4.configure_args
:linenos:
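The ``M4`` code above is pulled in from the package file itself; as a rough,
hypothetical illustration of the pattern, such an override typically just assembles a
list of configure arguments based on the spec:

.. code-block:: python

   class MyAutotoolsPkg(AutotoolsPackage):
       def configure_args(self):
           # Hypothetical flags driven by a hypothetical "fortran" variant.
           args = ["--enable-shared"]
           if self.spec.satisfies("+fortran"):
               args.append("--enable-fortran")
           else:
               args.append("--disable-fortran")
           return args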
Each specific build system has a list of attributes and methods that can be overridden to
fine-tune the installation of a package without overriding an entire phase. To
have more information on them the place to go is the API docs of the :py:mod:`~.spack_repo.builtin.build_systems`
have more information on them the place to go is the API docs of the :py:mod:`~.spack.build_systems`
module.
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -3811,7 +3833,7 @@ If the ``package.py`` has build instructions in a separate
.. code-block:: python
class CMakeBuilder(spack_repo.builtin.build_systems.cmake.CMakeBuilder):
class CMakeBuilder(spack.build_systems.cmake.CMakeBuilder):
def install(self, pkg, spec, prefix):
...
@@ -3824,32 +3846,31 @@ Mixin base classes
Besides build systems, there are other cases where common metadata and behavior can be extracted
and reused by many packages. For instance, packages that depend on ``Cuda`` or ``Rocm``, share
common dependencies and constraints. To factor these attributes into a single place, Spack provides
a few mixin classes in the ``spack_repo.builtin.build_systems`` module:
a few mixin classes in the ``spack.build_systems`` module:
+----------------------------------------------------------------------------+----------------------------------+
| **API docs** | **Description** |
+============================================================================+==================================+
| :class:`~spack_repo.builtin.build_systems.cuda.CudaPackage` | A helper class for packages that |
| | use CUDA |
+----------------------------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.rocm.ROCmPackage` | A helper class for packages that |
| | use ROCm |
+----------------------------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.gnu.GNUMirrorPackage` | A helper class for GNU packages |
| | |
+----------------------------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.python.PythonExtension` | A helper class for Python |
| | extensions |
+----------------------------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.sourceforge.SourceforgePackage` | A helper class for packages |
| | from sourceforge.org |
+----------------------------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.sourceware.SourcewarePackage` | A helper class for packages |
| | from sourceware.org |
+----------------------------------------------------------------------------+----------------------------------+
| :class:`~spack_repo.builtin.build_systems.xorg.XorgPackage` | A helper class for x.org |
| | packages |
+----------------------------------------------------------------------------+----------------------------------+
+---------------------------------------------------------------+----------------------------------+
| **API docs** | **Description** |
+===============================================================+==================================+
| :class:`~spack.build_systems.cuda.CudaPackage` | A helper class for packages that |
| | use CUDA |
+---------------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.rocm.ROCmPackage` | A helper class for packages that |
| | use ROCm |
+---------------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.gnu.GNUMirrorPackage` | A helper class for GNU packages |
+---------------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.python.PythonExtension` | A helper class for Python |
| | extensions |
+---------------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.sourceforge.SourceforgePackage` | A helper class for packages |
| | from sourceforge.org |
+---------------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.sourceware.SourcewarePackage` | A helper class for packages |
| | from sourceware.org |
+---------------------------------------------------------------+----------------------------------+
| :class:`~spack.build_systems.xorg.XorgPackage` | A helper class for x.org |
| | packages |
+---------------------------------------------------------------+----------------------------------+
These classes should be used by adding them to the inheritance tree of the package that needs them,
for instance:
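Since the example that normally follows is not shown in this excerpt, here is a
minimal hypothetical sketch; the package name, URL, checksum, and CMake option are
all placeholders:

.. code-block:: python

   from spack.package import *


   class MySimulation(CMakePackage, CudaPackage):
       """Hypothetical package reusing the CUDA mixin class."""

       homepage = "https://example.com/my-simulation"
       url = "https://example.com/my-simulation-1.0.tar.gz"

       # Placeholder checksum for illustration only.
       version("1.0", sha256="0000000000000000000000000000000000000000000000000000000000000000")

       def cmake_args(self):
           # The "cuda" variant is provided by the CudaPackage mixin.
           return [self.define_from_variant("ENABLE_CUDA", "cuda")]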
@@ -3893,13 +3914,13 @@ Additional build instructions are split into separate builder classes:
.. code-block:: python
class CMakeBuilder(spack_repo.builtin.build_systems.cmake.CMakeBuilder):
class CMakeBuilder(spack.build_systems.cmake.CMakeBuilder):
def cmake_args(self):
return [
self.define_from_variant("MY_FEATURE", "my_feature")
]
class AutotoolsBuilder(spack_repo.builtin.build_systems.autotools.AutotoolsBuilder):
class AutotoolsBuilder(spack.build_systems.autotools.AutotoolsBuilder):
def configure_args(self):
return self.with_or_without("my-feature", variant="my_feature")
@@ -4089,7 +4110,7 @@ Shell command functions
Recall the install method from ``libelf``:
.. literalinclude:: _spack_root/var/spack/repos/spack_repo/builtin/packages/libelf/package.py
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/libelf/package.py
:pyobject: Libelf.install
:linenos:
@@ -4880,7 +4901,7 @@ the one passed to install, only the MPI implementations all set some
additional properties on it to help you out. E.g., in openmpi, you'll
find this:
.. literalinclude:: _spack_root/var/spack/repos/spack_repo/builtin/packages/openmpi/package.py
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/openmpi/package.py
:pyobject: Openmpi.setup_dependent_package
That code allows the ``openmpi`` package to associate an ``mpicc`` property
@@ -5728,7 +5749,7 @@ running each executable, ``foo`` and ``bar``, as independent test parts.
.. note::
The method name ``copy_test_files`` here is for illustration purposes.
You are free to use a name that is better suited to your package.
You are free to use a name that is more suited to your package.
The key to copying files for stand-alone testing at build time is use
of the ``run_after`` directive, which ensures the associated files are
@@ -5980,16 +6001,16 @@ with those implemented in the package itself.
* - Parent/Provider Package
- Stand-alone Tests
* - `C
<https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/c>`_
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/c>`_
- Compiles ``hello.c`` and runs it
* - `Cxx
<https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/cxx>`_
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/cxx>`_
- Compiles and runs several ``hello`` programs
* - `Fortran
<https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/fortran>`_
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/fortran>`_
- Compiles and runs ``hello`` programs (``F`` and ``f90``)
* - `Mpi
<https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/mpi>`_
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/mpi>`_
- Compiles and runs ``mpi_hello`` (``c``, ``fortran``)
* - :ref:`PythonPackage <pythonpackage>`
- Imports modules listed in the ``self.import_modules`` property with defaults derived from the tarball
@@ -6010,7 +6031,7 @@ maintainers provide additional stand-alone tests customized to the package.
One example of a package that adds its own stand-alone tests to those
"inherited" by the virtual package it provides an implementation for is
the `Openmpi package
<https://github.com/spack/spack/blob/develop/var/spack/repos/spack_repo/builtin/packages/openmpi/package.py>`_.
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/openmpi/package.py>`_.
Below are snippets from running and viewing the stand-alone test results
for ``openmpi``:
@@ -6162,7 +6183,7 @@ running:
.. code-block:: python
from spack.package import *
from spack import *
This is already part of the boilerplate for packages created with
``spack create``.
@@ -7237,7 +7258,7 @@ which are not, there is the `checked_by` parameter in the license directive:
license("<license>", when="<when>", checked_by="<github username>")
When you have validated a package license, either when doing so explicitly or
When you have validated a github license, either when doing so explicitly or
as part of packaging a new package, please set the `checked_by` parameter
to your Github username to signal that the license has been manually
verified.
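For example (the username and license here are purely illustrative):

.. code-block:: python

   license("Apache-2.0 OR MIT", checked_by="example-user")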

View File

@@ -330,7 +330,7 @@ that ``--tests`` is passed to ``spack ci rebuild`` as part of the
- spack --version
- cd ${SPACK_CONCRETE_ENV_DIR}
- spack env activate --without-view .
- spack config add "config:install_tree:projections:${SPACK_JOB_SPEC_PKG_NAME}:'morepadding/{architecture.platform}-{architecture.target}/{name}-{version}-{hash}'"
- spack config add "config:install_tree:projections:${SPACK_JOB_SPEC_PKG_NAME}:'morepadding/{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}'"
- mkdir -p ${SPACK_ARTIFACTS_ROOT}/user_data
- if [[ -r /mnt/key/intermediate_ci_signing_key.gpg ]]; then spack gpg trust /mnt/key/intermediate_ci_signing_key.gpg; fi
- if [[ -r /mnt/key/spack_public_key.gpg ]]; then spack gpg trust /mnt/key/spack_public_key.gpg; fi
@@ -820,69 +820,6 @@ presence of a ``SPACK_CDASH_AUTH_TOKEN`` environment variable during the
build group on CDash called "Release Testing" (that group will be created if
it didn't already exist).
.. _ci_artifacts:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
CI Artifacts Directory Layout
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When running the CI build using the command ``spack ci rebuild``, a number of directories are created for
storing data generated during the CI job. The default root directory for artifacts is ``job_scratch_root``.
This can be overridden by passing the argument ``--artifacts-root`` to the ``spack ci generate`` command
or by setting the ``SPACK_ARTIFACTS_ROOT`` environment variable in the build job scripts.
The top level directories under the artifact root are ``concrete_environment``, ``logs``, ``reproduction``,
``tests``, and ``user_data``. Spack does not restrict what is written to any of these directories, nor does
it require user-specified files to be written to any specific directory.
------------------------
``concrete_environment``
------------------------
The directory ``concrete_environment`` is used to communicate the ``spack.yaml`` processed by ``spack ci generate``
and the concrete ``spack.lock`` for the CI environment.
--------
``logs``
--------
The directory ``logs`` contains the Spack build log, ``spack-build-out.txt``, and the Spack build environment
modification file, ``spack-build-mod-env.txt``. Additionally, all files specified by the package's ``Builder``
property ``archive_files`` are also copied here (e.g., ``CMakeCache.txt`` in ``CMakeBuilder``).
----------------
``reproduction``
----------------
The directory ``reproduction`` is used to store the files needed by the ``spack reproduce-build`` command.
This includes ``repro.json``, copies of all of the files in ``concrete_environment``, the concrete spec
JSON file for the current spec being built, and all of the files written in the artifacts root directory.
The ``repro.json`` file is not versioned and is only designed to work with the version of Spack that the CI was run with.
An example of what a ``repro.json`` file may look like is shown here.
.. code:: json
{
"job_name": "adios2@2.9.2 /feaevuj %gcc@11.4.0 arch=linux-ubuntu20.04-x86_64_v3 E4S ROCm External",
"job_spec_json": "adios2.json",
"ci_project_dir": "/builds/spack/spack"
}
---------
``tests``
---------
The directory ``tests`` is used to store output from running ``spack test <job spec>``. This may or may not have
data in it depending on the package that was built and the availability of tests.
-------------
``user_data``
-------------
The directory ``user_data`` is used to store everything else that shouldn't be copied to the ``reproduction`` directory.
Users may use this to store additional logs, metrics, or other types of files generated by the build job.
-------------------------------------
Using a custom spack in your pipeline
-------------------------------------

View File

@@ -214,7 +214,7 @@ package versions, simply run the following commands:
Running ``spack mark -i --all`` tells Spack to mark all of the existing
packages within an environment as "implicitly" installed. This tells
Spack's garbage collection system that these packages should be cleaned up.
spack's garbage collection system that these packages should be cleaned up.
Don't worry however, this will not remove your entire environment.
Running ``spack install`` will reexamine your spack environment after

View File

@@ -9,7 +9,7 @@ Package Repositories (repos.yaml)
=================================
Spack comes with thousands of built-in package recipes in
``var/spack/repos/spack_repo/builtin/``. This is a **package repository** -- a
``var/spack/repos/builtin/``. This is a **package repository** -- a
directory that Spack searches when it needs to find a package by name.
You may need to maintain packages for restricted, proprietary or
experimental software separately from the built-in repository. Spack
@@ -69,7 +69,7 @@ The default ``etc/spack/defaults/repos.yaml`` file looks like this:
.. code-block:: yaml
repos:
- $spack/var/spack/repos/spack_repo/builtin
- $spack/var/spack/repos/builtin
The file starts with ``repos:`` and contains a single ordered list of
paths to repositories. Each path is on a separate line starting with
@@ -78,16 +78,16 @@ paths to repositories. Each path is on a separate line starting with
.. code-block:: yaml
repos:
- /opt/repos/spack_repo/local_repo
- $spack/var/spack/repos/spack_repo/builtin
- /opt/local-repo
- $spack/var/spack/repos/builtin
When Spack interprets a spec, e.g., ``mpich`` in ``spack install mpich``,
it searches these repositories in order (first to last) to resolve each
package name. In this example, Spack will look for the following
packages and use the first valid file:
1. ``/opt/repos/spack_repo/local_repo/packages/mpich/package.py``
2. ``$spack/var/spack/repos/spack_repo/builtin/packages/mpich/package.py``
1. ``/opt/local-repo/packages/mpich/package.py``
2. ``$spack/var/spack/repos/builtin/packages/mpich/package.py``
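The first-match behavior can be pictured with a small, purely illustrative Python
sketch; the paths are hypothetical and this is not Spack's actual implementation:

.. code-block:: python

   import os


   def resolve_package(name, repo_roots):
       """Return the first package.py found for ``name``, searching repos in order."""
       for root in repo_roots:
           candidate = os.path.join(root, "packages", name, "package.py")
           if os.path.exists(candidate):
               return candidate
       return None


   repos = ["/opt/local-repo", "/path/to/spack/var/spack/repos/builtin"]
   print(resolve_package("mpich", repos))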
.. note::
@@ -101,15 +101,14 @@ Namespaces
Every repository in Spack has an associated **namespace** defined in its
top-level ``repo.yaml`` file. If you look at
``var/spack/repos/spack_repo/builtin/repo.yaml`` in the built-in repository, you'll
``var/spack/repos/builtin/repo.yaml`` in the built-in repository, you'll
see that its namespace is ``builtin``:
.. code-block:: console
$ cat var/spack/repos/spack_repo/builtin/repo.yaml
$ cat var/spack/repos/builtin/repo.yaml
repo:
namespace: builtin
api: v2.0
Spack records the repository namespace of each installed package. For
example, if you install the ``mpich`` package from the ``builtin`` repo,
@@ -218,15 +217,15 @@ Suppose you have three repositories: the builtin Spack repo
repo containing your own prototype packages (``proto``). Suppose they
contain packages as follows:
+--------------+-----------------------------------------------+-----------------------------+
| Namespace | Path to repo | Packages |
+==============+===============================================+=============================+
| ``proto`` | ``~/my_spack_repos/spack_repo/proto`` | ``mpich`` |
+--------------+-----------------------------------------------+-----------------------------+
| ``llnl`` | ``/usr/local/repos/spack_repo/llnl`` | ``hdf5`` |
+--------------+-----------------------------------------------+-----------------------------+
| ``builtin`` | ``$spack/var/spack/repos/spack_repo/builtin`` | ``mpich``, ``hdf5``, others |
+--------------+-----------------------------------------------+-----------------------------+
+--------------+------------------------------------+-----------------------------+
| Namespace | Path to repo | Packages |
+==============+====================================+=============================+
| ``proto`` | ``~/proto`` | ``mpich`` |
+--------------+------------------------------------+-----------------------------+
| ``llnl`` | ``/usr/local/llnl`` | ``hdf5`` |
+--------------+------------------------------------+-----------------------------+
| ``builtin`` | ``$spack/var/spack/repos/builtin`` | ``mpich``, ``hdf5``, others |
+--------------+------------------------------------+-----------------------------+
Suppose that ``hdf5`` depends on ``mpich``. You can override the
built-in ``hdf5`` by adding the ``llnl`` repo to ``repos.yaml``:
@@ -234,8 +233,8 @@ built-in ``hdf5`` by adding the ``llnl`` repo to ``repos.yaml``:
.. code-block:: yaml
repos:
- /usr/local/repos/spack_repo/llnl
- $spack/var/spack/repos/spack_repo/builtin
- /usr/local/llnl
- $spack/var/spack/repos/builtin
``spack install hdf5`` will install ``llnl.hdf5 ^builtin.mpich``.
@@ -244,9 +243,9 @@ If, instead, ``repos.yaml`` looks like this:
.. code-block:: yaml
repos:
- ~/my_spack_repos/spack_repo/proto
- /usr/local/repos/spack_repo/llnl
- $spack/var/spack/repos/spack_repo/builtin
- ~/proto
- /usr/local/llnl
- $spack/var/spack/repos/builtin
``spack install hdf5`` will install ``llnl.hdf5 ^proto.mpich``.
@@ -327,8 +326,8 @@ files, use ``spack repo list``.
$ spack repo list
==> 2 package repositories.
myrepo v2.0 ~/my_spack_repos/spack_repo/myrepo
builtin v2.0 ~/spack/var/spack/repos/spack_repo/builtin
myrepo ~/myrepo
builtin ~/spack/var/spack/repos/builtin
Each repository is listed with its associated namespace. To get the raw,
merged YAML from all configuration files, use ``spack config get repos``:
@@ -336,9 +335,9 @@ merged YAML from all configuration files, use ``spack config get repos``:
.. code-block:: console
$ spack config get repos
repos:
- ~/my_spack_repos/spack_repo/myrepo
- $spack/var/spack/repos/spack_repo/builtin
repos:
- ~/myrepo
- $spack/var/spack/repos/builtin
Note that, unlike ``spack repo list``, this does not include the
namespace, which is read from each repo's ``repo.yaml``.
@@ -352,54 +351,66 @@ yourself; you can use the ``spack repo create`` command.
.. code-block:: console
$ spack repo create ~/my_spack_repos myrepo
$ spack repo create myrepo
==> Created repo with namespace 'myrepo'.
==> To register it with spack, run this command:
spack repo add ~/my_spack_repos/spack_repo/myrepo
spack repo add ~/myrepo
$ ls ~/my_spack_repos/spack_repo/myrepo
$ ls myrepo
packages/ repo.yaml
$ cat ~/my_spack_repos/spack_repo/myrepo/repo.yaml
$ cat myrepo/repo.yaml
repo:
namespace: 'myrepo'
api: v2.0
Namespaces can also be nested, which can be useful if you have
multiple package repositories for an organization. Spack will
create the corresponding directory structure for you:
By default, the namespace of a new repo matches its directory's name.
You can supply a custom namespace with a second argument, e.g.:
.. code-block:: console
$ spack repo create ~/my_spack_repos llnl.comp
$ spack repo create myrepo llnl.comp
==> Created repo with namespace 'llnl.comp'.
==> To register it with spack, run this command:
spack repo add ~/my_spack_repos/spack_repo/llnl/comp
spack repo add ~/myrepo
$ cat ~/my_spack_repos/spack_repo/llnl/comp/repo.yaml
$ cat myrepo/repo.yaml
repo:
namespace: 'llnl.comp'
api: v2.0
You can also create repositories with custom structure with the ``-d/--subdirectory``
argument, e.g.:
.. code-block:: console
$ spack repo create -d applications myrepo apps
==> Created repo with namespace 'apps'.
==> To register it with Spack, run this command:
spack repo add ~/myrepo
$ ls myrepo
applications/ repo.yaml
$ cat myrepo/repo.yaml
repo:
namespace: apps
subdirectory: applications
^^^^^^^^^^^^^^^^^^
``spack repo add``
^^^^^^^^^^^^^^^^^^
Once your repository is created, you can register it with Spack with
``spack repo add``. You need to specify the path to the directory that
contains the ``repo.yaml`` file.
``spack repo add``:
.. code-block:: console
$ spack repo add ~/my_spack_repos/spack_repo/llnl/comp
$ spack repo add ./myrepo
==> Added repo with namespace 'llnl.comp'.
$ spack repo list
==> 2 package repositories.
llnl.comp v2.0 ~/my_spack_repos/spack_repo/llnl/comp
builtin v2.0 ~/spack/var/spack/repos/spack_repo/builtin
llnl.comp ~/myrepo
builtin ~/spack/var/spack/repos/builtin
This simply adds the repo to your ``repos.yaml`` file.
@@ -421,43 +432,46 @@ By namespace:
.. code-block:: console
$ spack repo rm llnl.comp
==> Removed repository ~/my_spack_repos/spack_repo/llnl/comp with namespace 'llnl.comp'.
==> Removed repository ~/myrepo with namespace 'llnl.comp'.
$ spack repo list
==> 1 package repository.
builtin ~/spack/var/spack/repos/spack_repo/builtin
builtin ~/spack/var/spack/repos/builtin
By path:
.. code-block:: console
$ spack repo rm ~/my_spack_repos/spack_repo/llnl/comp
==> Removed repository ~/my_spack_repos/spack_repo/llnl/comp
$ spack repo rm ~/myrepo
==> Removed repository ~/myrepo
$ spack repo list
==> 1 package repository.
builtin ~/spack/var/spack/repos/spack_repo/builtin
builtin ~/spack/var/spack/repos/builtin
--------------------------------
Repo namespaces and Python
--------------------------------
Package repositories are implemented as Python packages. To be precise,
they are `namespace packages
<https://packaging.python.org/en/latest/guides/packaging-namespace-packages/>`_
with ``spack_repo`` the top-level namespace, followed by the repository
namespace as submodules. For example, the builtin repository corresponds
to the Python module ``spack_repo.builtin.packages``.
You may have noticed that namespace notation for repositories is similar
to the notation for namespaces in Python. As it turns out, you *can*
treat Spack repositories like Python packages; this is how they are
implemented.
This structure allows you to extend a ``builtin`` package in your own
You could, for example, extend a ``builtin`` package in your own
repository:
.. code-block:: python
from spack_repo.builtin.packages.mpich.package import Mpich
from spack.pkg.builtin.mpich import Mpich
class MyPackage(Mpich):
...
Spack populates ``sys.path`` at runtime with the path to the root of your
package repository's ``spack_repo`` directory.
Spack repo namespaces are actually Python namespaces tacked on under
``spack.pkg``. The search semantics of ``repos.yaml`` are actually
implemented using Python's built-in `sys.path
<https://docs.python.org/2/library/sys.html#sys.path>`_ search. The
:py:mod:`spack.repo` module implements a custom `Python importer
<https://docs.python.org/2/library/imp.html>`_.

View File

@@ -1,13 +1,13 @@
sphinx==8.2.3
sphinx==8.1.3
sphinxcontrib-programoutput==0.18
sphinx_design==0.6.1
sphinx-rtd-theme==3.0.2
python-levenshtein==0.27.1
python-levenshtein==0.26.1
docutils==0.21.2
pygments==2.19.1
urllib3==2.4.0
pytest==8.3.5
isort==6.0.1
black==25.1.0
flake8==7.2.0
pygments==2.18.0
urllib3==2.3.0
pytest==8.3.4
isort==5.13.2
black==24.10.0
flake8==7.1.1
mypy==1.11.1

View File

@@ -176,72 +176,92 @@ community without needing deep familiarity with GnuPG or Public Key
Infrastructure.
.. _build_cache_signing:
.. _build_cache_format:
-------------------
Build Cache Signing
-------------------
------------------
Build Cache Format
------------------
For an in-depth description of the layout of a binary mirror, see
the :ref:`documentation<build_cache_layout>` covering binary caches. The
key takeaway from that discussion that applies here is that the entry point
to a binary package is it's manifest. The manifest refers unambiguously to the
spec metadata and compressed archive, which are stored as content-addressed
blobs.
A binary package consists of a metadata file unambiguously defining the
built package (and including other details such as how to relocate it)
and the installation directory of the package stored as a compressed
archive file. The metadata files can either be unsigned, in which case
the contents are simply the json-serialized concrete spec plus metadata,
or they can be signed, in which case the json-serialized concrete spec
plus metadata is wrapped in a gpg cleartext signature. Built package
metadata files are named to indicate the operating system and
architecture for which the package was built as well as the compiler
used to build it and the packages name and version. For example::
The manifest files can either be signed or unsigned, but are always given
a name ending with ``.spec.manifest.json`` regardless. The difference between
signed and unsigned manifests is simply that the signed version is wrapped in
a gpg cleartext signature, as illustrated below::
linux-ubuntu18.04-haswell-gcc-7.5.0-zlib-1.2.12-llv2ysfdxnppzjrt5ldybb5c52qbmoow.spec.json.sig
would contain the concrete spec and binary metadata for a binary package
of ``zlib@1.2.12``, built for the ``ubuntu`` operating system and ``haswell``
architecture. The id of the built package exists in the name of the file
as well (after the package name and version) and in this case begins
with ``llv2ys``. The id distinguishes a particular built package from all
other built packages with the same os/arch, compiler, name, and version.
Below is an example of a signed binary package metadata file. Such a
file would live in the ``build_cache`` directory of a binary mirror::
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA512
{
"version": 3,
"data": [
{
"contentLength": 10731083,
"mediaType": "application/vnd.spack.install.v2.tar+gzip",
"compression": "gzip",
"checksumAlgorithm": "sha256",
"checksum": "0f24aa6b5dd7150067349865217acd3f6a383083f9eca111d2d2fed726c88210"
},
{
"contentLength": 1000,
"mediaType": "application/vnd.spack.spec.v5+json",
"compression": "gzip",
"checksumAlgorithm": "sha256",
"checksum": "fba751c4796536737c9acbb718dad7429be1fa485f5585d450ab8b25d12ae041"
}
]
}
-----BEGIN PGP SIGNATURE-----
"spec": {
<concrete-spec-contents-omitted>
},
iQGzBAEBCgAdFiEEdbwFKBFJCcB24mB0GAEP+tc8mwcFAmf2rr4ACgkQGAEP+tc8
mwfefwv+KJs8MsQ5ovFaBdmyx5H/3k4rO4QHBzuSPOB6UaxErA9IyOB31iP6vNTU
HzYpxz6F5dJCJWmmNEMN/0+vjhMHEOkqd7M1l5reVcxduTF2yc4tBZUO2gienEHL
W0e+SnUznl1yc/aVpChUiahO2zToCsI8HZRNT4tu6iCnE/OpghqjsSdBOZHmSNDD
5wuuCxfDUyWI6ZlLclaaB7RdbCUUJf/iqi711J+wubvnDFhc6Ynwm1xai5laJ1bD
ev3NrSb2AAroeNFVo4iECA0fZC1OZQYzaRmAEhBXtCideGJ5Zf2Cp9hmCwNK8Hq6
bNt94JP9LqC3FCCJJOMsPyOOhMSA5MU44zyyzloRwEQpHHLuFzVdbTHA3dmTc18n
HxNLkZoEMYRc8zNr40g0yb2lCbc+P11TtL1E+5NlE34MX15mPewRCiIFTMwhCnE3
gFSKtW1MKustZE35/RUwd2mpJRf+mSRVCl1f1RiFjktLjz7vWQq7imIUSam0fPDr
XD4aDogm
=RrFX
"buildcache_layout_version": 1,
"binary_cache_checksum": {
"hash_algorithm": "sha256",
"hash": "4f1e46452c35a5e61bcacca205bae1bfcd60a83a399af201a29c95b7cc3e1423"
}
}
-----BEGIN PGP SIGNATURE-----
iQGzBAEBCgAdFiEETZn0sLle8jIrdAPLx/P+voVcifMFAmKAGvwACgkQx/P+voVc
ifNoVgv/VrhA+wurVs5GB9PhmMA1m5U/AfXZb4BElDRwpT8ZcTPIv5X8xtv60eyn
4EOneGVbZoMThVxgev/NKARorGmhFXRqhWf+jknJZ1dicpqn/qpv34rELKUpgXU+
QDQ4d1P64AIdTczXe2GI9ZvhOo6+bPvK7LIsTkBbtWmopkomVxF0LcMuxAVIbA6b
887yBvVO0VGlqRnkDW7nXx49r3AG2+wDcoU1f8ep8QtjOcMNaPTPJ0UnjD0VQGW6
4ZFaGZWzdo45MY6tF3o5mqM7zJkVobpoW3iUz6J5tjz7H/nMlGgMkUwY9Kxp2PVH
qoj6Zip3LWplnl2OZyAY+vflPFdFh12Xpk4FG7Sxm/ux0r+l8tCAPvtw+G38a5P7
QEk2JBr8qMGKASmnRlJUkm1vwz0a95IF3S9YDfTAA2vz6HH3PtsNLFhtorfx8eBi
Wn5aPJAGEPOawEOvXGGbsH4cDEKPeN0n6cy1k92uPEmBLDVsdnur8q42jk5c2Qyx
j3DXty57
=3gvm
-----END PGP SIGNATURE-----
If a user has trusted the public key associated with the private key
used to sign the above manifest file, the signature can be verified with
used to sign the above spec file, the signature can be verified with
gpg, as follows::
$ gpg --verify gcc-runtime-12.3.0-s2nqujezsce4x6uhtvxscu7jhewqzztx.spec.manifest.json
$ gpg --verify linux-ubuntu18.04-haswell-gcc-7.5.0-zlib-1.2.12-llv2ysfdxnppzjrt5ldybb5c52qbmoow.spec.json.sig
When attempting to install a binary package that has been signed, spack will
attempt to verify the signature with one of the trusted keys in its keyring,
and will fail if unable to do so. While not recommended, it is possible to
force installation of a signed package without verification by providing the
``--no-check-signature`` argument to ``spack install ...``.
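For illustration only, here is what a minimal session might look like, assuming the
signer's public key is available in a local file (``team-public-key.pub`` is a
placeholder name) and reusing the manifest name from the example above::
$ gpg --import ./team-public-key.pub
$ gpg --verify gcc-runtime-12.3.0-s2nqujezsce4x6uhtvxscu7jhewqzztx.spec.manifest.json
$ spack install --no-check-signature <spec>   # bypasses verification; not recommended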
The metadata (regardless of whether it is signed or unsigned) contains the checksum
of the ``.spack`` file containing the actual installation. The checksum should
be compared to a checksum computed locally on the ``.spack`` file to ensure the
contents have not changed since the binary spec plus metadata were signed. The
``.spack`` files are actually tarballs containing the compressed archive of the
install tree. These files, along with the metadata files, live within the
``build_cache`` directory of the mirror, and together are organized as follows::
build_cache/
# unsigned metadata (for indexing, contains sha256 of .spack file)
<arch>-<compiler>-<name>-<ver>-24zvipcqgg2wyjpvdq2ajy5jnm564hen.spec.json
# clearsigned metadata (same as above, but signed)
<arch>-<compiler>-<name>-<ver>-24zvipcqgg2wyjpvdq2ajy5jnm564hen.spec.json.sig
<arch>/
<compiler>/
<name>-<ver>/
# tar.gz-compressed prefix (may support more compression formats later)
<arch>-<compiler>-<name>-<ver>-24zvipcqgg2wyjpvdq2ajy5jnm564hen.spack
Uncompressing and extracting the ``.spack`` file results in the install tree.
This is in contrast to previous versions of spack, where the ``.spack`` file
contained a (duplicated) metadata file, a signature file and a nested tarball
containing the install tree.
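As a sketch of the check and extraction described above (the file name is the same
placeholder used in the listing, and the computed hash must match the one recorded in
the metadata), the manual equivalent would be::
$ sha256sum <arch>-<compiler>-<name>-<ver>-24zvipcqgg2wyjpvdq2ajy5jnm564hen.spack
$ tar -xzf <arch>-<compiler>-<name>-<ver>-24zvipcqgg2wyjpvdq2ajy5jnm564hen.spack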
.. _internal_implementation:
@@ -300,10 +320,10 @@ the following way:
Reputational Public Key are imported into a keyring by the ``spack gpg …``
sub-command. This is initiated by the jobs build script which is created by
the generate job at the beginning of the pipeline.
4. Assuming the package has dependencies, those spec manifests are verified using
4. Assuming the package has dependencies, those specs are verified using
the keyring.
5. The package is built and the spec manifest is generated.
6. The spec manifest is signed by the keyring and uploaded to the mirror's
5. The package is built and the spec.json is generated.
6. The spec.json is signed by the keyring and uploaded to the mirror's
build cache.
**Reputational Key**
@@ -356,24 +376,24 @@ following way:
4. In addition to the secret, the runner creates a tmpfs memory mounted
directory where the GnuPG keyring will be created to verify, and
then resign the package specs.
5. The job script syncs all spec manifest files from the build cache to
5. The job script syncs all spec.json.sig files from the build cache to
a working directory in the job's execution environment.
6. The job script then runs the ``sign.sh`` script built into the
notary Docker image.
7. The ``sign.sh`` script imports the public components of the
Reputational and Intermediate CI Keys and uses them to verify good
signatures on the spec.manifest.json files. If any signed manifest
does not verify, the job immediately fails.
8. Assuming all manifests are verified, the ``sign.sh`` script then unpacks
the manifest json data from the signed file in preparation for being
signatures on the spec.json.sig files. If any signed spec does not
verify, the job immediately fails.
8. Assuming all specs are verified, the ``sign.sh`` script then unpacks
the spec json data from the signed file in preparation for being
re-signed with the Reputational Key.
9. The private components of the Reputational Key are decrypted to
standard out using ``aws-encryption-cli`` directly into a ``gpg
import …`` statement which imports the key into the
keyring mounted in-memory.
10. The private key is then used to sign each of the manifests and the
10. The private key is then used to sign each of the json specs and the
keyring is removed from disk.
11. The re-signed manifests are resynced to the AWS S3 Mirror and the
11. The re-signed json specs are resynced to the AWS S3 Mirror and the
public signing of the packages for the develop or release pipeline
that created them is complete.
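As a very rough, illustrative sketch of the verify/unpack/re-sign loop in steps 7-10
above (plain gpg commands standing in for the protected ``sign.sh`` script; key and
file names are made up)::
gpg --import intermediate-ci-public.key reputational-public.key   # step 7: trust anchors
for f in *.spec.manifest.json; do
    gpg --verify "$f" || exit 1                  # step 7: any bad signature fails the job
    gpg --output "$f.unsigned" --decrypt "$f"    # step 8: unpack the clearsigned json
    # step 9 (not shown): the Reputational private key is imported from the encrypted secret
    gpg --clearsign --local-user reputational --output "$f.resigned" "$f.unsigned"  # step 10
done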

1
lib/spack/env/aocc/clang vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/aocc/clang++ vendored Symbolic link
View File

@@ -0,0 +1 @@
../cpp

1
lib/spack/env/aocc/flang vendored Symbolic link
View File

@@ -0,0 +1 @@
../fc

1
lib/spack/env/arm/armclang vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/arm/armclang++ vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/arm/armflang vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/c++ vendored Symbolic link
View File

@@ -0,0 +1 @@
cc

1
lib/spack/env/c89 vendored Symbolic link
View File

@@ -0,0 +1 @@
cc

1
lib/spack/env/c99 vendored Symbolic link
View File

@@ -0,0 +1 @@
cc

1
lib/spack/env/case-insensitive/CC vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

970
lib/spack/env/cc vendored Executable file
View File

@@ -0,0 +1,970 @@
#!/bin/sh -f
# shellcheck disable=SC2034 # evals in this script fool shellcheck
#
# Copyright Spack Project Developers. See COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
# Spack compiler wrapper script.
#
# Compiler commands go through this compiler wrapper in Spack builds.
# The compiler wrapper is a thin layer around the standard compilers.
# It enables several key pieces of functionality:
#
# 1. It allows Spack to swap compilers into and out of builds easily.
# 2. It adds several options to the compile line so that spack
# packages can find their dependencies at build time and run time:
# -I and/or -isystem arguments for dependency /include directories.
# -L arguments for dependency /lib directories.
# -Wl,-rpath arguments for dependency /lib directories.
#
# Reset IFS to the default: whitespace-separated lists. When we use
# other separators, we set and reset it.
unset IFS
# Separator for lists whose names end with `_list`.
# We pick the alarm bell character, which is highly unlikely to
# conflict with anything. This is a literal bell character (which
# we have to use since POSIX sh does not convert escape sequences
# like '\a' outside of the format argument of `printf`).
# NOTE: Depending on your editor this may look empty, but it is not.
readonly lsep=''
# This is an array of environment variables that need to be set before
# the script runs. They are set by routines in spack.build_environment
# as part of the package installation process.
readonly params="\
SPACK_ENV_PATH
SPACK_DEBUG_LOG_DIR
SPACK_DEBUG_LOG_ID
SPACK_COMPILER_SPEC
SPACK_CC_RPATH_ARG
SPACK_CXX_RPATH_ARG
SPACK_F77_RPATH_ARG
SPACK_FC_RPATH_ARG
SPACK_LINKER_ARG
SPACK_SHORT_SPEC
SPACK_SYSTEM_DIRS
SPACK_MANAGED_DIRS"
# Optional parameters that aren't required to be set
# Boolean (true/false/custom) if we want to add debug flags
# SPACK_ADD_DEBUG_FLAGS
# If a custom flag is requested, it will be defined
# SPACK_DEBUG_FLAGS
# The compiler input variables are checked for sanity later:
# SPACK_CC, SPACK_CXX, SPACK_F77, SPACK_FC
# The default compiler flags are passed from these variables:
# SPACK_CFLAGS, SPACK_CXXFLAGS, SPACK_FFLAGS,
# SPACK_LDFLAGS, SPACK_LDLIBS
# Debug env var is optional; set to "TRUE" for debug logging:
# SPACK_DEBUG
# Test command is used to unit test the compiler script.
# SPACK_TEST_COMMAND
# die MESSAGE
# Print a message and exit with error code 1.
die() {
echo "[spack cc] ERROR: $*"
exit 1
}
# empty VARNAME
# Return whether the variable VARNAME is unset or set to the empty string.
empty() {
eval "test -z \"\${$1}\""
}
# setsep LISTNAME
# Set the global variable 'sep' to the separator for a list with name LISTNAME.
# There are three types of lists:
# 1. regular lists end with _list and are separated by $lsep
# 2. directory lists end with _dirs/_DIRS/PATH(S) and are separated by ':'
# 3. any other list is assumed to be separated by spaces: " "
setsep() {
case "$1" in
*_dirs|*_DIRS|*PATH|*PATHS)
sep=':'
;;
*_list)
sep="$lsep"
;;
*)
sep=" "
;;
esac
}
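# Illustrative examples (not part of the original script) of the separator chosen:
#   setsep SPACK_ENV_PATH      -> sep=':'     (PATH-style list)
#   setsep include_dirs_list   -> sep="$lsep" (bell-separated *_list)
#   setsep flags               -> sep=' '     (anything else: space-separated)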
# prepend LISTNAME ELEMENT
#
# Prepend ELEMENT to the list stored in the variable LISTNAME.
# Handles empty lists and single-element lists.
prepend() {
varname="$1"
elt="$2"
if empty "$varname"; then
eval "$varname=\"\${elt}\""
else
# Get the appropriate separator for the list we're appending to.
setsep "$varname"
eval "$varname=\"\${elt}${sep}\${$varname}\""
fi
}
# append LISTNAME ELEMENT [SEP]
#
# Append ELEMENT to the list stored in the variable LISTNAME,
# assuming the list is separated by SEP.
# Handles empty lists and single-element lists.
append() {
varname="$1"
elt="$2"
if empty "$varname"; then
eval "$varname=\"\${elt}\""
else
# Get the appropriate separator for the list we're appending to.
setsep "$varname"
eval "$varname=\"\${$varname}${sep}\${elt}\""
fi
}
# extend LISTNAME1 LISTNAME2 [PREFIX]
#
# Append the elements stored in the variable LISTNAME2
# to the list stored in LISTNAME1.
# If PREFIX is provided, prepend it to each element.
extend() {
# Figure out the appropriate IFS for the list we're reading.
setsep "$2"
if [ "$sep" != " " ]; then
IFS="$sep"
fi
eval "for elt in \${$2}; do append $1 \"$3\${elt}\"; done"
unset IFS
}
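# Usage sketch (illustrative): with SPACK_CPPFLAGS="-DFOO -DBAR",
#   extend spack_flags_list SPACK_CPPFLAGS
# appends "-DFOO" and "-DBAR" as two separate elements, while
#   extend args_list include_dirs_list "-I"
# appends "-I<dir>" for every directory currently stored in include_dirs_list.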
# preextend LISTNAME1 LISTNAME2 [PREFIX]
#
# Prepend the elements stored in the list at LISTNAME2
# to the list at LISTNAME1, preserving order.
# If PREFIX is provided, prepend it to each element.
preextend() {
# Figure out the appropriate IFS for the list we're reading.
setsep "$2"
if [ "$sep" != " " ]; then
IFS="$sep"
fi
# first, reverse the list to prepend
_reversed_list=""
eval "for elt in \${$2}; do prepend _reversed_list \"$3\${elt}\"; done"
# prepend reversed list to preextend in order
IFS="${lsep}"
for elt in $_reversed_list; do prepend "$1" "$3${elt}"; done
unset IFS
}
execute() {
# dump the full command if the caller supplies SPACK_TEST_COMMAND=dump-args
if [ -n "${SPACK_TEST_COMMAND=}" ]; then
case "$SPACK_TEST_COMMAND" in
dump-args)
IFS="$lsep"
for arg in $full_command_list; do
echo "$arg"
done
unset IFS
exit
;;
dump-env-*)
var=${SPACK_TEST_COMMAND#dump-env-}
eval "printf '%s\n' \"\$0: \$var: \$$var\""
;;
*)
die "Unknown test command: '$SPACK_TEST_COMMAND'"
;;
esac
fi
#
# Write the input and output commands to debug logs if it's asked for.
#
if [ "$SPACK_DEBUG" = TRUE ]; then
input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.in.log"
output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.out.log"
echo "[$mode] $command $input_command" >> "$input_log"
IFS="$lsep"
echo "[$mode] "$full_command_list >> "$output_log"
unset IFS
fi
# Execute the full command, preserving spaces with IFS set
# to the alarm bell separator.
IFS="$lsep"; exec $full_command_list
exit
}
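# Testing sketch (illustrative; the SPACK_* variables listed above must still be set):
#   SPACK_TEST_COMMAND=dump-args ./cc -I/tmp/include foo.c
# prints the final argument list one entry per line and exits without compiling, while
#   SPACK_TEST_COMMAND=dump-env-PATH ./cc foo.c
# prints the PATH the wrapper will use and then goes on to exec the real compiler.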
# Fail with a clear message if the input contains any bell characters.
if eval "[ \"\${*#*${lsep}}\" != \"\$*\" ]"; then
die "Compiler command line contains our separator ('${lsep}'). Cannot parse."
fi
# ensure required variables are set
for param in $params; do
if eval "test -z \"\${${param}:-}\""; then
die "Spack compiler must be run from Spack! Input '$param' is missing."
fi
done
# eval this because SPACK_MANAGED_DIRS and SPACK_SYSTEM_DIRS are inputs we don't wanna loop over.
# moving the eval inside the function would eval it every call.
eval "\
path_order() {
case \"\$1\" in
$SPACK_MANAGED_DIRS) return 0 ;;
$SPACK_SYSTEM_DIRS) return 2 ;;
/*) return 1 ;;
esac
}
"
# path_list functions. Path_lists have 3 parts: spack_store_<list>, <list> and system_<list>,
# which are used to prioritize paths when assembling the final command line.
# init_path_lists LISTNAME
# Set <LISTNAME>, spack_store_<LISTNAME>, and system_<LISTNAME> to "".
init_path_lists() {
eval "spack_store_$1=\"\""
eval "$1=\"\""
eval "system_$1=\"\""
}
# assign_path_lists LISTNAME1 LISTNAME2
# Copy contents of LISTNAME2 into LISTNAME1, for each path_list prefix.
assign_path_lists() {
eval "spack_store_$1=\"\${spack_store_$2}\""
eval "$1=\"\${$2}\""
eval "system_$1=\"\${system_$2}\""
}
# append_path_lists LISTNAME ELT
# Append the provided ELT to the appropriate list, based on the result of path_order().
append_path_lists() {
path_order "$2"
case $? in
0) eval "append spack_store_$1 \"\$2\"" ;;
1) eval "append $1 \"\$2\"" ;;
2) eval "append system_$1 \"\$2\"" ;;
esac
}
# Check if optional parameters are defined
# If we aren't asking for debug flags, don't add them
if [ -z "${SPACK_ADD_DEBUG_FLAGS:-}" ]; then
SPACK_ADD_DEBUG_FLAGS="false"
fi
# SPACK_ADD_DEBUG_FLAGS must be true/false/custom
is_valid="false"
for param in "true" "false" "custom"; do
if [ "$param" = "$SPACK_ADD_DEBUG_FLAGS" ]; then
is_valid="true"
fi
done
# Exit with error if we are given an incorrect value
if [ "$is_valid" = "false" ]; then
die "SPACK_ADD_DEBUG_FLAGS, if defined, must be one of 'true', 'false', or 'custom'."
fi
# Figure out the type of compiler, the language, and the mode so that
# the compiler script knows what to do.
#
# Possible languages are C, C++, Fortran 77, and Fortran 90.
# 'command' is set based on the input command to $SPACK_[CC|CXX|F77|F90]
#
# 'mode' is set to one of:
# vcheck version check
# cpp preprocess
# cc compile
# as assemble
# ld link
# ccld compile & link
# Note. SPACK_ALWAYS_XFLAGS are applied for all compiler invocations,
# including version checks (SPACK_XFLAGS variants are not applied
# for version checks).
command="${0##*/}"
comp="CC"
vcheck_flags=""
case "$command" in
cpp)
mode=cpp
debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_CPPFLAGS}"
;;
cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc|amdclang|cl.exe|craycc)
command="$SPACK_CC"
language="C"
comp="CC"
lang_flags=C
debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_CFLAGS}"
;;
c++|CC|g++|clang++|armclang++|icpc|icpx|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC)
command="$SPACK_CXX"
language="C++"
comp="CXX"
lang_flags=CXX
debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_CXXFLAGS}"
;;
ftn|f90|fc|f95|gfortran|flang|armflang|ifort|ifx|pgfortran|nvfortran|xlf90|xlf90_r|nagfor|frt|amdflang|crayftn)
command="$SPACK_FC"
language="Fortran 90"
comp="FC"
lang_flags=F
debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_FFLAGS}"
;;
f77|xlf|xlf_r|pgf77)
command="$SPACK_F77"
language="Fortran 77"
comp="F77"
lang_flags=F
debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_FFLAGS}"
;;
ld|ld.gold|ld.lld)
mode=ld
;;
*)
die "Unknown compiler: $command"
;;
esac
# If any of the arguments below are present, then the mode is vcheck.
# In vcheck mode, nothing is added in terms of extra search paths or
# libraries.
if [ -z "$mode" ] || [ "$mode" = ld ]; then
for arg in "$@"; do
case $arg in
-v|-V|--version|-dumpversion)
mode=vcheck
break
;;
esac
done
fi
# Finish setting up the mode.
if [ -z "$mode" ]; then
mode=ccld
for arg in "$@"; do
if [ "$arg" = "-E" ]; then
mode=cpp
break
elif [ "$arg" = "-S" ]; then
mode=as
break
elif [ "$arg" = "-c" ]; then
mode=cc
break
fi
done
fi
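# Examples of the resulting mode (illustrative invocations):
#   cc -E foo.c        -> cpp    (preprocess only)
#   cc -S foo.c        -> as     (compile to assembly)
#   cc -c foo.c        -> cc     (compile to an object file)
#   cc foo.o -o foo    -> ccld   (compile and link)
#   cc --version       -> vcheck (no Spack search paths are added)
#   ld foo.o -o foo    -> ld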
# This is needed to ensure we set RPATH instead of RUNPATH
# (or the opposite, depending on the configuration in config.yaml)
#
# Documentation on this mechanism is lacking at best. A few sources
# of information are (note that some of them take explicitly the
# opposite stance that Spack does):
#
# http://blog.qt.io/blog/2011/10/28/rpath-and-runpath/
# https://wiki.debian.org/RpathIssue
#
# The only discussion I could find on enabling new dynamic tags by
# default on ld is the following:
#
# https://sourceware.org/ml/binutils/2013-01/msg00307.html
#
dtags_to_add="${SPACK_DTAGS_TO_ADD}"
dtags_to_strip="${SPACK_DTAGS_TO_STRIP}"
linker_arg="${SPACK_LINKER_ARG}"
# Set up rpath variable according to language.
rpath="ERROR: RPATH ARG WAS NOT SET"
eval "rpath=\${SPACK_${comp}_RPATH_ARG:?${rpath}}"
# Dump the mode and exit if the command is dump-mode.
if [ "$SPACK_TEST_COMMAND" = "dump-mode" ]; then
echo "$mode"
exit
fi
# If, say, SPACK_CC is set but SPACK_FC is not, we want to know. Compilers do not
# *have* to set up Fortran executables, so we need to tell the user when a build is
# about to attempt to use them unsuccessfully.
if [ -z "$command" ]; then
die "Compiler '$SPACK_COMPILER_SPEC' does not have a $language compiler configured."
fi
#
# Filter '.' and Spack environment directories out of PATH so that
# this script doesn't just call itself
#
new_dirs=""
IFS=':'
for dir in $PATH; do
addpath=true
for spack_env_dir in $SPACK_ENV_PATH; do
case "${dir%%/}" in
"$spack_env_dir"|'.'|'')
addpath=false
break
;;
esac
done
if [ $addpath = true ]; then
append new_dirs "$dir"
fi
done
unset IFS
export PATH="$new_dirs"
if [ "$mode" = vcheck ]; then
full_command_list="$command"
args="$@"
extend full_command_list vcheck_flags
extend full_command_list args
execute
fi
# Darwin's linker has a -r argument that merges object files together.
# It doesn't work with -rpath.
# This variable controls whether they are added.
add_rpaths=true
if [ "$mode" = ld ] || [ "$mode" = ccld ]; then
if [ "${SPACK_SHORT_SPEC#*darwin}" != "${SPACK_SHORT_SPEC}" ]; then
for arg in "$@"; do
if [ "$arg" = "-r" ]; then
if [ "$mode" = ld ] || [ "$mode" = ccld ]; then
add_rpaths=false
break
fi
elif [ "$arg" = "-Wl,-r" ] && [ "$mode" = ccld ]; then
add_rpaths=false
break
fi
done
fi
fi
# Save original command for debug logging
input_command="$*"
#
# Parse the command line arguments.
#
# We extract -L, -I, -isystem and -Wl,-rpath arguments from the
# command line and recombine them with Spack arguments later. We
# parse these out so that we can make sure that system paths come
# last, that package arguments come first, and that Spack arguments
# are injected properly.
#
# All other arguments, including -l arguments, are treated as
# 'other_args' and left in their original order. This ensures that
# --start-group, --end-group, and other order-sensitive flags continue to
# work as the caller expects.
#
# The libs variable is initialized here for completeness, and it is also
# used later to inject flags supplied via `ldlibs` on the command
# line. These come into the wrappers via SPACK_LDLIBS.
# The loop below breaks up the command line into these lists of components.
# The lists are all bell-separated to be as flexible as possible, as their
# contents may come from the command line, from ' '-separated lists,
# ':'-separated lists, etc.
parse_Wl() {
while [ $# -ne 0 ]; do
if [ "$wl_expect_rpath" = yes ]; then
append_path_lists return_rpath_dirs_list "$1"
wl_expect_rpath=no
else
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
if [ -z "$arg" ]; then
shift; continue
fi
append_path_lists return_rpath_dirs_list "$arg"
;;
--rpath=*)
arg="${1#--rpath=}"
if [ -z "$arg" ]; then
shift; continue
fi
append_path_lists return_rpath_dirs_list "$arg"
;;
-rpath|--rpath)
wl_expect_rpath=yes
;;
"$dtags_to_strip")
;;
-Wl)
# Nested -Wl,-Wl means we're in NAG compiler territory. We don't support it.
return 1
;;
*)
append return_other_args_list "-Wl,$1"
;;
esac
fi
shift
done
}
categorize_arguments() {
unset IFS
return_other_args_list=""
return_isystem_was_used=""
init_path_lists return_isystem_include_dirs_list
init_path_lists return_include_dirs_list
init_path_lists return_lib_dirs_list
init_path_lists return_rpath_dirs_list
# Global state for keeping track of -Wl,-rpath -Wl,/path
wl_expect_rpath=no
# Same, but for -Xlinker -rpath -Xlinker /path
xlinker_expect_rpath=no
while [ $# -ne 0 ]; do
# an RPATH to be added after the case statement.
rp=""
# Multiple consecutive spaces in the command line can
# result in blank arguments
if [ -z "$1" ]; then
shift
continue
fi
if [ -n "${SPACK_COMPILER_FLAGS_KEEP}" ] ; then
# NOTE: the eval is required to allow `|` alternatives inside the variable
eval "\
case \"\$1\" in
$SPACK_COMPILER_FLAGS_KEEP)
append return_other_args_list \"\$1\"
shift
continue
;;
esac
"
fi
# the replace list is a space-separated list of pipe-separated pairs,
# the first in each pair is the original prefix to be matched, the
# second is the replacement prefix
if [ -n "${SPACK_COMPILER_FLAGS_REPLACE}" ] ; then
for rep in ${SPACK_COMPILER_FLAGS_REPLACE} ; do
before=${rep%|*}
after=${rep#*|}
eval "\
stripped=\"\${1##$before}\"
"
if [ "$stripped" = "$1" ] ; then
continue
fi
replaced="$after$stripped"
# it matched, remove it
shift
if [ -z "$replaced" ] ; then
# completely removed, continue OUTER loop
continue 2
fi
# re-build argument list with replacement
set -- "$replaced" "$@"
done
fi
case "$1" in
-isystem*)
arg="${1#-isystem}"
return_isystem_was_used=true
if [ -z "$arg" ]; then shift; arg="$1"; fi
append_path_lists return_isystem_include_dirs_list "$arg"
;;
-I*)
arg="${1#-I}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
append_path_lists return_include_dirs_list "$arg"
;;
-L*)
arg="${1#-L}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
append_path_lists return_lib_dirs_list "$arg"
;;
-l*)
# -loopopt=0 is generated erroneously in autoconf <= 2.69,
# and passed by ifx to the linker, which confuses it with a
# library. Filter it out.
# TODO: generalize filtering of args with an env var, so that
# TODO: we do not have to special case this here.
if { [ "$mode" = "ccld" ] || [ $mode = "ld" ]; } \
&& [ "$1" != "${1#-loopopt}" ]; then
shift
continue
fi
arg="${1#-l}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
append return_other_args_list "-l$arg"
;;
-Wl,*)
IFS=,
if ! parse_Wl ${1#-Wl,}; then
append return_other_args_list "$1"
fi
unset IFS
;;
-Xlinker)
shift
if [ $# -eq 0 ]; then
# -Xlinker without value: let the compiler error about it.
append return_other_args_list -Xlinker
xlinker_expect_rpath=no
break
elif [ "$xlinker_expect_rpath" = yes ]; then
# Register the path of -Xlinker -rpath <other args> -Xlinker <path>
append_path_lists return_rpath_dirs_list "$1"
xlinker_expect_rpath=no
else
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
append_path_lists return_rpath_dirs_list "$arg"
;;
--rpath=*)
arg="${1#--rpath=}"
append_path_lists return_rpath_dirs_list "$arg"
;;
-rpath|--rpath)
xlinker_expect_rpath=yes
;;
"$dtags_to_strip")
;;
*)
append return_other_args_list -Xlinker
append return_other_args_list "$1"
;;
esac
fi
;;
"$dtags_to_strip")
;;
*)
# if mode is not ld, we can just add to other args
if [ "$mode" != "ld" ]; then
append return_other_args_list "$1"
shift
continue
fi
# if we're in linker mode, we need to parse raw RPATH args
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
append_path_lists return_rpath_dirs_list "$arg"
;;
--rpath=*)
arg="${1#--rpath=}"
append_path_lists return_rpath_dirs_list "$arg"
;;
-rpath|--rpath)
if [ $# -eq 1 ]; then
# -rpath without value: let the linker raise an error.
append return_other_args_list "$1"
break
fi
shift
append_path_lists return_rpath_dirs_list "$1"
;;
*)
append return_other_args_list "$1"
;;
esac
;;
esac
shift
done
# We found `-Xlinker -rpath` but no matching value `-Xlinker /path`. Just append
# `-Xlinker -rpath` again and let the compiler or linker handle the error during arg
# parsing.
if [ "$xlinker_expect_rpath" = yes ]; then
append return_other_args_list -Xlinker
append return_other_args_list -rpath
fi
# Same, but for -Wl flags.
if [ "$wl_expect_rpath" = yes ]; then
append return_other_args_list -Wl,-rpath
fi
}
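# Parsing sketch (illustrative, assuming /opt/spack/store matches SPACK_MANAGED_DIRS and
# /usr/lib matches SPACK_SYSTEM_DIRS): for the arguments
#   -I/opt/spack/store/pkg/include -L/usr/lib -Wl,-rpath,/opt/spack/store/pkg/lib -lm foo.o
# the include dir goes to spack_store_return_include_dirs_list, /usr/lib to
# system_return_lib_dirs_list, the rpath to spack_store_return_rpath_dirs_list, and
# "-lm foo.o" is kept, in its original order, in return_other_args_list.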
categorize_arguments "$@"
assign_path_lists isystem_include_dirs_list return_isystem_include_dirs_list
assign_path_lists include_dirs_list return_include_dirs_list
assign_path_lists lib_dirs_list return_lib_dirs_list
assign_path_lists rpath_dirs_list return_rpath_dirs_list
isystem_was_used="$return_isystem_was_used"
other_args_list="$return_other_args_list"
#
# Add flags from Spack's cppflags, cflags, cxxflags, fcflags, fflags, and
# ldflags. We stick to the order that gmake puts the flags in by default.
#
# See the gmake manual on implicit rules for details:
# https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html
#
flags_list=""
# Add debug flags
if [ "${SPACK_ADD_DEBUG_FLAGS}" = "true" ]; then
extend flags_list debug_flags
# If a custom flag is requested, derive from environment
elif [ "$SPACK_ADD_DEBUG_FLAGS" = "custom" ]; then
extend flags_list SPACK_DEBUG_FLAGS
fi
spack_flags_list=""
# Fortran flags come before CPPFLAGS
case "$mode" in
cc|ccld)
case $lang_flags in
F)
extend spack_flags_list SPACK_ALWAYS_FFLAGS
extend spack_flags_list SPACK_FFLAGS
;;
esac
;;
esac
# C preprocessor flags come before any C/CXX flags
case "$mode" in
cpp|as|cc|ccld)
extend spack_flags_list SPACK_ALWAYS_CPPFLAGS
extend spack_flags_list SPACK_CPPFLAGS
;;
esac
# Add C and C++ flags
case "$mode" in
cc|ccld)
case $lang_flags in
C)
extend spack_flags_list SPACK_ALWAYS_CFLAGS
extend spack_flags_list SPACK_CFLAGS
;;
CXX)
extend spack_flags_list SPACK_ALWAYS_CXXFLAGS
extend spack_flags_list SPACK_CXXFLAGS
;;
esac
# prepend target args
preextend flags_list SPACK_TARGET_ARGS
;;
esac
# Linker flags
case "$mode" in
ccld)
extend spack_flags_list SPACK_LDFLAGS
;;
esac
IFS="$lsep"
categorize_arguments $spack_flags_list
unset IFS
assign_path_lists spack_flags_isystem_include_dirs_list return_isystem_include_dirs_list
assign_path_lists spack_flags_include_dirs_list return_include_dirs_list
assign_path_lists spack_flags_lib_dirs_list return_lib_dirs_list
assign_path_lists spack_flags_rpath_dirs_list return_rpath_dirs_list
spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_other_args_list="$return_other_args_list"
# On macOS insert headerpad_max_install_names linker flag
if [ "$mode" = ld ] || [ "$mode" = ccld ]; then
if [ "${SPACK_SHORT_SPEC#*darwin}" != "${SPACK_SHORT_SPEC}" ]; then
case "$mode" in
ld)
append flags_list "-headerpad_max_install_names" ;;
ccld)
append flags_list "-Wl,-headerpad_max_install_names" ;;
esac
fi
fi
if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
if [ "$add_rpaths" != "false" ]; then
# Append RPATH directories. Note that in the case of the
# top-level package these directories may not exist yet. For dependencies
# it is assumed that paths have already been confirmed.
extend spack_store_rpath_dirs_list SPACK_STORE_RPATH_DIRS
extend rpath_dirs_list SPACK_RPATH_DIRS
fi
fi
if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
extend spack_store_lib_dirs_list SPACK_STORE_LINK_DIRS
extend lib_dirs_list SPACK_LINK_DIRS
fi
libs_list=""
# add RPATHs if we're in any linking mode
case "$mode" in
ld|ccld)
# Set extra RPATHs
extend lib_dirs_list SPACK_COMPILER_EXTRA_RPATHS
if [ "$add_rpaths" != "false" ]; then
extend rpath_dirs_list SPACK_COMPILER_EXTRA_RPATHS
fi
# Set implicit RPATHs
if [ "$add_rpaths" != "false" ]; then
extend rpath_dirs_list SPACK_COMPILER_IMPLICIT_RPATHS
fi
# Add SPACK_LDLIBS to args
for lib in $SPACK_LDLIBS; do
append libs_list "${lib#-l}"
done
;;
esac
case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ] || [ "$isystem_was_used" = "true" ]; then
extend spack_store_isystem_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend isystem_include_dirs_list SPACK_INCLUDE_DIRS
else
extend spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend include_dirs_list SPACK_INCLUDE_DIRS
fi
;;
esac
#
# Finally, reassemble the command line.
#
args_list="$flags_list"
# Include search paths partitioned by (in store, non-system, system)
# NOTE: adding ${lsep} to the prefix here turns every added element into two
# arguments, e.g. "-isystem" followed by the directory, once the final command is split
# on the list separator.
extend args_list spack_store_spack_flags_include_dirs_list -I
extend args_list spack_store_include_dirs_list -I
extend args_list spack_flags_include_dirs_list -I
extend args_list include_dirs_list -I
extend args_list spack_store_spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list spack_store_isystem_include_dirs_list "-isystem${lsep}"
extend args_list spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list isystem_include_dirs_list "-isystem${lsep}"
extend args_list system_spack_flags_include_dirs_list -I
extend args_list system_include_dirs_list -I
extend args_list system_spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list system_isystem_include_dirs_list "-isystem${lsep}"
# Library search paths partitioned by (in store, non-system, system)
extend args_list spack_store_spack_flags_lib_dirs_list "-L"
extend args_list spack_store_lib_dirs_list "-L"
extend args_list spack_flags_lib_dirs_list "-L"
extend args_list lib_dirs_list "-L"
extend args_list system_spack_flags_lib_dirs_list "-L"
extend args_list system_lib_dirs_list "-L"
# RPATHs arguments
rpath_prefix=""
case "$mode" in
ccld)
if [ -n "$dtags_to_add" ] ; then
append args_list "$linker_arg$dtags_to_add"
fi
rpath_prefix="$rpath"
;;
ld)
if [ -n "$dtags_to_add" ] ; then
append args_list "$dtags_to_add"
fi
rpath_prefix="-rpath${lsep}"
;;
esac
# if mode is ccld or ld, extend RPATH lists with the prefix determined above
if [ -n "$rpath_prefix" ]; then
extend args_list spack_store_spack_flags_rpath_dirs_list "$rpath_prefix"
extend args_list spack_store_rpath_dirs_list "$rpath_prefix"
extend args_list spack_flags_rpath_dirs_list "$rpath_prefix"
extend args_list rpath_dirs_list "$rpath_prefix"
extend args_list system_spack_flags_rpath_dirs_list "$rpath_prefix"
extend args_list system_rpath_dirs_list "$rpath_prefix"
fi
# Other arguments from the input command
extend args_list other_args_list
extend args_list spack_flags_other_args_list
# Inject SPACK_LDLIBS, if supplied
extend args_list libs_list "-l"
full_command_list="$command"
extend full_command_list args_list
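# At this point full_command_list holds the real compiler followed by the reordered
# arguments. For a plain compile like `cc -c foo.c` this ends up roughly as (paths and
# compiler illustrative): gcc <flags> -I<store-pkg>/include -I<dep>/include -c foo.c,
# with any system include dirs last; link modes additionally carry the -L, rpath, and
# dtags arguments assembled above.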
# prepend the ccache binary if we're using ccache
if [ -n "$SPACK_CCACHE_BINARY" ]; then
case "$lang_flags" in
C|CXX) # ccache only supports C languages
prepend full_command_list "${SPACK_CCACHE_BINARY}"
# workaround for stage being a temp folder
# see #3761#issuecomment-294352232
export CCACHE_NOHASHDIR=yes
;;
esac
fi
execute

1
lib/spack/env/cce/case-insensitive/CC vendored Symbolic link
View File

@@ -0,0 +1 @@
../../cc

View File

@@ -0,0 +1 @@
../../cc

1
lib/spack/env/cce/cc vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/cce/craycc vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/cce/crayftn vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/cce/ftn vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/clang/clang vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/clang/clang++ vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/clang/flang vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/clang/gfortran vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/cpp vendored Symbolic link
View File

@@ -0,0 +1 @@
cc

1
lib/spack/env/f77 vendored Symbolic link
View File

@@ -0,0 +1 @@
cc

1
lib/spack/env/f90 vendored Symbolic link
View File

@@ -0,0 +1 @@
cc

1
lib/spack/env/f95 vendored Symbolic link
View File

@@ -0,0 +1 @@
cc

1
lib/spack/env/fc vendored Symbolic link
View File

@@ -0,0 +1 @@
cc

1
lib/spack/env/fj/case-insensitive/FCC vendored Symbolic link
View File

@@ -0,0 +1 @@
../../cc

1
lib/spack/env/fj/fcc vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/fj/frt vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/ftn vendored Symbolic link
View File

@@ -0,0 +1 @@
cc

1
lib/spack/env/gcc/g++ vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/gcc/gcc vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/gcc/gfortran vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/intel/icc vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/intel/icpc vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/intel/ifort vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

1
lib/spack/env/ld vendored Symbolic link
View File

@@ -0,0 +1 @@
cc

1
lib/spack/env/ld.gold vendored Symbolic link
View File

@@ -0,0 +1 @@
cc

1
lib/spack/env/ld.lld vendored Symbolic link
View File

@@ -0,0 +1 @@
cc

1
lib/spack/env/nag/nagfor vendored Symbolic link
View File

@@ -0,0 +1 @@
../cc

Some files were not shown because too many files have changed in this diff.