Compare commits


3 Commits

Author           SHA1         Message                    Date
Gregory L. Lee   08aaecd1e2   adding py-pyspy package    2023-11-14 08:03:39 -08:00
Gregory L. Lee   b237d303df   adding py-pyspy package    2023-11-13 17:26:52 -08:00
Gregory L. Lee   c3015d2a1b   adding py-pyspy package    2023-11-13 17:09:38 -08:00
9385 changed files with 41076 additions and 103238 deletions

View File

@@ -1,4 +0,0 @@
{
"image": "ghcr.io/spack/ubuntu20.04-runner-amd64-gcc-11.4:2023.08.01",
"postCreateCommand": "./.devcontainer/postCreateCommand.sh"
}

View File

@@ -1,20 +0,0 @@
#!/bin/bash
# Load spack environment at terminal startup
cat <<EOF >> /root/.bashrc
. /workspaces/spack/share/spack/setup-env.sh
EOF
# Load spack environment in this script
. /workspaces/spack/share/spack/setup-env.sh
# Ensure generic targets for maximum matching with buildcaches
spack config --scope site add "packages:all:require:[target=x86_64_v3]"
spack config --scope site add "concretizer:targets:granularity:generic"
# Find compiler and install gcc-runtime
spack compiler find --scope site
# Setup buildcaches
spack mirror add --scope site develop https://binaries.spack.io/develop
spack buildcache keys --install --trust

View File

@@ -1,6 +0,0 @@
<!--
Remember that `spackbot` can help with your PR in multiple ways:
- `@spackbot help` shows all the commands that are currently available
- `@spackbot fix style` tries to push a commit to fix style issues in this PR
- `@spackbot re-run pipeline` runs the pipelines again, if you have write access to the repository
-->

View File

@@ -17,53 +17,33 @@ concurrency:
jobs:
# Run audits on all the packages in the built-in repository
package-audits:
runs-on: ${{ matrix.system.os }}
runs-on: ${{ matrix.operating_system }}
strategy:
matrix:
system:
- { os: windows-latest, shell: 'powershell Invoke-Expression -Command "./share/spack/qa/windows_test_setup.ps1"; {0}' }
- { os: ubuntu-latest, shell: bash }
- { os: macos-latest, shell: bash }
defaults:
run:
shell: ${{ matrix.system.shell }}
operating_system: ["ubuntu-latest", "macos-latest"]
steps:
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # @v2
with:
python-version: ${{inputs.python_version}}
- name: Install Python packages
run: |
pip install --upgrade pip setuptools pytest coverage[toml]
- name: Setup for Windows run
if: runner.os == 'Windows'
run: |
python -m pip install --upgrade pywin32
- name: Package audits (with coverage)
if: ${{ inputs.with_coverage == 'true' && runner.os != 'Windows' }}
if: ${{ inputs.with_coverage == 'true' }}
run: |
. share/spack/setup-env.sh
coverage run $(which spack) audit packages
coverage run $(which spack) -d audit externals
coverage run $(which spack) audit externals
coverage combine
coverage xml
- name: Package audits (without coverage)
if: ${{ inputs.with_coverage == 'false' && runner.os != 'Windows' }}
if: ${{ inputs.with_coverage == 'false' }}
run: |
. share/spack/setup-env.sh
spack -d audit packages
spack -d audit externals
- name: Package audits (without coverage)
if: ${{ runner.os == 'Windows' }}
run: |
. share/spack/setup-env.sh
spack -d audit packages
./share/spack/qa/validate_last_exit.ps1
spack -d audit externals
./share/spack/qa/validate_last_exit.ps1
- uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be
. share/spack/setup-env.sh
$(which spack) audit packages
$(which spack) audit externals
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # @v2.1.0
if: ${{ inputs.with_coverage == 'true' }}
with:
flags: unittests,audits
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true

View File

@@ -1,8 +1,7 @@
#!/bin/bash
set -e
set -ex
source share/spack/setup-env.sh
$PYTHON bin/spack bootstrap disable github-actions-v0.4
$PYTHON bin/spack bootstrap disable spack-install
$PYTHON bin/spack $SPACK_FLAGS solve zlib
$PYTHON bin/spack -d solve zlib
tree $BOOTSTRAP/store
exit 0

View File

@@ -13,22 +13,118 @@ concurrency:
cancel-in-progress: true
jobs:
distros-clingo-sources:
fedora-clingo-sources:
runs-on: ubuntu-latest
container: ${{ matrix.image }}
strategy:
matrix:
image: ["fedora:latest", "opensuse/leap:latest"]
container: "fedora:latest"
steps:
- name: Setup Fedora
if: ${{ matrix.image == 'fedora:latest' }}
- name: Install dependencies
run: |
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gzip \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static
- name: Setup OpenSUSE
if: ${{ matrix.image == 'opensuse/leap:latest' }}
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
run: |
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
ubuntu-clingo-sources:
runs-on: ubuntu-latest
container: "ubuntu:latest"
steps:
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
apt-get update -y && apt-get upgrade -y
apt-get install -y \
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree \
cmake bison
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
run: |
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack external find cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
ubuntu-clingo-binaries-and-patchelf:
runs-on: ubuntu-latest
container: "ubuntu:latest"
steps:
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
apt-get update -y && apt-get upgrade -y
apt-get install -y \
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
run: |
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack -d solve zlib
tree ~/.spack/bootstrap/store/
opensuse-clingo-sources:
runs-on: ubuntu-latest
container: "opensuse/leap:latest"
steps:
- name: Install dependencies
run: |
# Harden CI by applying the workaround described here: https://www.suse.com/support/kb/doc/?id=000019505
zypper update -y || zypper update -y
@@ -37,9 +133,15 @@ jobs:
make patch unzip which xz python3 python3-devel tree \
cmake bison
- name: Checkout
uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup repo
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
@@ -49,102 +151,77 @@ jobs:
spack -d solve zlib
tree ~/.spack/bootstrap/store/
clingo-sources:
runs-on: ${{ matrix.runner }}
strategy:
matrix:
runner: ['macos-13', 'macos-14', "ubuntu-latest"]
macos-clingo-sources:
runs-on: macos-latest
steps:
- name: Setup macOS
if: ${{ matrix.runner != 'ubuntu-latest' }}
- name: Install dependencies
run: |
brew install cmake bison tree
brew install cmake bison@2.7 tree
- name: Checkout
uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # @v2
with:
python-version: "3.12"
- name: Bootstrap clingo
run: |
source share/spack/setup-env.sh
export PATH=/usr/local/opt/bison@2.7/bin:$PATH
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack external find --not-buildable cmake bison
spack -d solve zlib
tree ~/.spack/bootstrap/store/
gnupg-sources:
runs-on: ${{ matrix.runner }}
macos-clingo-binaries:
runs-on: ${{ matrix.macos-version }}
strategy:
matrix:
runner: [ 'macos-13', 'macos-14', "ubuntu-latest" ]
macos-version: ['macos-11', 'macos-12']
steps:
- name: Setup macOS
if: ${{ matrix.runner != 'ubuntu-latest' }}
- name: Install dependencies
run: |
brew install tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Setup Ubuntu
if: ${{ matrix.runner == 'ubuntu-latest' }}
run: |
sudo rm -rf $(which gpg) $(which gpg2) $(which patchelf)
- name: Checkout
uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
with:
fetch-depth: 0
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack -d gpg list
tree ~/.spack/bootstrap/store/
from-binaries:
runs-on: ${{ matrix.runner }}
strategy:
matrix:
runner: ['macos-13', 'macos-14', "ubuntu-latest"]
steps:
- name: Setup macOS
if: ${{ matrix.runner != 'ubuntu-latest' }}
run: |
brew install tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Setup Ubuntu
if: ${{ matrix.runner == 'ubuntu-latest' }}
run: |
sudo rm -rf $(which gpg) $(which gpg2) $(which patchelf)
- name: Checkout
uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
with:
python-version: |
3.8
3.9
3.10
3.11
3.12
- name: Set bootstrap sources
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4
spack bootstrap disable spack-install
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Bootstrap clingo
run: |
set -e
for ver in '3.8' '3.9' '3.10' '3.11' '3.12' ; do
set -ex
for ver in '3.7' '3.8' '3.9' '3.10' '3.11' ; do
not_found=1
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
echo "Testing $ver_dir"
if [[ -d "$ver_dir" ]] ; then
if $ver_dir/python --version ; then
export PYTHON="$ver_dir/python"
not_found=0
old_path="$PATH"
export PATH="$ver_dir:$PATH"
./bin/spack-tmpconfig -b ./.github/workflows/bootstrap-test.sh
export PATH="$old_path"
fi
fi
# NOTE: test all pythons that exist, not all do on 12
done
ubuntu-clingo-binaries:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup repo
run: |
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap clingo
run: |
set -ex
for ver in '3.7' '3.8' '3.9' '3.10' '3.11' ; do
not_found=1
ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
echo "Testing $ver_dir"
if [[ -d "$ver_dir" ]] ; then
echo "Testing $ver_dir"
if $ver_dir/python --version ; then
export PYTHON="$ver_dir/python"
not_found=0
@@ -159,9 +236,122 @@ jobs:
exit 1
fi
done
ubuntu-gnupg-binaries:
runs-on: ubuntu-latest
container: "ubuntu:latest"
steps:
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
apt-get update -y && apt-get upgrade -y
apt-get install -y \
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
run: |
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4
spack bootstrap disable spack-install
spack -d gpg list
tree ~/.spack/bootstrap/store/
ubuntu-gnupg-sources:
runs-on: ubuntu-latest
container: "ubuntu:latest"
steps:
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
apt-get update -y && apt-get upgrade -y
apt-get install -y \
bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree \
gawk
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Setup non-root user
run: |
# See [1] below
git config --global --add safe.directory /__w/spack/spack
useradd spack-test && mkdir -p ~spack-test
chown -R spack-test . ~spack-test
- name: Setup repo
shell: runuser -u spack-test -- bash {0}
run: |
git --version
. .github/workflows/setup_git.sh
- name: Bootstrap GnuPG
shell: runuser -u spack-test -- bash {0}
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack -d gpg list
tree ~/.spack/bootstrap/store/
macos-gnupg-binaries:
runs-on: macos-latest
steps:
- name: Install dependencies
run: |
brew install tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack bootstrap disable github-actions-v0.4
spack bootstrap disable spack-install
spack -d gpg list
tree ~/.spack/bootstrap/store/
macos-gnupg-sources:
runs-on: macos-latest
steps:
- name: Install dependencies
run: |
brew install gawk tree
# Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Bootstrap GnuPG
run: |
source share/spack/setup-env.sh
spack solve zlib
spack bootstrap disable github-actions-v0.5
spack bootstrap disable github-actions-v0.4
spack -d gpg list
tree ~/.spack/bootstrap/store/
# [1] Distros that have patched git to resolve CVE-2022-24765 (e.g. Ubuntu patching v2.25.1)
# introduce breaking behavior, so we have to set `safe.directory` in gitconfig ourselves.
# See:
# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
# - https://github.com/actions/checkout/issues/760
# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog

View File

@@ -45,20 +45,19 @@ jobs:
[leap15, 'linux/amd64,linux/arm64,linux/ppc64le', 'opensuse/leap:15'],
[ubuntu-focal, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:20.04'],
[ubuntu-jammy, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:22.04'],
[ubuntu-noble, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:24.04'],
[almalinux8, 'linux/amd64,linux/arm64,linux/ppc64le', 'almalinux:8'],
[almalinux9, 'linux/amd64,linux/arm64,linux/ppc64le', 'almalinux:9'],
[rockylinux8, 'linux/amd64,linux/arm64', 'rockylinux:8'],
[rockylinux9, 'linux/amd64,linux/arm64', 'rockylinux:9'],
[fedora39, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:39'],
[fedora40, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:40']]
[fedora37, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:37'],
[fedora38, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:38']]
name: Build ${{ matrix.dockerfile[0] }}
if: github.repository == 'spack/spack'
steps:
- name: Checkout
uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
- uses: docker/metadata-action@96383f45573cb7f253c731d3b3ab81c87ef81934
id: docker_meta
with:
images: |
@@ -88,19 +87,19 @@ jobs:
fi
- name: Upload Dockerfile
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
with:
name: dockerfiles_${{ matrix.dockerfile[0] }}
name: dockerfiles
path: dockerfiles
- name: Set up QEMU
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb
uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226
- name: Log in to GitHub Container Registry
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -108,27 +107,18 @@ jobs:
- name: Log in to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0
uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09
with:
context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }}
push: ${{ github.event_name != 'pull_request' }}
cache-from: type=gha
cache-to: type=gha,mode=max
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}
merge-dockerfiles:
runs-on: ubuntu-latest
needs: deploy-images
steps:
- name: Merge Artifacts
uses: actions/upload-artifact/merge@65462800fd760344b1a7b4382951275a0abb4808
with:
name: dockerfiles
pattern: dockerfiles_*
delete-merged: true

View File

@@ -18,7 +18,6 @@ jobs:
prechecks:
needs: [ changes ]
uses: ./.github/workflows/valid-style.yml
secrets: inherit
with:
with_coverage: ${{ needs.changes.outputs.core }}
all-prechecks:
@@ -36,12 +35,12 @@ jobs:
core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }}
steps:
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
if: ${{ github.event_name == 'push' }}
with:
fetch-depth: 0
# For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36
- uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50
id: filter
with:
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
@@ -71,17 +70,14 @@ jobs:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.bootstrap == 'true' }}
needs: [ prechecks, changes ]
uses: ./.github/workflows/bootstrap.yml
secrets: inherit
unit-tests:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
needs: [ prechecks, changes ]
uses: ./.github/workflows/unit_tests.yaml
secrets: inherit
windows:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
needs: [ prechecks ]
uses: ./.github/workflows/windows_python.yml
secrets: inherit
all:
needs: [ windows, unit-tests, bootstrap ]
runs-on: ubuntu-latest

View File

@@ -14,10 +14,10 @@ jobs:
build-paraview-deps:
runs-on: windows-latest
steps:
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236
with:
python-version: 3.9
- name: Install Python packages

View File

@@ -1,7 +1,7 @@
black==24.4.2
clingo==5.7.1
flake8==7.0.0
isort==5.13.2
mypy==1.8.0
black==23.11.0
clingo==5.6.2
flake8==6.1.0
isort==5.12.0
mypy==1.6.1
types-six==1.16.21.9
vermin==1.6.0
vermin==1.5.2

View File

@@ -51,10 +51,10 @@ jobs:
on_develop: false
steps:
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install System packages
@@ -91,19 +91,17 @@ jobs:
UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: unittests,linux,${{ matrix.concretizer }}
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
# Test shell integration
shell:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # @v2
with:
python-version: '3.11'
- name: Install System packages
@@ -124,11 +122,9 @@ jobs:
COVERAGE: true
run: |
share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: shelltests,linux
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
# Test RHEL8 UBI with platform Python. This job is run
# only on PRs modifying core Spack
@@ -141,7 +137,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- name: Setup repo and non-root user
run: |
git --version
@@ -160,10 +156,10 @@ jobs:
clingo-cffi:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # @v2
with:
python-version: '3.11'
- name: Install System packages
@@ -185,23 +181,20 @@ jobs:
SPACK_TEST_SOLVER: clingo
run: |
share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # @v2.1.0
with:
flags: unittests,linux,clingo
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
# Run unit tests on MacOS
macos:
runs-on: ${{ matrix.os }}
runs-on: macos-latest
strategy:
matrix:
os: [macos-13, macos-14]
python-version: ["3.11"]
steps:
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # @v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Python packages
@@ -222,9 +215,7 @@ jobs:
$(which spack) bootstrap disable spack-install
$(which spack) solve zlib
common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
$(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
- uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be
$(which spack) unit-test --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: unittests,macos
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true

View File

@@ -18,8 +18,8 @@ jobs:
validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236
with:
python-version: '3.11'
cache: 'pip'
@@ -35,10 +35,10 @@ jobs:
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236
with:
python-version: '3.11'
cache: 'pip'
@@ -56,7 +56,6 @@ jobs:
share/spack/qa/run-style-tests
audit:
uses: ./.github/workflows/audit.yaml
secrets: inherit
with:
with_coverage: ${{ inputs.with_coverage }}
python_version: '3.11'
@@ -70,7 +69,7 @@ jobs:
dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- name: Setup repo and non-root user
run: |
git --version

View File

@@ -15,10 +15,10 @@ jobs:
unit-tests:
runs-on: windows-latest
steps:
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236
with:
python-version: 3.9
- name: Install Python packages
@@ -33,18 +33,16 @@ jobs:
./share/spack/qa/validate_last_exit.ps1
coverage combine -a
coverage xml
- uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: unittests,windows
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
unit-tests-cmd:
runs-on: windows-latest
steps:
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236
with:
python-version: 3.9
- name: Install Python packages
@@ -59,18 +57,16 @@ jobs:
./share/spack/qa/validate_last_exit.ps1
coverage combine -a
coverage xml
- uses: codecov/codecov-action@5ecb98a3c6b747ed38dc09f787459979aebb39be
- uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
with:
flags: unittests,windows
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
build-abseil:
runs-on: windows-latest
steps:
- uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
- uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236
with:
python-version: 3.9
- name: Install Python packages

View File

@@ -1,335 +1,3 @@
# v0.21.2 (2024-03-01)
## Bugfixes
- Containerize: accommodate nested or pre-existing spack-env paths (#41558)
- Fix setup-env script, when going back and forth between instances (#40924)
- Fix using fully-qualified namespaces from root specs (#41957)
- Fix a bug when a required provider is requested for multiple virtuals (#42088)
- OCI buildcaches:
- only push in parallel when forking (#42143)
- use pickleable errors (#42160)
- Fix using sticky variants in externals (#42253)
- Fix a rare issue with conditional requirements and multi-valued variants (#42566)
## Package updates
- rust: add v1.75, rework a few variants (#41161,#41903)
- py-transformers: add v4.35.2 (#41266)
- mgard: fix OpenMP on AppleClang (#42933)
# v0.21.1 (2024-01-11)
## New features
- Add support for reading buildcaches created by Spack v0.22 (#41773)
## Bugfixes
- spack graph: fix coloring with environments (#41240)
- spack info: sort variants in --variants-by-name (#41389)
- Spec.format: error on old style format strings (#41934)
- ASP-based solver:
- fix infinite recursion when computing concretization errors (#41061)
- don't error for type mismatch on preferences (#41138)
- don't emit spurious debug output (#41218)
- Improve the error message for deprecated preferences (#41075)
- Fix MSVC preview version breaking clingo build on Windows (#41185)
- Fix multi-word aliases (#41126)
- Add a warning for unconfigured compiler (#41213)
- environment: fix an issue with deconcretization/reconcretization of specs (#41294)
- buildcache: don't error if a patch is missing, when installing from binaries (#41986)
- Multiple improvements to unit-tests (#41215,#41369,#41495,#41359,#41361,#41345,#41342,#41308,#41226)
## Package updates
- root: add a webgui patch to address security issue (#41404)
- BerkeleyGW: update source urls (#38218)
# v0.21.0 (2023-11-11)
`v0.21.0` is a major feature release.
## Features in this release
1. **Better error messages with condition chaining**
In v0.18, we added better error messages that could tell you what problem happened,
but they couldn't tell you *why* it happened. `0.21` adds *condition chaining* to the
solver, and Spack can now trace back through the conditions that led to an error and
build a tree of potential causes and where they came from. For example:
```console
$ spack solve hdf5 ^cmake@3.0.1
==> Error: concretization failed for the following reasons:
1. Cannot satisfy 'cmake@3.0.1'
2. Cannot satisfy 'cmake@3.0.1'
required because hdf5 ^cmake@3.0.1 requested from CLI
3. Cannot satisfy 'cmake@3.18:' and 'cmake@3.0.1
required because hdf5 ^cmake@3.0.1 requested from CLI
required because hdf5 depends on cmake@3.18: when @1.13:
required because hdf5 ^cmake@3.0.1 requested from CLI
4. Cannot satisfy 'cmake@3.12:' and 'cmake@3.0.1
required because hdf5 depends on cmake@3.12:
required because hdf5 ^cmake@3.0.1 requested from CLI
required because hdf5 ^cmake@3.0.1 requested from CLI
```
More details in #40173.
2. **OCI build caches**
You can now use an arbitrary [OCI](https://opencontainers.org) registry as a build
cache:
```console
$ spack mirror add my_registry oci://user/image # Dockerhub
$ spack mirror add my_registry oci://ghcr.io/haampie/spack-test # GHCR
$ spack mirror set --push --oci-username ... --oci-password ... my_registry # set login creds
$ spack buildcache push my_registry [specs...]
```
And you can optionally add a base image to get *runnable* images:
```console
$ spack buildcache push --base-image ubuntu:23.04 my_registry python
Pushed ... as [image]:python-3.11.2-65txfcpqbmpawclvtasuog4yzmxwaoia.spack
$ docker run --rm -it [image]:python-3.11.2-65txfcpqbmpawclvtasuog4yzmxwaoia.spack
```
This creates a container image from the Spack installations on the host system,
without the need to run `spack install` from a `Dockerfile` or `sif` file. It also
addresses the inconvenience of losing binaries of dependencies when `RUN spack
install` fails inside `docker build`.
Further, the container image layers and build cache tarballs are the same files. This
means that `spack install` and `docker pull` use the exact same underlying binaries.
If you previously used `spack install` inside of `docker build`, this feature helps
you save storage by a factor of two.
More details in #38358.
3. **Multiple versions of build dependencies**
Increasingly, complex package builds require multiple versions of some build
dependencies. For example, Python packages frequently require very specific versions
of `setuptools` and `cython`, and different physics packages sometimes require different
versions of Python to build. Previously, the concretizer enforced that every solve was *unified*,
i.e., that there only be one version of every package. The concretizer now supports
"duplicate" nodes for *build dependencies*, but enforces unification through
transitive link and run dependencies. This will allow it to better resolve complex
dependency graphs in ecosystems like Python, and it also gets us very close to
modeling compilers as proper dependencies.
This change required a major overhaul of the concretizer, as well as a number of
performance optimizations. See #38447, #39621.
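As a schematic illustration (the package names and versions are hypothetical, and the annotated output is a sketch rather than literal `spack spec` output), one DAG can now carry two versions of the same build tool, while link and run dependencies remain unified:
```console
$ spack spec py-foo
py-foo
    ^py-setuptools@68     # build-only dependency of py-foo
    ^py-bar               # link/run dependency, still unified across the DAG
        ^py-setuptools@59 # build-only dependency of py-bar
```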
4. **Cherry-picking virtual dependencies**
You can now select only a subset of virtual dependencies from a spec that may provide
more. For example, if you want `mpich` to be your `mpi` provider, you can be explicit
by writing:
```
hdf5 ^[virtuals=mpi] mpich
```
Or, if you want to use, e.g., `intel-parallel-studio` for `blas` along with an external
`lapack` like `openblas`, you could write:
```
strumpack ^[virtuals=mpi] intel-parallel-studio+mkl ^[virtuals=lapack] openblas
```
The `virtuals=mpi` is an edge attribute, and dependency edges in Spack graphs now
track which virtuals they satisfied. More details in #17229 and #35322.
Note for packaging: in Spack 0.21 `spec.satisfies("^virtual")` is true if and only if
the package specifies `depends_on("virtual")`. This is different from Spack 0.20,
where depending on a provider implied depending on the virtual provided. See #41002
for an example where `^mkl` was being used to test for several `mkl` providers in a
package that did not depend on `mkl`.
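A minimal sketch of the new semantics (the package method and configure flag are hypothetical):
```python
# Hypothetical package.py excerpt. In Spack 0.21 this branch is taken only if
# the package itself declares depends_on("mkl"); in 0.20, depending on any
# provider of mkl (e.g. intel-mkl) was enough to make the check true.
def configure_args(self):
    args = []
    if self.spec.satisfies("^mkl"):
        args.append("--with-mkl")
    return args
```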
5. **License directive**
Spack packages can now have license metadata, with the new `license()` directive:
```python
license("Apache-2.0")
```
Licenses use [SPDX identifiers](https://spdx.org/licenses), and you can use SPDX
expressions to combine them:
```python
license("Apache-2.0 OR MIT")
```
Like other directives in Spack, it's conditional, so you can handle complex cases like
Spack itself:
```python
license("LGPL-2.1", when="@:0.11")
license("Apache-2.0 OR MIT", when="@0.12:")
```
More details in #39346, #40598.
6. **`spack deconcretize` command**
We are getting close to having a `spack update` command for environments, but we're
not quite there yet. This is the next best thing. `spack deconcretize` gives you
control over what you want to update in an already concrete environment. If you have
an environment built with, say, `meson`, and you want to update your `meson` version,
you can run:
```console
spack deconcretize meson
```
and have everything that depends on `meson` rebuilt the next time you run `spack
concretize`. In a future Spack version, we'll handle all of this in a single command,
but for now you can use this to drop bits of your lockfile and resolve your
dependencies again. More in #38803.
7. **UI Improvements**
The venerable `spack info` command was looking shabby compared to the rest of Spack's
UI, so we reworked it to have a bit more flair. `spack info` now makes much better
use of terminal space and shows variants, their values, and their descriptions much
more clearly. Conditional variants are grouped separately so you can more easily
understand how packages are structured. More in #40998.
`spack checksum` now allows you to filter versions from your editor, or by version
range. It also notifies you about potential download URL changes. See #40403.
8. **Environments can include definitions**
Spack did not previously support using `include:` with the
[definitions](https://spack.readthedocs.io/en/latest/environments.html#spec-list-references)
section of an environment, but now it does. You can use this to curate lists of specs
and more easily reuse them across environments. See #33960.
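A minimal sketch of how this can look (the file name and spec lists are illustrative):
```yaml
# shared-definitions.yaml -- a curated, reusable list of specs
definitions:
- compilers: ["%gcc@12"]
- mpis: [mpich, openmpi]
```
```yaml
# spack.yaml -- an environment that includes the definitions above
spack:
  include:
  - shared-definitions.yaml
  specs:
  - matrix:
    - [$mpis]
    - [$compilers]
```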
9. **Aliases**
You can now add aliases to Spack commands in `config.yaml`, e.g. this might enshrine
your favorite args to `spack find` as `spack f`:
```yaml
config:
aliases:
f: find -lv
```
See #17229.
10. **Improved autoloading of modules**
Spack 0.20 was the first release to enable autoloading of direct dependencies in
module files.
The downside of this was that `module avail` and `module load` tab completion would
show users too many modules to choose from, and many users disabled generating
modules for dependencies through `exclude_implicits: true`. Further, it was
necessary to keep hashes in module names to avoid file name clashes.
In this release, you can start using `hide_implicits: true` instead, which exposes
only explicitly installed packages to the user, while still autoloading
dependencies. On top of that, you can safely use `hash_length: 0`, as this config
now only applies to the modules exposed to the user -- you don't have to worry about
file name clashes for hidden dependencies.
Note: for `tcl` this feature requires Modules 4.7 or higher
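A minimal `modules.yaml` sketch of this setup (assuming the default `tcl` module set; exact key placement follows the module configuration docs):
```yaml
modules:
  default:
    tcl:
      hash_length: 0        # now safe: applies only to user-visible modules
      hide_implicits: true  # hide dependency modules but keep autoloading them
      all:
        autoload: direct
```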
11. **Updated container labeling**
Nightly Docker images from the `develop` branch will now be tagged as `:develop` and
`:nightly`. The `:latest` tag is no longer associated with `:develop`, but with the
latest stable release. Releases will be tagged with `:{major}`, `:{major}.{minor}`
and `:{major}.{minor}.{patch}`. `ubuntu:18.04` has also been removed from the list of
generated Docker images, as it is no longer supported. See #40593.
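For example (the image name is illustrative; every generated image follows the same tagging scheme):
```console
$ docker pull spack/ubuntu-jammy:latest   # latest stable release
$ docker pull spack/ubuntu-jammy:develop  # nightly build of the develop branch
$ docker pull spack/ubuntu-jammy:0.21     # a specific release line
```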
## Other new commands and directives
* `spack env activate` without arguments now loads a `default` environment that you do
not have to create (#40756).
* `spack find -H` / `--hashes`: a new shortcut for piping `spack find` output to
other commands (#38663)
* Add `spack checksum --verify`, fix `--add` (#38458)
* New `default_args` context manager factors out common args for directives (#39964)
* `spack compiler find --[no]-mixed-toolchain` lets you easily mix `clang` and
`gfortran` on Linux (#40902)
## Performance improvements
* `spack external find` execution is now much faster (#39843)
* `spack location -i` now much faster on success (#40898)
* Drop redundant rpaths post install (#38976)
* ASP-based solver: avoid cycles in clingo using hidden directive (#40720)
* Fix multiple quadratic complexity issues in environments (#38771)
## Other new features of note
* archspec: update to v0.2.2, support for Sapphire Rapids, Power10, Neoverse V2 (#40917)
* Propagate variants across nodes that don't have that variant (#38512)
* Implement fish completion (#29549)
* Can now distinguish between source/binary mirror; don't ping mirror.spack.io as much (#34523)
* Improve status reporting on install (add [n/total] display) (#37903)
## Windows
This release has the best Windows support of any Spack release yet, with numerous
improvements and much larger swaths of tests passing:
* MSVC and SDK improvements (#37711, #37930, #38500, #39823, #39180)
* Windows external finding: update default paths; treat .bat as executable on Windows (#39850)
* Windows decompression: fix removal of intermediate file (#38958)
* Windows: executable/path handling (#37762)
* Windows build systems: use ninja and enable tests (#33589)
* Windows testing (#36970, #36972, #36973, #36840, #36977, #36792, #36834, #34696, #36971)
* Windows PowerShell support (#39118, #37951)
* Windows symlinking and libraries (#39933, #38599, #34701, #38578, #34701)
## Notable refactors
* User-specified flags take precedence over others in Spack compiler wrappers (#37376)
* Improve setup of build, run, and test environments (#35737, #40916)
* `make` is no longer a required system dependency of Spack (#40380)
* Support Python 3.12 (#40404, #40155, #40153)
* docs: Replace package list with packages.spack.io (#40251)
* Drop Python 2 constructs in Spack (#38720, #38718, #38703)
## Binary cache and stack updates
* e4s arm stack: duplicate and target neoverse v1 (#40369)
* Add macOS ML CI stacks (#36586)
* E4S Cray CI Stack (#37837)
* e4s cray: expand spec list (#38947)
* e4s cray sles ci: expand spec list (#39081)
## Removals, deprecations, and syntax changes
* ASP: targets, compilers and providers soft-preferences are only global (#31261)
* Parser: fix ambiguity with whitespace in version ranges (#40344)
* Module file generation is disabled by default; you'll need to enable it to use it (#37258)
* Remove deprecated "extra_instructions" option for containers (#40365)
* Stand-alone test feature deprecation postponed to v0.22 (#40600)
* buildcache push: make `--allow-root` the default and deprecate the option (#38878)
## Notable Bugfixes
* Bugfix: propagation of multivalued variants (#39833)
* Allow `/` in git versions (#39398)
* Fetch & patch: actually acquire stage lock, and many more issues (#38903)
* Environment/depfile: better escaping of targets with Git versions (#37560)
* Prevent "spack external find" to error out on wrong permissions (#38755)
* lmod: allow core compiler to be specified with a version range (#37789)
## Spack community stats
* 7,469 total packages, 303 new since `v0.20.0`
* 150 new Python packages
* 34 new R packages
* 353 people contributed to this release
* 336 committers to packages
* 65 committers to core
# v0.20.3 (2023-10-31)
## Bugfixes

View File

@@ -31,17 +31,13 @@ type: software
message: "If you are referencing Spack in a publication, please cite the paper below."
title: "The Spack Package Manager: Bringing Order to HPC Software Chaos"
abstract: >-
Large HPC centers spend considerable time supporting software for thousands of users, but the
complexity of HPC software is quickly outpacing the capabilities of existing software management
tools. Scientific applications require specific versions of compilers, MPI, and other dependency
libraries, so using a single, standard software stack is infeasible. However, managing many
configurations is difficult because the configuration space is combinatorial in size. We
introduce Spack, a tool used at Lawrence Livermore National Laboratory to manage this complexity.
Spack provides a novel, recursive specification syntax to invoke parametric builds of packages
and dependencies. It allows any number of builds to coexist on the same system, and it ensures
that installed packages can find their dependencies, regardless of the environment. We show
through real-world use cases that Spack supports diverse and demanding applications, bringing
order to HPC software chaos.
Large HPC centers spend considerable time supporting software for thousands of users, but the complexity of HPC software is quickly outpacing the capabilities of existing software management tools.
Scientific applications require specific versions of compilers, MPI, and other dependency libraries, so using a single, standard software stack is infeasible.
However, managing many configurations is difficult because the configuration space is combinatorial in size.
We introduce Spack, a tool used at Lawrence Livermore National Laboratory to manage this complexity.
Spack provides a novel, recursive specification syntax to invoke parametric builds of packages and dependencies.
It allows any number of builds to coexist on the same system, and it ensures that installed packages can find their dependencies, regardless of the environment.
We show through real-world use cases that Spack supports diverse and demanding applications, bringing order to HPC software chaos.
preferred-citation:
title: "The Spack Package Manager: Bringing Order to HPC Software Chaos"
type: conference-paper
@@ -75,7 +71,7 @@ preferred-citation:
type: doi
value: 10.1145/2807591.2807623
- description: "The DOE Document Release Number of the work"
type: other
type: other
value: "LLNL-CONF-669890"
authors:
- family-names: "Gamblin"

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2013-2024 LLNS, LLC and other Spack Project Developers.
Copyright (c) 2013-2023 LLNS, LLC and other Spack Project Developers.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,34 +1,13 @@
<div align="left">
# <img src="https://cdn.rawgit.com/spack/spack/develop/share/spack/logo/spack-logo.svg" width="64" valign="middle" alt="Spack"/> Spack
<h2>
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://cdn.rawgit.com/spack/spack/develop/share/spack/logo/spack-logo-white-text.svg" width="250">
<source media="(prefers-color-scheme: light)" srcset="https://cdn.rawgit.com/spack/spack/develop/share/spack/logo/spack-logo-text.svg" width="250">
<img alt="Spack" src="https://cdn.rawgit.com/spack/spack/develop/share/spack/logo/spack-logo-text.svg" width="250">
</picture>
<br>
<br clear="all">
<a href="https://github.com/spack/spack/actions/workflows/ci.yml"><img src="https://github.com/spack/spack/workflows/ci/badge.svg" alt="CI Status"></a>
<a href="https://github.com/spack/spack/actions/workflows/bootstrapping.yml"><img src="https://github.com/spack/spack/actions/workflows/bootstrap.yml/badge.svg" alt="Bootstrap Status"></a>
<a href="https://github.com/spack/spack/actions/workflows/build-containers.yml"><img src="https://github.com/spack/spack/actions/workflows/build-containers.yml/badge.svg" alt="Containers Status"></a>
<a href="https://spack.readthedocs.io"><img src="https://readthedocs.org/projects/spack/badge/?version=latest" alt="Documentation Status"></a>
<a href="https://codecov.io/gh/spack/spack"><img src="https://codecov.io/gh/spack/spack/branch/develop/graph/badge.svg" alt="Code coverage"/></a>
<a href="https://slack.spack.io"><img src="https://slack.spack.io/badge.svg" alt="Slack"/></a>
<a href="https://matrix.to/#/#spack-space:matrix.org"><img src="https://img.shields.io/matrix/spack-space%3Amatrix.org?label=matrix" alt="Matrix"/></a>
</h2>
**[Getting Started] &nbsp;&nbsp; [Config] &nbsp;&nbsp; [Community] &nbsp;&nbsp; [Contributing] &nbsp;&nbsp; [Packaging Guide]**
[Getting Started]: https://spack.readthedocs.io/en/latest/getting_started.html
[Config]: https://spack.readthedocs.io/en/latest/configuration.html
[Community]: #community
[Contributing]: https://spack.readthedocs.io/en/latest/contribution_guide.html
[Packaging Guide]: https://spack.readthedocs.io/en/latest/packaging_guide.html
</div>
[![Unit Tests](https://github.com/spack/spack/workflows/linux%20tests/badge.svg)](https://github.com/spack/spack/actions)
[![Bootstrapping](https://github.com/spack/spack/actions/workflows/bootstrap.yml/badge.svg)](https://github.com/spack/spack/actions/workflows/bootstrap.yml)
[![codecov](https://codecov.io/gh/spack/spack/branch/develop/graph/badge.svg)](https://codecov.io/gh/spack/spack)
[![Containers](https://github.com/spack/spack/actions/workflows/build-containers.yml/badge.svg)](https://github.com/spack/spack/actions/workflows/build-containers.yml)
[![Read the Docs](https://readthedocs.org/projects/spack/badge/?version=latest)](https://spack.readthedocs.io)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
[![Slack](https://slack.spack.io/badge.svg)](https://slack.spack.io)
[![Matrix](https://img.shields.io/matrix/spack-space%3Amatrix.org?label=Matrix)](https://matrix.to/#/#spack-space:matrix.org)
Spack is a multi-platform package manager that builds and installs
multiple versions and configurations of software. It works on Linux,
@@ -87,11 +66,10 @@ Resources:
* **Matrix space**: [#spack-space:matrix.org](https://matrix.to/#/#spack-space:matrix.org):
[bridged](https://github.com/matrix-org/matrix-appservice-slack#matrix-appservice-slack) to Slack.
* [**Github Discussions**](https://github.com/spack/spack/discussions):
for Q&A and discussions. Note the pinned discussions for announcements.
* **X**: [@spackpm](https://twitter.com/spackpm). Be sure to
not just for discussions, but also Q&A.
* **Mailing list**: [groups.google.com/d/forum/spack](https://groups.google.com/d/forum/spack)
* **Twitter**: [@spackpm](https://twitter.com/spackpm). Be sure to
`@mention` us!
* **Mailing list**: [groups.google.com/d/forum/spack](https://groups.google.com/d/forum/spack):
only for announcements. Please use other venues for discussions.
Contributing
------------------------

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,6 +1,6 @@
#!/bin/sh
#
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# sbang project developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,7 +1,7 @@
#!/bin/sh
# -*- python -*-
#
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,6 +1,6 @@
#!/bin/sh
#
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
:: Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
:: Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
:: Spack Project Developers. See the top-level COPYRIGHT file for details.
::
:: SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -15,7 +15,7 @@ concretizer:
# as possible, rather than building. If `false`, we'll always give you a fresh
# concretization. If `dependencies`, we'll only reuse dependencies but
# give you a fresh concretization for your root specs.
reuse: true
reuse: dependencies
# Options that tune which targets are considered for concretization. The
# concretization process is very sensitive to the number of targets, and the time
# needed to reach a solution increases noticeably with the number of targets
@@ -42,8 +42,3 @@ concretizer:
# "minimal": allows the duplication of 'build-tools' nodes only (e.g. py-setuptools, cmake etc.)
# "full" (experimental): allows separation of the entire build-tool stack (e.g. the entire "cmake" subDAG)
strategy: minimal
# Option to specify compatibility between operating systems for reuse of compilers and packages
# Specified as a key: [list] where the key is the OS that is being targeted, and the list contains the OSes
# it can reuse. Note this is a directional compatibility, so mutual compatibility between two OSes
# requires two entries, i.e. os_compatible: {sonoma: [monterey], monterey: [sonoma]}
os_compatible: {}

View File

@@ -101,12 +101,6 @@ config:
verify_ssl: true
# This is where custom certs for proxy/firewall are stored.
# It can be a path or environment variable. To match ssl env configuration
# the default is the environment variable SSL_CERT_FILE
ssl_certs: $SSL_CERT_FILE
# Suppress gpg warnings from binary package verification
# Only suppresses warnings, gpg failure will still fail the install
# Potential rationale to set True: users have already explicitly trusted the

View File

@@ -1,19 +0,0 @@
# -------------------------------------------------------------------------
# This file controls default concretization preferences for Spack.
#
# Settings here are versioned with Spack and are intended to provide
# sensible defaults out of the box. Spack maintainers should edit this
# file to keep it current.
#
# Users can override these settings by editing the following files.
#
# Per-spack-instance settings (overrides defaults):
# $SPACK_ROOT/etc/spack/packages.yaml
#
# Per-user settings (overrides default and site settings):
# ~/.spack/packages.yaml
# -------------------------------------------------------------------------
packages:
all:
providers:
iconv: [glibc, musl, libiconv]

View File

@@ -19,6 +19,7 @@ packages:
- apple-clang
- clang
- gcc
- intel
providers:
elf: [libelf]
fuse: [macfuse]
@@ -49,4 +50,4 @@ packages:
# Apple bundles libuuid in libsystem_c version 1353.100.2,
# although the version number used here isn't critical
- spec: apple-libuuid@1353.100.2
prefix: /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk
prefix: /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk

View File

@@ -1,19 +0,0 @@
# -------------------------------------------------------------------------
# This file controls default concretization preferences for Spack.
#
# Settings here are versioned with Spack and are intended to provide
# sensible defaults out of the box. Spack maintainers should edit this
# file to keep it current.
#
# Users can override these settings by editing the following files.
#
# Per-spack-instance settings (overrides defaults):
# $SPACK_ROOT/etc/spack/packages.yaml
#
# Per-user settings (overrides default and site settings):
# ~/.spack/packages.yaml
# -------------------------------------------------------------------------
packages:
all:
providers:
iconv: [glibc, musl, libiconv]

View File

@@ -15,17 +15,15 @@
# -------------------------------------------------------------------------
packages:
all:
compiler: [gcc, clang, oneapi, xl, nag, fj, aocc]
compiler: [gcc, intel, pgi, clang, xl, nag, fj, aocc]
providers:
awk: [gawk]
armci: [armcimpi]
blas: [openblas, amdblis]
D: [ldc]
daal: [intel-oneapi-daal]
elf: [elfutils]
fftw-api: [fftw, amdfftw]
flame: [libflame, amdlibflame]
fortran-rt: [gcc-runtime, intel-oneapi-runtime]
fuse: [libfuse]
gl: [glx, osmesa]
glu: [mesa-glu, openglu]
@@ -36,10 +34,7 @@ packages:
java: [openjdk, jdk, ibm-java]
jpeg: [libjpeg-turbo, libjpeg]
lapack: [openblas, amdlibflame]
libc: [glibc, musl]
libgfortran: [ gcc-runtime ]
libglx: [mesa+glx, mesa18+glx]
libifcore: [ intel-oneapi-runtime ]
libllvm: [llvm]
libosmesa: [mesa+osmesa, mesa18+osmesa]
lua-lang: [lua, lua-luajit-openresty, lua-luajit]

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,12 +0,0 @@
{% extends "!layout.html" %}
{%- block extrahead %}
<!-- Google tag (gtag.js) -->
<script async src="https://www.googletagmanager.com/gtag/js?id=G-S0PQ7WV75K"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'G-S0PQ7WV75K');
</script>
{% endblock %}

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -865,7 +865,7 @@ There are several different ways to use Spack packages once you have
installed them. As you've seen, spack packages are installed into long
paths with hashes, and you need a way to get them into your path. The
easiest way is to use :ref:`spack load <cmd-spack-load>`, which is
described in this section.
described in the next section.
Some more advanced ways to use Spack packages include:
@@ -959,86 +959,7 @@ use ``spack find --loaded``.
You can also use ``spack load --list`` to get the same output, but it
does not have the full set of query options that ``spack find`` offers.
We'll learn more about Spack's spec syntax in :ref:`a later section <sec-specs>`.
.. _extensions:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Python packages and virtual environments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Spack can install a large number of Python packages. Their names are
typically prefixed with ``py-``. Installing and using them is no
different from any other package:
.. code-block:: console
$ spack install py-numpy
$ spack load py-numpy
$ python3
>>> import numpy
The ``spack load`` command sets the ``PATH`` variable so that the right Python
executable is used, and makes sure that ``numpy`` and its dependencies can be
located in the ``PYTHONPATH``.
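You can see this directly after loading (a quick sketch; the exact paths differ per installation):

.. code-block:: console

   $ spack load py-numpy
   $ echo $PYTHONPATH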
Spack is different from other Python package managers in that it installs
every package into its *own* prefix. This is in contrast to ``pip``, which
installs all packages into the same prefix, be it in a virtual environment
or not.
For many users, **virtual environments** are more convenient than repeated
``spack load`` commands, particularly when working with multiple Python
packages. Fortunately, Spack supports environments itself, which together
with a view are no different from Python virtual environments.
The recommended way of working with Python extensions such as ``py-numpy``
is through :ref:`Environments <environments>`. The following example creates
a Spack environment with ``numpy`` in the current working directory. It also
puts a filesystem view in ``./view``, which is a more traditional combined
prefix for all packages in the environment.
.. code-block:: console
$ spack env create --with-view view --dir .
$ spack -e . add py-numpy
$ spack -e . concretize
$ spack -e . install
Now you can activate the environment and start using the packages:
.. code-block:: console
$ spack env activate .
$ python3
>>> import numpy
The environment view is also a virtual environment, which is useful if you are
sharing the environment with others who are unfamiliar with Spack. They can
either use the Python executable directly:
.. code-block:: console
$ ./view/bin/python3
>>> import numpy
or use the activation script:
.. code-block:: console
$ source ./view/bin/activate
$ python3
>>> import numpy
In general, there should not be much difference between ``spack env activate``
and using the virtual environment. The main advantage of ``spack env activate``
is that it knows about more packages than just Python packages, and it may set
additional runtime variables that are not covered by the virtual environment
activation script.
See :ref:`environments` for a more in-depth description of Spack
environments and customizations to views.
We'll learn more about Spack's spec syntax in the next section.
.. _sec-specs:
@@ -1198,9 +1119,6 @@ and ``3.4.2``. Similarly, ``@4.2:`` means any version above and including
``4.2``. As a short-hand, ``@3`` is equivalent to the range ``@3:3`` and
includes any version with major version ``3``.
Versions are ordered lexicographically by their components. For more details
on the ordering, see :ref:`the packaging guide <version-comparison>`.
Notice that you can distinguish between the specific version ``@=3.2`` and
the range ``@3.2``. This is useful for packages that follow a versioning
scheme that omits the zero patch version number: ``3.2``, ``3.2.1``,
@@ -1212,10 +1130,6 @@ A version specifier can also be a list of ranges and specific versions,
separated by commas. For example, ``@1.0:1.5,=1.7.1`` matches any version
in the range ``1.0:1.5`` and the specific version ``1.7.1``.
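As a sketch (``<package>`` is a placeholder), such a specifier can be given directly on the command line:

.. code-block:: console

   $ spack spec <package>@1.0:1.5,=1.7.1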
^^^^^^^^^^^^
Git versions
^^^^^^^^^^^^
For packages with a ``git`` attribute, ``git`` references
may be specified instead of a numerical version i.e. branches, tags
and commits. Spack will stage and build based off the ``git``
@@ -1784,6 +1698,165 @@ check only local packages (as opposed to those used transparently from
``upstream`` spack instances) and the ``-j,--json`` option to output
machine-readable json data for any errors.
.. _extensions:
---------------------------
Extensions & Python support
---------------------------
Spack's installation model assumes that each package will live in its
own install prefix. However, certain packages are typically installed
*within* the directory hierarchy of other packages. For example,
`Python <https://www.python.org>`_ packages are typically installed in the
``$prefix/lib/python-2.7/site-packages`` directory.
In Spack, installation prefixes are immutable, so this type of installation
is not directly supported. However, it is possible to create views that
allow you to merge install prefixes of multiple packages into a single new prefix.
Views are a convenient way to get a more traditional filesystem structure.
Using *extensions*, you can ensure that Python packages always share the
same prefix in the view as Python itself. Suppose you have
Python installed like so:
.. code-block:: console
$ spack find python
==> 1 installed packages.
-- linux-debian7-x86_64 / gcc@4.4.7 --------------------------------
python@2.7.8
.. _cmd-spack-extensions:
^^^^^^^^^^^^^^^^^^^^
``spack extensions``
^^^^^^^^^^^^^^^^^^^^
You can find extensions for your Python installation like this:
.. code-block:: console
$ spack extensions python
==> python@2.7.8%gcc@4.4.7 arch=linux-debian7-x86_64-703c7a96
==> 36 extensions:
geos py-ipython py-pexpect py-pyside py-sip
py-basemap py-libxml2 py-pil py-pytz py-six
py-biopython py-mako py-pmw py-rpy2 py-sympy
py-cython py-matplotlib py-pychecker py-scientificpython py-virtualenv
py-dateutil py-mpi4py py-pygments py-scikit-learn
py-epydoc py-mx py-pylint py-scipy
py-gnuplot py-nose py-pyparsing py-setuptools
py-h5py py-numpy py-pyqt py-shiboken
==> 12 installed:
-- linux-debian7-x86_64 / gcc@4.4.7 --------------------------------
py-dateutil@2.4.0 py-nose@1.3.4 py-pyside@1.2.2
py-dateutil@2.4.0 py-numpy@1.9.1 py-pytz@2014.10
py-ipython@2.3.1 py-pygments@2.0.1 py-setuptools@11.3.1
py-matplotlib@1.4.2 py-pyparsing@2.0.3 py-six@1.9.0
The extensions are a subset of what's returned by ``spack list``, and
they are packages like any other. They are installed into their own
prefixes, and you can see this with ``spack find --paths``:
.. code-block:: console
$ spack find --paths py-numpy
==> 1 installed packages.
-- linux-debian7-x86_64 / gcc@4.4.7 --------------------------------
py-numpy@1.9.1 ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/py-numpy@1.9.1-66733244
However, even though this package is installed, you cannot use it
directly when you run ``python``:
.. code-block:: console
$ spack load python
$ python
Python 2.7.8 (default, Feb 17 2015, 01:35:25)
[GCC 4.4.7 20120313 (Red Hat 4.4.7-11)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import numpy
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: No module named numpy
>>>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using Extensions in Environments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The recommended way of working with extensions such as ``py-numpy``
above is through :ref:`Environments <environments>`. For example,
the following creates an environment in the current working directory
with a filesystem view in the ``./view`` directory:
.. code-block:: console
$ spack env create --with-view view --dir .
$ spack -e . add py-numpy
$ spack -e . concretize
$ spack -e . install
We recommend environments for two reasons. Firstly, environments
can be activated (requires :ref:`shell-support`):
.. code-block:: console
$ spack env activate .
which sets all the right environment variables such as ``PATH`` and
``PYTHONPATH``. This ensures that
.. code-block:: console
$ python
>>> import numpy
works. Secondly, even without shell support, the view ensures
that Python can locate its extensions:
.. code-block:: console
$ ./view/bin/python
>>> import numpy
See :ref:`environments` for a more in-depth description of Spack
environments and customizations to views.
^^^^^^^^^^^^^^^^^^^^
Using ``spack load``
^^^^^^^^^^^^^^^^^^^^
A more traditional way of using Spack and extensions is ``spack load``
(requires :ref:`shell-support`). This will add the extension to ``PYTHONPATH``
in your current shell, and Python itself will be available in the ``PATH``:
.. code-block:: console
$ spack load py-numpy
$ python
>>> import numpy
The loaded packages can be checked using ``spack find --loaded``.
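For example (output elided):

.. code-block:: console

   $ spack find --loaded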
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Loading Extensions via Modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Apart from ``spack env activate`` and ``spack load``, you can load numpy
through your environment modules (using ``environment-modules`` or
``lmod``). This will also add the extension to the ``PYTHONPATH`` in
your current shell.
.. code-block:: console
$ module load <name of numpy module>
If you do not know the name of the specific numpy module you wish to
load, you can use the ``spack module tcl|lmod loads`` command to get
the name of the module from the Spack spec.
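For example, to print the module names for ``py-numpy`` and its dependencies (a sketch; the generated names depend on your module configuration):

.. code-block:: console

   $ spack module tcl loads py-numpy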
-----------------------
Filesystem requirements
-----------------------

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -153,43 +153,7 @@ keyring, and trusting all downloaded keys.
List of popular build caches
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* `Extreme-scale Scientific Software Stack (E4S) <https://e4s-project.github.io/>`_: `build cache <https://oaciss.uoregon.edu/e4s/inventory.html>`_
-------------------
Build cache signing
-------------------
By default, Spack will add a cryptographic signature to each package pushed to
a build cache, and verifies the signature when installing from a build cache.
Keys for signing can be managed with the :ref:`spack gpg <cmd-spack-gpg>` command,
as well as ``spack buildcache keys`` as mentioned above.
You can disable signing when pushing with ``spack buildcache push --unsigned``,
and disable verification when installing from any build cache with
``spack install --no-check-signature``.
Alternatively, signing and verification can be enabled or disabled on a per build cache
basis:
.. code-block:: console
$ spack mirror add --signed <name> <url> # enable signing and verification
$ spack mirror add --unsigned <name> <url> # disable signing and verification
$ spack mirror set --signed <name> # enable signing and verification for an existing mirror
$ spack mirror set --unsigned <name> # disable signing and verification for an existing mirror
Or you can directly edit the ``mirrors.yaml`` configuration file:
.. code-block:: yaml
mirrors:
<name>:
url: <url>
signed: false # disable signing and verification
See also :ref:`mirrors`.
* `Extreme-scale Scientific Software Stack (E4S) <https://e4s-project.github.io/>`_: `build cache <https://oaciss.uoregon.edu/e4s/inventory.html>`_
----------
Relocation
@@ -218,41 +182,6 @@ section of the configuration:
padded_length: 128
.. _binary_caches_oci:
---------------------------------
Automatic push to a build cache
---------------------------------
Sometimes it is convenient to push packages to a build cache as soon as they are installed. Spack can do this by setting autopush flag when adding a mirror:
.. code-block:: console
$ spack mirror add --autopush <name> <url or path>
Or the autopush flag can be set for an existing mirror:
.. code-block:: console
$ spack mirror set --autopush <name> # enable automatic push for an existing mirror
$ spack mirror set --no-autopush <name> # disable automatic push for an existing mirror
Then after installing a package it is automatically pushed to all mirrors with ``autopush: true``. The command
.. code-block:: console
$ spack install <package>
will have the same effect as
.. code-block:: console
$ spack install <package>
$ spack buildcache push <cache> <package> # for all caches with autopush: true
.. note::
Packages are automatically pushed to a build cache only if they are built from source.
-----------------------------------------
OCI / Docker V2 registries as build cache
@@ -321,13 +250,87 @@ To significantly speed up Spack in GitHub Actions, binaries can be cached in
GitHub Packages. This service is an OCI registry that can be linked to a GitHub
repository.
A typical workflow is to include a ``spack.yaml`` environment in your repository
that specifies the packages to install, the target architecture, and the build
cache to use under ``mirrors``:
.. code-block:: yaml
spack:
specs:
- python@3.11
config:
install_tree:
root: /opt/spack
padded_length: 128
packages:
all:
require: target=x86_64_v2
mirrors:
local-buildcache: oci://ghcr.io/<organization>/<repository>
A GitHub action can then be used to install the packages and push them to the
build cache:
.. code-block:: yaml
name: Install Spack packages
on: push
env:
SPACK_COLOR: always
jobs:
example:
runs-on: ubuntu-22.04
permissions:
packages: write
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Checkout Spack
uses: actions/checkout@v3
with:
repository: spack/spack
path: spack
- name: Setup Spack
run: echo "$PWD/spack/bin" >> "$GITHUB_PATH"
- name: Concretize
run: spack -e . concretize
- name: Install
run: spack -e . install --no-check-signature
- name: Run tests
run: ./my_view/bin/python3 -c 'print("hello world")'
- name: Push to buildcache
run: |
spack -e . mirror set --oci-username ${{ github.actor }} --oci-password "${{ secrets.GITHUB_TOKEN }}" local-buildcache
spack -e . buildcache push --base-image ubuntu:22.04 --unsigned --update-index local-buildcache
if: ${{ !cancelled() }}
The first time this action runs, it will build the packages from source and
push them to the build cache. Subsequent runs will pull the binaries from the
build cache. The concretizer will ensure that prebuilt binaries are favored
over source builds.
The build cache entries appear in the GitHub Packages section of your repository,
and contain instructions for pulling and running them with ``docker`` or ``podman``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using Spack's public build cache for GitHub Actions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Spack offers a public build cache for GitHub Actions with a set of common packages,
which lets you get started quickly. See the following resources for more information:
* `spack/setup-spack <https://github.com/spack/setup-spack>`_ for setting up Spack in GitHub
Actions
* `spack/github-actions-buildcache <https://github.com/spack/github-actions-buildcache>`_ for
more details on the public build cache
* `spack/github-actions-buildcache <https://github.com/spack/github-actions-buildcache>`_
.. _cmd-spack-buildcache:

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -87,7 +87,7 @@ You can check what is installed in the bootstrapping store at any time using:
.. code-block:: console
% spack -b find
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 11 installed packages
-- darwin-catalina-x86_64 / apple-clang@12.0.0 ------------------
@@ -101,7 +101,7 @@ In case it is needed you can remove all the software in the current bootstrappin
% spack clean -b
==> Removing bootstrapped software and configuration in "/Users/spack/.spack/bootstrap"
% spack -b find
% spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 0 installed packages
@@ -175,4 +175,4 @@ bootstrapping.
This command needs to be run on a machine with internet access and the resulting folder
has to be moved over to the air-gapped system. Once the local sources are added using the
commands suggested at the prompt, they can be used to bootstrap Spack.
commands suggested at the prompt, they can be used to bootstrap Spack.

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -21,90 +21,23 @@ is the following:
Reuse already installed packages
--------------------------------
The ``reuse`` attribute controls how aggressively Spack reuses binary packages during concretization. The
attribute can be either a single value or an object for more complex configurations.

In the former case ("single value") it allows Spack to:
1. Reuse installed packages and buildcaches for all the specs to be concretized, when ``true``
2. Reuse installed packages and buildcaches only for the dependencies of the root specs, when ``dependencies``
3. Disregard reusing installed packages and buildcaches, when ``false``
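For example, a minimal single-value configuration corresponding to option 2 above might look like this (a sketch):

.. code-block:: yaml

   concretizer:
     reuse: dependencies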
If finer control over which specs are reused is needed, the value of this attribute can be
an object with the following keys:
1. ``roots``: if ``true`` root specs are reused, if ``false`` only dependencies of root specs are reused
2. ``from``: list of sources from which reused specs are taken
Each source in ``from`` is itself an object:
.. list-table:: Attributes for a source or reusable specs
:header-rows: 1
* - Attribute name
- Description
* - type (mandatory, string)
- Can be ``local``, ``buildcache``, or ``external``
* - include (optional, list of specs)
- If present, reusable specs must match at least one of the constraints in the list
* - exclude (optional, list of specs)
- If present, reusable specs must not match any of the constraints in the list.
For instance, the following configuration:
.. code-block:: yaml
concretizer:
reuse:
roots: true
from:
- type: local
include:
- "%gcc"
- "%clang"
tells the concretizer to reuse all specs compiled with either ``gcc`` or ``clang`` that are installed
in the local store. Any spec from remote buildcaches is disregarded.
To reduce the boilerplate in configuration files, default values for the ``include`` and
``exclude`` options can be pushed up one level:
.. code-block:: yaml
concretizer:
reuse:
roots: true
include:
- "%gcc"
from:
- type: local
- type: buildcache
- type: local
include:
- "foo %oneapi"
In the example above we reuse all specs compiled with ``gcc`` from the local store
and remote buildcaches, and we also reuse ``foo %oneapi``. Note that the last source of
specs overrides the default ``include`` attribute.
For one-off concretizations, there are command-line arguments for each of the simple "single value"
configurations. This means a user can:
The ``reuse`` attribute controls whether Spack will prefer to use installed packages (``true``), or
whether it will do a "fresh" installation and prefer the latest settings from
``package.py`` files and ``packages.yaml`` (``false``).
You can use:
.. code-block:: console
% spack install --reuse <spec>
to enable reuse for a single installation, or:
to enable reuse for a single installation, and you can use:
.. code-block:: console
spack install --fresh <spec>
to do a fresh install if ``reuse`` is enabled by default.
.. seealso::
FAQ: :ref:`Why does Spack pick particular versions and variants? <faq-concretizer-precedence>`
``reuse: true`` is the default.
------------------------------------------
Selection of the target microarchitectures
@@ -166,3 +99,547 @@ while `py-numpy` still needs an older version:
Up to Spack v0.20 ``duplicates:strategy:none`` was the default (and only) behavior. From Spack v0.21 the
default behavior is ``duplicates:strategy:minimal``.
.. _build-settings:
================================
Package Settings (packages.yaml)
================================
Spack allows you to customize how your software is built through the
``packages.yaml`` file. Using it, you can make Spack prefer particular
implementations of virtual dependencies (e.g., MPI or BLAS/LAPACK),
or you can make it prefer to build with particular compilers. You can
also tell Spack to use *external* software installations already
present on your system.
At a high level, the ``packages.yaml`` file is structured like this:
.. code-block:: yaml
packages:
package1:
# settings for package1
package2:
# settings for package2
# ...
all:
# settings that apply to all packages.
So you can either set build preferences specifically for *one* package,
or you can specify that certain settings should apply to *all* packages.
The types of settings you can customize are described in detail below.
Spack's build defaults are in the default
``etc/spack/defaults/packages.yaml`` file. You can override them in
``~/.spack/packages.yaml`` or ``etc/spack/packages.yaml``. For more
details on how this works, see :ref:`configuration-scopes`.
.. _sec-external-packages:
-----------------
External Packages
-----------------
Spack can be configured to use externally-installed
packages rather than building its own packages. This may be desirable
if machines ship with system packages, such as a customized MPI
that should be used instead of Spack building its own MPI.
External packages are configured through the ``packages.yaml`` file.
Here's an example of an external configuration:
.. code-block:: yaml
packages:
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
This example lists three installations of OpenMPI, one built with GCC,
one built with GCC and debug information, and another built with Intel.
If Spack is asked to build a package that uses one of these MPIs as a
dependency, it will use the pre-installed OpenMPI in
the given directory. Note that the specified path is the top-level
install prefix, not the ``bin`` subdirectory.
``packages.yaml`` can also be used to specify modules to load instead
of the installation prefixes. The following example says that module
``CMake/3.7.2`` provides cmake version 3.7.2.
.. code-block:: yaml
cmake:
externals:
- spec: cmake@3.7.2
modules:
- CMake/3.7.2
Each ``packages.yaml`` begins with a ``packages:`` attribute, followed
by a list of package names. To specify externals, add an ``externals:``
attribute under the package name, which lists externals.
Each external should specify a ``spec:`` string that is as
well-defined as reasonably possible. If a
package lacks a spec component, such as missing a compiler or
package version, then Spack will guess the missing component based
on its most-favored packages, and it may guess incorrectly.
Each package version and compiler listed in an external should
have entries in Spack's packages and compiler configuration, even
though the package and compiler may not ever be built.
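For instance, a compiler referenced by the externals above would need a matching entry in ``compilers.yaml`` along these lines (a sketch; all paths are illustrative):

.. code-block:: yaml

   compilers:
   - compiler:
       spec: gcc@4.4.7
       operating_system: debian7
       modules: []
       paths:
         cc: /usr/bin/gcc
         cxx: /usr/bin/g++
         f77: /usr/bin/gfortran
         fc: /usr/bin/gfortran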
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Prevent packages from being built from sources
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Adding an external spec in ``packages.yaml`` allows Spack to use an external location,
but it does not prevent Spack from building packages from sources. In the above example,
Spack might choose for many valid reasons to start building and linking with the
latest version of OpenMPI rather than continue using the pre-installed OpenMPI versions.
To prevent this, the ``packages.yaml`` configuration also allows packages
to be flagged as non-buildable. The previous example could be modified to
be:
.. code-block:: yaml
packages:
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
buildable: False
The addition of the ``buildable`` flag tells Spack that it should never build
its own version of OpenMPI from sources, and it will instead always rely on a pre-built
OpenMPI.
.. note::
If ``concretizer:reuse`` is on (see :ref:`concretizer-options` for more information on that flag)
pre-built specs include specs already available from a local store, an upstream store, a registered
buildcache or specs marked as externals in ``packages.yaml``. If ``concretizer:reuse`` is off, only
external specs in ``packages.yaml`` are included in the list of pre-built specs.
If an external module is specified as not buildable, then Spack will load the
external module into the build environment, where it can be used for linking.

The ``buildable`` flag does not need to be paired with external packages.
It can also be used alone to forbid packages that may be
buggy or otherwise undesirable.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Non-buildable virtual packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Virtual packages in Spack can also be marked as non-buildable, with
external implementations provided instead. In the example above,
OpenMPI is configured as not buildable, but Spack will often prefer
other MPI implementations over the externally available OpenMPI. Every
MPI provider could be marked non-buildable individually, but it is more
convenient to flag the virtual package itself:
.. code-block:: yaml
packages:
mpi:
buildable: False
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
Spack can then use any of the listed external implementations of MPI
to satisfy a dependency, and will choose depending on the compiler and
architecture.
In cases where the concretizer is configured to reuse specs, and other ``mpi`` providers
(available via stores or buildcaches) are not wanted, Spack can be configured to require
specs matching only the available externals:
.. code-block:: yaml
packages:
mpi:
buildable: False
require:
- one_of: [
"openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64",
"openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug",
"openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
]
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
This configuration prevents any spec using MPI and originating from stores or buildcaches from being reused,
unless it matches the requirements under ``packages:mpi:require``. For more information on requirements, see
:ref:`package-requirements`.
.. _cmd-spack-external-find:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Automatically Find External Packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can run the :ref:`spack external find <spack-external-find>` command
to search for system-provided packages and add them to ``packages.yaml``.
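For example, to detect just CMake (a sketch):

.. code-block:: console

   $ spack external find cmake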
After running this command your ``packages.yaml`` may include new entries:
.. code-block:: yaml
packages:
cmake:
externals:
- spec: cmake@3.17.2
prefix: /usr
Generally this is useful for detecting a small set of commonly used packages;
for now it is mostly limited to finding build-only dependencies.
Specific limitations include:
* Packages are not discoverable by default: For a package to be
discoverable with ``spack external find``, it needs to add special
logic. See :ref:`here <make-package-findable>` for more details.
* The logic does not search through module files; it can only detect
packages with executables defined in ``PATH``. You can help Spack locate
externals that use module files by loading any associated modules for
packages that you want Spack to know about before running
``spack external find``.
* Spack does not overwrite existing entries in the package configuration:
If there is an external defined for a spec at any configuration scope,
then Spack will not add a new external entry (``spack config blame packages``
can help locate all external entries).
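For instance, to see which configuration scope each entry comes from (a sketch; output varies by site):

.. code-block:: console

   $ spack config blame packages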
.. _package-requirements:
--------------------
Package Requirements
--------------------
Spack can be configured to always use certain compilers, package
versions, and variants during concretization through package
requirements.
Package requirements are useful when you find yourself repeatedly
specifying the same constraints on the command line, and want Spack
to respect these constraints whether you mention them explicitly
or not. Another use case is specifying constraints that should apply
to all root specs in an environment, without having to repeat the
constraint everywhere.
Apart from that, the requirements configuration is more flexible than
command-line constraints, because it can specify constraints on packages
*when they occur* as a dependency. In contrast, on the command line it
is not possible to specify constraints on dependencies while also keeping
those dependencies optional.
^^^^^^^^^^^^^^^^^^^
Requirements syntax
^^^^^^^^^^^^^^^^^^^
The package requirements configuration is specified in ``packages.yaml``,
keyed by package name and expressed using the Spec syntax. In the simplest
case you can specify attributes that you always want the package to have
by providing a single spec string to ``require``:
.. code-block:: yaml
packages:
libfabric:
require: "@1.13.2"
In the above example, ``libfabric`` will always build with version 1.13.2. If you
need to compose multiple configuration scopes ``require`` accepts a list of
strings:
.. code-block:: yaml
packages:
libfabric:
require:
- "@1.13.2"
- "%gcc"
In this case ``libfabric`` will always build with version 1.13.2 **and** using GCC
as a compiler.
For more complex use cases, ``require`` also accepts a list of objects. These objects
must have either an ``any_of`` or a ``one_of`` field containing a list of spec strings,
and they can optionally have ``when`` and ``message`` attributes:
.. code-block:: yaml
packages:
openmpi:
require:
- any_of: ["@4.1.5", "%gcc"]
message: "in this example only 4.1.5 can build with other compilers"
``any_of`` is a list of specs. At least one of those specs must be satisfied,
and the concretized spec is also allowed to match more than one.
In the above example, that means you could build ``openmpi@4.1.5%gcc``,
``openmpi@4.1.5%clang`` or ``openmpi@3.9%gcc``, but
not ``openmpi@3.9%clang``.
If a custom message is provided, and the requirement is not satisfiable,
Spack will print the custom error message:
.. code-block:: console
$ spack spec openmpi@3.9%clang
==> Error: in this example only 4.1.5 can build with other compilers
We could express a similar requirement using the ``when`` attribute:
.. code-block:: yaml
packages:
openmpi:
require:
- any_of: ["%gcc"]
when: "@:4.1.4"
message: "in this example only 4.1.5 can build with other compilers"
In the example above, if the version turns out to be 4.1.4 or less, we require the compiler to be GCC.
For readability, Spack also allows a ``spec`` key accepting a string when there is only a single
constraint:
.. code-block:: yaml
packages:
openmpi:
require:
- spec: "%gcc"
when: "@:4.1.4"
message: "in this example only 4.1.5 can build with other compilers"
This code snippet and the one before it are semantically equivalent.
Finally, instead of ``any_of`` you can use ``one_of`` which also takes a list of specs. The final
concretized spec must match one and only one of them:
.. code-block:: yaml
packages:
mpich:
require:
- one_of: ["+cuda", "+rocm"]
In the example above, that means you could build ``mpich+cuda`` or ``mpich+rocm`` but not ``mpich+cuda+rocm``.
.. note::
For ``any_of`` and ``one_of``, the order of specs indicates a
preference: items that appear earlier in the list are preferred
(note that these preferences can be ignored in favor of others).
.. note::
When using a conditional requirement, Spack is allowed to actively avoid the triggering
condition (the ``when=...`` spec) if that leads to a concrete spec with better scores in
the optimization criteria. To check the current optimization criteria and their
priorities you can run ``spack solve zlib``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting default requirements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can also set default requirements for all packages under ``all``
like this:
.. code-block:: yaml
packages:
all:
require: '%clang'
which means every spec will be required to use ``clang`` as a compiler.
Note that in this case ``all`` represents a *default set of requirements* -
if there are specific package requirements, then the default requirements
under ``all`` are disregarded. For example, with a configuration like this:
.. code-block:: yaml
packages:
all:
require: '%clang'
cmake:
require: '%gcc'
Spack requires ``cmake`` to use ``gcc`` and all other nodes (including ``cmake``
dependencies) to use ``clang``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting requirements on virtual specs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A requirement on a virtual spec applies whenever that virtual is present in the DAG.
This can be useful for fixing which virtual provider you want to use:
.. code-block:: yaml
packages:
mpi:
require: 'mvapich2 %gcc'
With the configuration above the only allowed ``mpi`` provider is ``mvapich2 %gcc``.
Requirements on the virtual spec and on the specific provider are both applied, if
present. For instance with a configuration like:
.. code-block:: yaml
packages:
mpi:
require: 'mvapich2 %gcc'
mvapich2:
require: '~cuda'
you will use ``mvapich2~cuda %gcc`` as an ``mpi`` provider.
.. _package-preferences:
-------------------
Package Preferences
-------------------
In some cases package requirements can be too strong, and package
preferences are the better option. Package preferences do not impose
constraints on packages for particular versions or variant values;
they only set defaults. The concretizer is free to change
them if it must, due to other constraints, and it also prefers reusing
installed packages over building new ones that better match the
preferences.
Most package preferences (``compilers``, ``target`` and ``providers``)
can only be set globally under the ``all`` section of ``packages.yaml``:
.. code-block:: yaml
packages:
all:
compiler: [gcc@12.2.0, clang@12:, oneapi@2023:]
target: [x86_64_v3]
providers:
mpi: [mvapich2, mpich, openmpi]
These preferences override Spack's default and effectively reorder priorities
when looking for the best compiler, target or virtual package provider. Each
preference takes an ordered list of spec constraints, with earlier entries in
the list being preferred over later entries.
In the example above all packages prefer to be compiled with ``gcc@12.2.0``,
to target the ``x86_64_v3`` microarchitecture and to use ``mvapich2`` if they
depend on ``mpi``.
The ``variants`` and ``version`` preferences can be set under
package specific sections of the ``packages.yaml`` file:
.. code-block:: yaml
packages:
opencv:
variants: +debug
gperftools:
version: [2.2, 2.4, 2.3]
In this case, the preference for ``opencv`` is to build with debug options, while
``gperftools`` prefers version 2.2 over 2.4.
Any preference can be overridden on the command line if explicitly requested.
Preferences cannot overcome explicit constraints, as they only set a preferred
ordering among homogeneous attribute values. Going back to the example, if
``gperftools@2.3:`` was requested, then Spack will install version 2.4
since the most preferred version 2.2 is prohibited by the version constraint.
.. _package_permissions:
-------------------
Package Permissions
-------------------
Spack can be configured to assign permissions to the files installed
by a package.
In the ``packages.yaml`` file under ``permissions``, the attributes
``read``, ``write``, and ``group`` control the package
permissions. These attributes can be set per-package, or for all
packages under ``all``. If permissions are set under ``all`` and for a
specific package, the package-specific settings take precedence.
The ``read`` and ``write`` attributes take one of ``user``, ``group``,
or ``world``.
.. code-block:: yaml
packages:
all:
permissions:
write: group
group: spack
my_app:
permissions:
read: group
group: my_team
The permissions settings describe the broadest level of access to
installations of the specified packages. The execute permissions of
the file are set to the same level as read permissions for those files
that are executable. The default setting for ``read`` is ``world``,
and for ``write`` is ``user``. In the example above, installations of
``my_app`` will be installed with user and group permissions but no
world permissions, and owned by the group ``my_team``. All other
packages will be installed with user and group write privileges, and
world read privileges. Those packages will be owned by the group
``spack``.
The ``group`` attribute assigns a Unix-style group to a package. All
files installed by the package will be owned by the assigned group,
and the sticky group bit will be set on the install prefix and all
directories inside the install prefix. This will ensure that even
manually placed files within the install prefix are owned by the
assigned group. If no group is assigned, Spack will leave the default
OS behavior in place.
----------------------------
Assigning Package Attributes
----------------------------
You can assign class-level attributes in the configuration:
.. code-block:: yaml
packages:
mpileaks:
# Override existing attributes
url: http://www.somewhereelse.com/mpileaks-1.0.tar.gz
# ... or add new ones
x: 1
Attributes set this way will be accessible to any method executed
in the ``package.py`` file (e.g. the ``install()`` method). Values for these
attributes may be any value parseable by YAML.
These can only be applied to specific packages, not ``all`` or
virtual packages.
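As a hypothetical sketch, the overridden ``url`` and the new ``x`` attribute from the example above would then be readable inside the package like any other class attribute:

.. code-block:: python

   from spack.package import *

   class Mpileaks(Package):
       def install(self, spec, prefix):
           # Both values come from the packages.yaml overrides above.
           print(self.url)  # http://www.somewhereelse.com/mpileaks-1.0.tar.gz
           print(self.x)    # 1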

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -82,7 +82,7 @@ class already contains:
.. code-block:: python
depends_on("cmake", type="build")
depends_on('cmake', type='build')
If you need to specify a particular version requirement, you can
@@ -90,7 +90,7 @@ override this in your package:
.. code-block:: python
depends_on("cmake@2.8.12:", type="build")
depends_on('cmake@2.8.12:', type='build')
^^^^^^^^^^^^^^^^^^^
@@ -137,10 +137,10 @@ and without the :meth:`~spack.build_systems.cmake.CMakeBuilder.define` and
def cmake_args(self):
args = [
"-DWHATEVER:STRING=somevalue",
self.define("ENABLE_BROKEN_FEATURE", False),
self.define_from_variant("DETECT_HDF5", "hdf5"),
self.define_from_variant("THREADS"), # True if +threads
'-DWHATEVER:STRING=somevalue',
self.define('ENABLE_BROKEN_FEATURE', False),
self.define_from_variant('DETECT_HDF5', 'hdf5'),
self.define_from_variant('THREADS'), # True if +threads
]
return args
@@ -151,10 +151,10 @@ and CMake simply ignores the empty command line argument. For example the follow
.. code-block:: python
variant("example", default=True, when="@2.0:")
variant('example', default=True, when='@2.0:')
def cmake_args(self):
return [self.define_from_variant("EXAMPLE", "example")]
return [self.define_from_variant('EXAMPLE', 'example')]
will generate ``'cmake' '-DEXAMPLE=ON' ...`` when `@2.0: +example` is met, but will
result in ``'cmake' '' ...`` when the spec version is below ``2.0``.
@@ -193,9 +193,9 @@ a variant to control this:
.. code-block:: python
variant("build_type", default="RelWithDebInfo",
description="CMake build type",
values=("Debug", "Release", "RelWithDebInfo", "MinSizeRel"))
variant('build_type', default='RelWithDebInfo',
description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
However, not every CMake package accepts all four of these options.
Grep the ``CMakeLists.txt`` file to see if the default values are
@@ -205,9 +205,9 @@ package overrides the default variant with:
.. code-block:: python
variant("build_type", default="DebugRelease",
description="The build type to build",
values=("Debug", "Release", "DebugRelease"))
variant('build_type', default='DebugRelease',
description='The build type to build',
values=('Debug', 'Release', 'DebugRelease'))
For more information on ``CMAKE_BUILD_TYPE``, see:
https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html
@@ -250,7 +250,7 @@ generator is Ninja. To switch to the Ninja generator, simply add:
.. code-block:: python
generator("ninja")
generator = 'Ninja'
``CMakePackage`` defaults to "Unix Makefiles". If you switch to the
@@ -258,7 +258,7 @@ Ninja generator, make sure to add:
.. code-block:: python
depends_on("ninja", type="build")
depends_on('ninja', type='build')
to the package as well. Aside from that, you shouldn't need to do
anything else. Spack will automatically detect that you are using
@@ -288,7 +288,7 @@ like so:
.. code-block:: python
root_cmakelists_dir = "src"
root_cmakelists_dir = 'src'
Note that this path is relative to the root of the extracted tarball,
@@ -304,7 +304,7 @@ different sub-directory, simply override ``build_directory`` like so:
.. code-block:: python
build_directory = "my-build"
build_directory = 'my-build'
^^^^^^^^^^^^^^^^^^^^^^^^^
Build and install targets
@@ -324,8 +324,8 @@ library or build the documentation, you can add these like so:
.. code-block:: python
build_targets = ["all", "docs"]
install_targets = ["install", "docs"]
build_targets = ['all', 'docs']
install_targets = ['install', 'docs']
^^^^^^^
Testing

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -53,24 +53,18 @@ Install the oneAPI compilers::
Add the compilers to your ``compilers.yaml`` so spack can use them::
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/bin
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin/intel64
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin
Verify that the compilers are available::
spack compiler list
Note that 2024 and later releases do not include ``icc``. Before 2024,
the package layout was different::
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin/intel64
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin
The ``intel-oneapi-compilers`` package includes two families of
compilers:
* ``intel``: ``icc``, ``icpc``, ``ifort``. Intel's *classic*
compilers. 2024 and later releases contain ``ifort``, but not
``icc`` and ``icpc``.
compilers.
* ``oneapi``: ``icx``, ``icpx``, ``ifx``. Intel's new generation of
compilers based on LLVM.
@@ -95,8 +89,8 @@ Install the oneAPI compilers::
Add the compilers to your ``compilers.yaml`` so Spack can use them::
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/bin
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/bin
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin/intel64
spack compiler add `spack location -i intel-oneapi-compilers`/compiler/latest/linux/bin
Verify that the compilers are available::
@@ -152,7 +146,8 @@ Compilers
To use the compilers, add some information about the installation to
``compilers.yaml``. For most users, it is sufficient to do::
spack compiler add /opt/intel/oneapi/compiler/latest/bin
spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin/intel64
spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin
Adapt the paths above if you did not install the tools in the default
location. After adding the compilers, using them is the same
@@ -161,12 +156,6 @@ Another option is to manually add the configuration to
``compilers.yaml`` as described in :ref:`Compiler configuration
<compiler-config>`.
Before 2024, the directory structure was different::
spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin/intel64
spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin
Libraries
---------

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -90,7 +90,7 @@ and optimizers do require a paid license. In Spack, they are packaged as:
TODO: Confirm and possible change(!) the scope of MPI components (runtime
vs. devel) in current (and previous?) *cluster/professional/composer*
editions, i.e., presence in downloads, possibly subject to license
coverage(!); see `discussion in PR #4300
coverage(!); see `disussion in PR #4300
<https://github.com/spack/spack/pull/4300#issuecomment-305582898>`_. [NB:
An "mpi" subdirectory is not indicative of the full MPI SDK being present
(i.e., ``mpicc``, ..., and header files). The directory may just as well
@@ -392,7 +392,7 @@ See section
:ref:`Configuration Scopes <configuration-scopes>`
for an explanation about the different files
and section
:ref:`Build customization <packages-config>`
:ref:`Build customization <build-settings>`
for specifics and examples for ``packages.yaml`` files.
.. If your system administrator did not provide modules for pre-installed Intel
@@ -934,9 +934,9 @@ a *virtual* ``mkl`` package is declared in Spack.
.. code-block:: python
# Examples for absolute and conditional dependencies:
depends_on("mkl")
depends_on("mkl", when="+mkl")
depends_on("mkl", when="fftw=mkl")
depends_on('mkl')
depends_on('mkl', when='+mkl')
depends_on('mkl', when='fftw=mkl')
The ``MKLROOT`` environment variable (part of the documented API) will be set
during all stages of client package installation, and is available to both
@@ -972,8 +972,8 @@ a *virtual* ``mkl`` package is declared in Spack.
def configure_args(self):
args = []
...
args.append("--with-blas=%s" % self.spec["blas"].libs.ld_flags)
args.append("--with-lapack=%s" % self.spec["lapack"].libs.ld_flags)
args.append('--with-blas=%s' % self.spec['blas'].libs.ld_flags)
args.append('--with-lapack=%s' % self.spec['lapack'].libs.ld_flags)
...
.. tip::
@@ -989,13 +989,13 @@ a *virtual* ``mkl`` package is declared in Spack.
.. code-block:: python
self.spec["blas"].headers.include_flags
self.spec['blas'].headers.include_flags
and to generate linker options (``-L<dir> -llibname ...``), use the same as above,
.. code-block:: python
self.spec["blas"].libs.ld_flags
self.spec['blas'].libs.ld_flags
See
:ref:`MakefilePackage <makefilepackage>`

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -88,7 +88,7 @@ override the ``luarocks_args`` method like so:
.. code-block:: python
def luarocks_args(self):
return ["flag1", "flag2"]
return ['flag1', 'flag2']
One common use of this is to override warnings or flags for newer compilers, as in:

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -88,13 +88,13 @@ command-line. However, Makefiles that use ``?=`` for assignment honor
environment variables. Since Spack already sets ``CC``, ``CXX``, ``F77``,
and ``FC``, you won't need to worry about setting these variables. If
there are any other variables you need to set, you can do this in the
``setup_build_environment`` method:
``edit`` method:
.. code-block:: python
def setup_build_environment(self, env):
env.set("PREFIX", prefix)
env.set("BLASLIB", spec["blas"].libs.ld_flags)
def edit(self, spec, prefix):
env["PREFIX"] = prefix
env["BLASLIB"] = spec["blas"].libs.ld_flags
`cbench <https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/cbench/package.py>`_
@@ -140,7 +140,7 @@ Edit Makefile
Some Makefiles are just plain stubborn and will ignore command-line
variables. The only way to ensure that these packages build correctly
is to directly edit the Makefile. Spack provides a ``FileFilter`` class
and a ``filter`` method to help with this. For example:
and a ``filter_file`` method to help with this. For example:
.. code-block:: python

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -48,8 +48,8 @@ class automatically adds the following dependencies:
.. code-block:: python
depends_on("java", type=("build", "run"))
depends_on("maven", type="build")
depends_on('java', type=('build', 'run'))
depends_on('maven', type='build')
In the ``pom.xml`` file, you may see sections like:
@@ -72,8 +72,8 @@ should add:
.. code-block:: python
depends_on("java@7:", type="build")
depends_on("maven@3.5.4:", type="build")
depends_on('java@7:', type='build')
depends_on('maven@3.5.4:', type='build')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -88,9 +88,9 @@ the build phase. For example:
def build_args(self):
return [
"-Pdist,native",
"-Dtar",
"-Dmaven.javadoc.skip=true"
'-Pdist,native',
'-Dtar',
'-Dmaven.javadoc.skip=true'
]

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -86,8 +86,8 @@ the ``MesonPackage`` base class already contains:
.. code-block:: python
depends_on("meson", type="build")
depends_on("ninja", type="build")
depends_on('meson', type='build')
depends_on('ninja', type='build')
If you need to specify a particular version requirement, you can
@@ -95,8 +95,8 @@ override this in your package:
.. code-block:: python
depends_on("meson@0.43.0:", type="build")
depends_on("ninja", type="build")
depends_on('meson@0.43.0:', type='build')
depends_on('ninja', type='build')
^^^^^^^^^^^^^^^^^^^
@@ -121,7 +121,7 @@ override the ``meson_args`` method like so:
.. code-block:: python
def meson_args(self):
return ["--warnlevel=3"]
return ['--warnlevel=3']
This method can be used to pass flags as well as variables.

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -118,7 +118,7 @@ so ``PerlPackage`` contains:
.. code-block:: python
extends("perl")
extends('perl')
If your package requires a specific version of Perl, you should
@@ -132,14 +132,14 @@ properly. If your package uses ``Makefile.PL`` to build, add:
.. code-block:: python
depends_on("perl-extutils-makemaker", type="build")
depends_on('perl-extutils-makemaker', type='build')
If your package uses ``Build.PL`` to build, add:
.. code-block:: python
depends_on("perl-module-build", type="build")
depends_on('perl-module-build', type='build')
^^^^^^^^^^^^^^^^^
@@ -165,80 +165,14 @@ arguments to ``Makefile.PL`` or ``Build.PL`` by overriding
.. code-block:: python
def configure_args(self):
expat = self.spec["expat"].prefix
expat = self.spec['expat'].prefix
return [
"EXPATLIBPATH={0}".format(expat.lib),
"EXPATINCPATH={0}".format(expat.include),
'EXPATLIBPATH={0}'.format(expat.lib),
'EXPATINCPATH={0}'.format(expat.include),
]
^^^^^^^
Testing
^^^^^^^
``PerlPackage`` provides a simple stand-alone test of the successfully
installed package to confirm that installed perl module(s) can be used.
These tests can be performed any time after the installation using
``spack -v test run``. (For more information on the command, see
:ref:`cmd-spack-test-run`.)
The base class automatically detects perl modules based on the presence
of ``*.pm`` files under the package's library directory. For example,
the files under ``perl-bignum``'s perl library are:
.. code-block:: console
$ find . -name "*.pm"
./bigfloat.pm
./bigrat.pm
./Math/BigFloat/Trace.pm
./Math/BigInt/Trace.pm
./Math/BigRat/Trace.pm
./bigint.pm
./bignum.pm
which results in the package having the ``use_modules`` property containing:
.. code-block:: python
use_modules = [
"bigfloat",
"bigrat",
"Math::BigFloat::Trace",
"Math::BigInt::Trace",
"Math::BigRat::Trace",
"bigint",
"bignum",
]
.. note::
This list can often be used to catch missing dependencies.
If the list is somehow wrong, you can provide the names of the modules
yourself by overriding ``use_modules`` like so:
.. code-block:: python
use_modules = ["bigfloat", "bigrat", "bigint", "bignum"]
If you only want a subset of the automatically detected modules to be
tested, you could instead define the ``skip_modules`` property on the
package. So, instead of overriding ``use_modules`` as shown above, you
could define the following:
.. code-block:: python
skip_modules = [
"Math::BigFloat::Trace",
"Math::BigInt::Trace",
"Math::BigRat::Trace",
]
for the same use tests.
^^^^^^^^^^^^^^^^^^^^^
Alternatives to Spack
^^^^^^^^^^^^^^^^^^^^^

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -718,45 +718,23 @@ command-line tool, or C/C++/Fortran program with optional Python
modules? The former should be prepended with ``py-``, while the
latter should not.
""""""""""""""""""""""""""""""
``extends`` vs. ``depends_on``
""""""""""""""""""""""""""""""
""""""""""""""""""""""
extends vs. depends_on
""""""""""""""""""""""
This is very similar to the naming dilemma above, with a slight twist.
As mentioned in the :ref:`Packaging Guide <packaging_extensions>`,
``extends`` and ``depends_on`` are very similar, but ``extends`` ensures
that the extension and extendee share the same prefix in views.
This allows the user to import a Python module without
having to add that module to ``PYTHONPATH``.
Additionally, ``extends("python")`` adds a dependency on the package
``python-venv``. This improves isolation from the system, whether
it's during the build or at runtime: user and system site packages
cannot accidentally be used by any package that ``extends("python")``.
As a rule of thumb: if a package does not install any Python modules
of its own, and merely puts a Python script in the ``bin`` directory,
then there is no need for ``extends``. If the package installs modules
in the ``site-packages`` directory, it requires ``extends``.
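For instance, a hypothetical recipe that is not a ``PythonPackage`` but still installs Python bindings into ``site-packages`` would declare (a sketch):

.. code-block:: python

   from spack.package import *

   class Foo(CMakePackage):
       # Installs Python bindings under <prefix>/lib/pythonX.Y/site-packages,
       # so it must share a prefix with python in views.
       extends("python")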
"""""""""""""""""""""""""""""""""""""
Executing ``python`` during the build
"""""""""""""""""""""""""""""""""""""
Whenever you need to execute a Python command or pass the path of the
Python interpreter to the build system, it is best to use the global
variable ``python`` directly. For example:
.. code-block:: python
@run_before("install")
def recythonize(self):
python("setup.py", "clean") # use the `python` global
As mentioned in the previous section, ``extends("python")`` adds an
automatic dependency on ``python-venv``, which is a virtual environment
that guarantees build isolation. The ``python`` global always refers to
the correct Python interpreter, whether the package uses ``extends("python")``
or ``depends_on("python")``.
When deciding between ``extends`` and ``depends_on``, the best rule of
thumb is to check the installation prefix. If Python libraries are
installed to ``<prefix>/lib/pythonX.Y/site-packages``, then you
should use ``extends``. If Python libraries are installed elsewhere
or the only files that get installed reside in ``<prefix>/bin``, then
don't use ``extends``.
^^^^^^^^^^^^^^^^^^^^^
Alternatives to Spack
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -83,7 +83,7 @@ base class already contains:
.. code-block:: python
depends_on("qt", type="build")
depends_on('qt', type='build')
If you want to specify a particular version requirement, or need to
@@ -91,7 +91,7 @@ link to the ``qt`` libraries, you can override this in your package:
.. code-block:: python
depends_on("qt@5.6.0:")
depends_on('qt@5.6.0:')
^^^^^^^^^^^^^^^^^^^^^^^^^^
Passing arguments to qmake
@@ -103,7 +103,7 @@ override the ``qmake_args`` method like so:
.. code-block:: python
def qmake_args(self):
return ["-recursive"]
return ['-recursive']
This method can be used to pass flags as well as variables.
@@ -118,7 +118,7 @@ sub-directory by adding the following to the package:
.. code-block:: python
build_directory = "src"
build_directory = 'src'
^^^^^^^^^^^^^^^^^^^^^^
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -163,28 +163,28 @@ attributes that can be used to set ``homepage``, ``url``, ``list_url``, and
.. code-block:: python
cran = "caret"
cran = 'caret'
is equivalent to:
.. code-block:: python
homepage = "https://cloud.r-project.org/package=caret"
url = "https://cloud.r-project.org/src/contrib/caret_6.0-86.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/caret"
homepage = 'https://cloud.r-project.org/package=caret'
url = 'https://cloud.r-project.org/src/contrib/caret_6.0-86.tar.gz'
list_url = 'https://cloud.r-project.org/src/contrib/Archive/caret'
Likewise, the following ``bioc`` attribute:
.. code-block:: python
bioc = "BiocVersion"
bioc = 'BiocVersion'
is equivalent to:
.. code-block:: python
homepage = "https://bioconductor.org/packages/BiocVersion/"
git = "https://git.bioconductor.org/packages/BiocVersion"
homepage = 'https://bioconductor.org/packages/BiocVersion/'
git = 'https://git.bioconductor.org/packages/BiocVersion'
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -200,7 +200,7 @@ base class contains:
.. code-block:: python
extends("r")
extends('r')
Take a close look at the homepage for ``caret``. If you look at the
@@ -209,7 +209,7 @@ You should add this to your package like so:
.. code-block:: python
depends_on("r@3.2.0:", type=("build", "run"))
depends_on('r@3.2.0:', type=('build', 'run'))
^^^^^^^^^^^^^^
@@ -227,7 +227,7 @@ and list all of their dependencies in the following sections:
* LinkingTo
As far as Spack is concerned, all 3 of these dependency types
correspond to ``type=("build", "run")``, so you don't have to worry
correspond to ``type=('build', 'run')``, so you don't have to worry
about the details. If you are curious what they mean,
https://github.com/spack/spack/issues/2951 has a pretty good summary:
@@ -330,7 +330,7 @@ the dependency:
.. code-block:: python
depends_on("r-lattice@0.20:", type=("build", "run"))
depends_on('r-lattice@0.20:', type=('build', 'run'))
^^^^^^^^^^^^^^^^^^
@@ -361,20 +361,20 @@ like so:
.. code-block:: python
def configure_args(self):
mpi_name = self.spec["mpi"].name
mpi_name = self.spec['mpi'].name
# The type of MPI. Supported values are:
# OPENMPI, LAM, MPICH, MPICH2, or CRAY
if mpi_name == "openmpi":
Rmpi_type = "OPENMPI"
elif mpi_name == "mpich":
Rmpi_type = "MPICH2"
if mpi_name == 'openmpi':
Rmpi_type = 'OPENMPI'
elif mpi_name == 'mpich':
Rmpi_type = 'MPICH2'
else:
raise InstallError("Unsupported MPI type")
raise InstallError('Unsupported MPI type')
return [
"--with-Rmpi-type={0}".format(Rmpi_type),
"--with-mpi={0}".format(spec["mpi"].prefix),
'--with-Rmpi-type={0}'.format(Rmpi_type),
'--with-mpi={0}'.format(spec['mpi'].prefix),
]
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -84,8 +84,8 @@ The ``*.gemspec`` file may contain something like:
.. code-block:: ruby
summary = "An implementation of the AsciiDoc text processor and publishing toolchain"
description = "A fast, open source text processor and publishing toolchain for converting AsciiDoc content to HTML 5, DocBook 5, and other formats."
summary = 'An implementation of the AsciiDoc text processor and publishing toolchain'
description = 'A fast, open source text processor and publishing toolchain for converting AsciiDoc content to HTML 5, DocBook 5, and other formats.'
Either of these can be used for the description of the Spack package.
@@ -98,7 +98,7 @@ The ``*.gemspec`` file may contain something like:
.. code-block:: ruby
homepage = "https://asciidoctor.org"
homepage = 'https://asciidoctor.org'
This should be used as the official homepage of the Spack package.
@@ -112,21 +112,21 @@ the base class contains:
.. code-block:: python
extends("ruby")
extends('ruby')
The ``*.gemspec`` file may contain something like:
.. code-block:: ruby
required_ruby_version = ">= 2.3.0"
required_ruby_version = '>= 2.3.0'
This can be added to the Spack package using:
.. code-block:: python
depends_on("ruby@2.3.0:", type=("build", "run"))
depends_on('ruby@2.3.0:', type=('build', 'run'))
^^^^^^^^^^^^^^^^^
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -124,7 +124,7 @@ are wrong, you can provide the names yourself by overriding
.. code-block:: python
import_modules = ["PyQt5"]
import_modules = ['PyQt5']
These tests often catch missing dependencies and non-RPATHed
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -63,8 +63,8 @@ run package-specific unit tests.
.. code-block:: python
def installtest(self):
with working_dir("test"):
pytest = which("py.test")
with working_dir('test'):
pytest = which('py.test')
pytest()
@@ -93,7 +93,7 @@ the following dependency automatically:
.. code-block:: python
depends_on("python@2.5:", type="build")
depends_on('python@2.5:', type='build')
Waf only supports Python 2.5 and up.
@@ -113,7 +113,7 @@ phase, you can use:
args = []
if self.run_tests:
args.append("--test")
args.append('--test')
return args
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
View File
@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -199,7 +199,6 @@ def setup(sphinx):
("py:class", "contextlib.contextmanager"),
("py:class", "module"),
("py:class", "_io.BufferedReader"),
("py:class", "_io.BytesIO"),
("py:class", "unittest.case.TestCase"),
("py:class", "_frozen_importlib_external.SourceFileLoader"),
("py:class", "clingo.Control"),
@@ -216,7 +215,6 @@ def setup(sphinx):
("py:class", "spack.spec.InstallStatus"),
("py:class", "spack.spec.SpecfileReaderBase"),
("py:class", "spack.install_test.Pb"),
("py:class", "spack.filesystem_view.SimpleFilesystemView"),
]
# The reST default role (used for this markup: `text`) to use for all documents.
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -145,25 +145,6 @@ hosts when making ``ssl`` connections. Set to ``false`` to disable, and
tools like ``curl`` will use their ``--insecure`` options. Disabling
this can expose you to attacks. Use at your own risk.
--------------------
``ssl_certs``
--------------------
Path to custom certificates for SSL verification. The value can be a
filesystem path, or an environment variable that expands to an absolute file path.
The default value is set to the environment variable ``SSL_CERT_FILE``
to use the same syntax used by many other applications that automatically
detect custom certificates.
When ``url_fetch_method:curl``, ``config:ssl_certs`` should resolve to
a single file. Spack will then set the environment variable ``CURL_CA_BUNDLE``
in the subprocess calling ``curl``.
If ``url_fetch_method:urllib``, then both files and directories are supported, i.e.
``config:ssl_certs:$SSL_CERT_FILE`` or ``config:ssl_certs:$SSL_CERT_DIR``
will work.
In all cases the expanded path must be absolute for Spack to use the certificates.
Certificates relative to an environment can be created by prepending the path variable
with the Spack configuration variable ``$env``.
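The resolution described above is roughly equivalent to the following sketch
(illustrative only, not Spack's actual implementation):

.. code-block:: python

   import os

   def resolve_ssl_certs(value):
       """Expand something like ``$SSL_CERT_FILE`` and keep the result
       only if it is an absolute path that exists."""
       path = os.path.expandvars(value)
       if os.path.isabs(path) and os.path.exists(path):
           return path
       return None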
--------------------
``checksum``
--------------------
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -17,7 +17,7 @@ case you want to skip directly to specific docs:
* :ref:`config.yaml <config-yaml>`
* :ref:`mirrors.yaml <mirrors>`
* :ref:`modules.yaml <modules>`
* :ref:`packages.yaml <packages-config>`
* :ref:`packages.yaml <build-settings>`
* :ref:`repos.yaml <repositories>`
You can also add any of these as inline configuration in the YAML
@@ -73,12 +73,9 @@ are six configuration scopes. From lowest to highest:
Spack instance per project) or for site-wide settings on a multi-user
machine (e.g., for a common Spack instance).
#. **plugin**: Read from a Python project's entry points. Settings here affect
all instances of Spack running with the same Python installation. This scope takes higher precedence than site, system, and default scopes.
#. **user**: Stored in the home directory: ``~/.spack/``. These settings
affect all instances of Spack and take higher precedence than site,
system, plugin, or defaults scopes.
system, or defaults scopes.
#. **custom**: Stored in a custom directory specified by ``--config-scope``.
If multiple scopes are listed on the command line, they are ordered
@@ -199,45 +196,6 @@ with MPICH. You can create different configuration scopes for use with
mpi: [mpich]
.. _plugin-scopes:
^^^^^^^^^^^^^
Plugin scopes
^^^^^^^^^^^^^
.. note::
Python version >= 3.8 is required to enable plugin configuration.
Spack can be made aware of configuration scopes that are installed as part of a python package. To do so, register a function that returns the scope's path to the ``"spack.config"`` entry point. Consider the Python package ``my_package`` that includes Spack configurations:
.. code-block:: console
my-package/
├── src
│   ├── my_package
│   │   ├── __init__.py
│   │   └── spack/
│   │       └── config.yaml
└── pyproject.toml
adding the following to ``my_package``'s ``pyproject.toml`` will make ``my_package``'s ``spack/`` configurations visible to Spack when ``my_package`` is installed:
.. code-block:: toml
[project.entry_points."spack.config"]
my_package = "my_package:get_config_path"
The function ``my_package.get_config_path`` in ``my_package/__init__.py`` might look like
.. code-block:: python
import importlib.resources
def get_config_path():
dirname = importlib.resources.files("my_package").joinpath("spack")
if dirname.exists():
return str(dirname)
.. _platform-scopes:
------------------------
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -9,96 +9,24 @@
Container Images
================
Spack :ref:`environments` can easily be turned into container images. This page
outlines two ways in which this can be done:
1. By installing the environment on the host system, and copying the installations
into the container image. This approach does not require any tools like Docker
or Singularity to be installed.
2. By generating a Docker or Singularity recipe that can be used to build the
container image. In this approach, Spack builds the software inside the
container runtime, not on the host system.
The first approach is easiest if you already have an installed environment,
the second approach gives more control over the container image.
---------------------------
From existing installations
---------------------------
If you already have a Spack environment installed on your system, you can
share the binaries as an OCI compatible container image. To get started you
just have to configure an OCI registry and run ``spack buildcache push``.
.. code-block:: console
# Create and install an environment in the current directory
spack env create -d .
spack -e . add pkg-a pkg-b
spack -e . install
# Configure the registry
spack -e . mirror add --oci-username ... --oci-password ... container-registry oci://example.com/name/image
# Push the image
spack -e . buildcache push --update-index --base-image ubuntu:22.04 --tag my_env container-registry
The resulting container image can then be run as follows:
.. code-block:: console
$ docker run -it example.com/name/image:my_env
The image generated by Spack consists of the specified base image with each package from the
environment as a separate layer on top. The image is minimal by construction: it only contains the
environment roots and their runtime dependencies.
.. note::
When using registries like GHCR and Docker Hub, the ``--oci-password`` flag is not
the password for your account, but a personal access token you need to generate separately.
The specified ``--base-image`` should have a libc that is compatible with the host system.
For example if your host system is Ubuntu 20.04, you can use ``ubuntu:20.04``, ``ubuntu:22.04``
or newer: the libc in the container image must be at least the version of the host system,
assuming ABI compatibility. It is also perfectly fine to use a completely different
Linux distribution as long as the libc is compatible.
For convenience, Spack also turns the OCI registry into a :ref:`build cache <binary_caches_oci>`,
so that future ``spack install`` of the environment will simply pull the binaries from the
registry instead of doing source builds. The flag ``--update-index`` is needed to make Spack
take the build cache into account when concretizing.
.. note::
When generating container images in CI, the approach above is recommended when CI jobs
already run in a sandboxed environment. You can simply use ``spack`` directly
in the CI job and push the resulting image to a registry. Subsequent CI jobs should
run faster because Spack can install from the same registry instead of rebuilding from
sources.
---------------------------------------------
Generating recipes for Docker and Singularity
---------------------------------------------
Apart from copying existing installations into container images, Spack can also
generate recipes for container images. This is useful if you want to run Spack
itself in a sandboxed environment instead of on the host system.
Since recipes need a little bit more boilerplate than
Spack :ref:`environments` are a great tool to create container images, but
preparing one that is suitable for production requires some more boilerplate
than just:
.. code-block:: docker
COPY spack.yaml /environment
RUN spack -e /environment install
Spack provides a command to generate customizable recipes for container images. Customizations
include minimizing the size of the image, installing packages in the base image using the system
package manager, and setting up a proper entrypoint to run the image.
Additional actions may be needed to minimize the size of the
container, or to update the system software that is installed in the base
image, or to set up a proper entrypoint to run the image. These tasks are
usually both necessary and repetitive, so Spack comes with a command
to generate recipes for container images starting from a ``spack.yaml``.
~~~~~~~~~~~~~~~~~~~~
--------------------
A Quick Introduction
~~~~~~~~~~~~~~~~~~~~
--------------------
Consider having a Spack environment like the following:
@@ -109,8 +37,8 @@ Consider having a Spack environment like the following:
- gromacs+mpi
- mpich
Producing a ``Dockerfile`` from it is as simple as changing directories to
where the ``spack.yaml`` file is stored and running the following command:
Producing a ``Dockerfile`` from it is as simple as moving to the directory
where the ``spack.yaml`` file is stored and giving the following command:
.. code-block:: console
@@ -176,9 +104,9 @@ configuration are discussed in details in the sections below.
.. _container_spack_images:
~~~~~~~~~~~~~~~~~~~~~~~~~~
--------------------------
Spack Images on Docker Hub
~~~~~~~~~~~~~~~~~~~~~~~~~~
--------------------------
Docker images with Spack preinstalled and ready to be used are
built when a release is tagged, or nightly on ``develop``. The images
@@ -194,15 +122,15 @@ The OS that are currently supported are summarized in the table below:
* - Operating System
- Base Image
- Spack Image
* - Ubuntu 18.04
- ``ubuntu:18.04``
- ``spack/ubuntu-bionic``
* - Ubuntu 20.04
- ``ubuntu:20.04``
- ``spack/ubuntu-focal``
* - Ubuntu 22.04
- ``ubuntu:22.04``
- ``spack/ubuntu-jammy``
* - Ubuntu 24.04
- ``ubuntu:24.04``
- ``spack/ubuntu-noble``
* - CentOS 7
- ``centos:7``
- ``spack/centos7``
@@ -227,12 +155,12 @@ The OS that are currently supported are summarized in the table below:
* - Rocky Linux 9
- ``rockylinux:9``
- ``spack/rockylinux9``
* - Fedora Linux 39
- ``fedora:39``
- ``spack/fedora39``
* - Fedora Linux 40
- ``fedora:40``
- ``spack/fedora40``
* - Fedora Linux 37
- ``fedora:37``
- ``spack/fedora37``
* - Fedora Linux 38
- ``fedora:38``
- ``spack/fedora38``
@@ -248,9 +176,9 @@ by Spack use them as default base images for their ``build`` stage,
even though mechanisms to use custom base images provided by users are
available to accommodate complex use cases.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Configuring the Container Recipe
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
---------------------------------
Creating Images From Environments
---------------------------------
Any Spack Environment can be used for the automatic generation of container
recipes. Sensible defaults are provided for things like the base image or the
@@ -291,18 +219,18 @@ under the ``container`` attribute of environments:
A detailed description of the options available can be found in the :ref:`container_config_options` section.
~~~~~~~~~~~~~~~~~~~
-------------------
Setting Base Images
~~~~~~~~~~~~~~~~~~~
-------------------
The ``images`` subsection is used to select both the image where
Spack builds the software and the image where the built software
is installed. This attribute can be set in different ways and
which one to use depends on the use case at hand.
""""""""""""""""""""""""""""""""""""""""
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Use Official Spack Images From Dockerhub
""""""""""""""""""""""""""""""""""""""""
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To generate a recipe that uses an official Docker image from the
Spack organization to build the software and the corresponding official OS image
@@ -507,9 +435,9 @@ responsibility to ensure that:
Therefore we don't recommend its use in cases that can be otherwise
covered by the simplified mode shown first.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
----------------------------
Singularity Definition Files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
----------------------------
In addition to producing recipes in ``Dockerfile`` format Spack can produce
Singularity Definition Files by just changing the value of the ``format``
@@ -530,9 +458,9 @@ attribute:
The minimum version of Singularity required to build a SIF (Singularity Image Format)
image from the recipes generated by Spack is ``3.5.3``.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
------------------------------
Extending the Jinja2 Templates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
------------------------------
The Dockerfile and the Singularity definition file that Spack can generate are based on
a few Jinja2 templates that are rendered according to the environment being containerized.
@@ -653,9 +581,9 @@ The recipe that gets generated contains the two extra instruction that we added
.. _container_config_options:
~~~~~~~~~~~~~~~~~~~~~~~
-----------------------
Configuration Reference
~~~~~~~~~~~~~~~~~~~~~~~
-----------------------
The tables below describe all the configuration options that are currently supported
to customize the generation of container recipes:
@@ -752,13 +680,13 @@ to customize the generation of container recipes:
- Description string
- No
~~~~~~~~~~~~~~
--------------
Best Practices
~~~~~~~~~~~~~~
--------------
"""
^^^
MPI
"""
^^^
Because OpenMPI, the Spack default MPI implementation, depends on Fortran,
consider adding ``gfortran`` to the ``apt-get install`` list.
@@ -769,9 +697,9 @@ For execution on HPC clusters, it can be helpful to import the docker
image into Singularity in order to start a program with an *external*
MPI. Otherwise, also add ``openssh-server`` to the ``apt-get install`` list.
""""
^^^^
CUDA
""""
^^^^
Starting from CUDA 9.0, Nvidia provides minimal CUDA images based on
Ubuntu. Please see `their instructions <https://hub.docker.com/r/nvidia/cuda/>`_.
Avoid double-installing CUDA by adding, e.g.
@@ -790,9 +718,9 @@ to your ``spack.yaml``.
Users will either need ``nvidia-docker`` or e.g. Singularity to *execute*
device kernels.
"""""""""""""""""""""""""
^^^^^^^^^^^^^^^^^^^^^^^^^
Docker on Windows and OSX
"""""""""""""""""""""""""
^^^^^^^^^^^^^^^^^^^^^^^^^
On Mac OS and Windows, docker runs on a hypervisor that is not allocated much
memory by default, and some spack packages may fail to build due to lack of
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -357,23 +357,91 @@ If there is a hook that you would like and is missing, you can propose to add a
``pre_install(spec)``
"""""""""""""""""""""
A ``pre_install`` hook is run within the install subprocess, directly before the install starts.
It expects a single argument of a spec.
A ``pre_install`` hook is run within an install subprocess, directly before
the install starts. It expects a single argument of a spec, and is run in
a multiprocessing subprocess. Note that if you see ``pre_install`` functions associated with packages, these are not hooks
as we have defined them here, but rather callback functions associated with
a package install.
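A minimal sketch of such a hook, assuming it lives in a module that Spack's
hook machinery discovers (e.g. under ``lib/spack/spack/hooks/``):

.. code-block:: python

   def pre_install(spec):
       # Runs in the install subprocess, just before the build starts.
       print("about to build {0}".format(spec.name))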
"""""""""""""""""""""""""""""""""""""
``post_install(spec, explicit=None)``
"""""""""""""""""""""""""""""""""""""
""""""""""""""""""""""
``post_install(spec)``
""""""""""""""""""""""
A ``post_install`` hook is run within the install subprocess, directly after the install finishes,
but before the build stage is removed and the spec is registered in the database. It expects two
arguments: spec and an optional boolean indicating whether this spec is being installed explicitly.
A ``post_install`` hook is run within an install subprocess, directly after
the install finishes, but before the build stage is removed. If you
write one of these hooks, you should expect it to accept a spec as the only
argument. This is run in a multiprocessing subprocess. This ``post_install`` is
also seen in packages, but in this context not related to the hooks described
here.
""""""""""""""""""""""""""""""""""""""""""""""""""""
``pre_uninstall(spec)`` and ``post_uninstall(spec)``
""""""""""""""""""""""""""""""""""""""""""""""""""""
These hooks are currently used for cleaning up module files after uninstall.
""""""""""""""""""""""""""
``on_install_start(spec)``
""""""""""""""""""""""""""
This hook is run at the beginning of ``lib/spack/spack/installer.py``,
in the install function of a ``PackageInstaller``,
and importantly is not part of the build process, but runs before it. This is when
we have just grabbed the task and are preparing to install. If you
write a hook of this type, it should accept the spec as its only argument.
.. code-block:: python
def on_install_start(spec):
"""On start of an install, we want to...
"""
print('on_install_start')
""""""""""""""""""""""""""""
``on_install_success(spec)``
""""""""""""""""""""""""""""
This hook is run on a successful install, and is also run inside the build
process, akin to ``post_install``. The main difference is that this hook
is run outside of the context of the stage directory, meaning after the
build stage has been removed and the user is alerted that the install was
successful. If you need to write a hook that is run on success of a particular
phase, you should use ``on_phase_success``.
""""""""""""""""""""""""""""
``on_install_failure(spec)``
""""""""""""""""""""""""""""
This hook is run given an install failure that happens outside of the build
subprocess, but somewhere in ``installer.py`` when something else goes wrong.
If you need to write a hook that is relevant to a failure within a build
process, you would want to instead use ``on_phase_failure``.
"""""""""""""""""""""""""""
``on_install_cancel(spec)``
"""""""""""""""""""""""""""
The same, but triggered if a spec install is cancelled for any reason.
"""""""""""""""""""""""""""""""""""""""""""""""
``on_phase_success(pkg, phase_name, log_file)``
"""""""""""""""""""""""""""""""""""""""""""""""
This hook is run within the install subprocess, and specifically when a phase
successfully finishes. Since we are interested in the package, the name of
the phase, and any output from it, we require:
- **pkg**: the package variable, which also has the attached spec at ``pkg.spec``
- **phase_name**: the name of the phase that was successful (e.g., configure)
- **log_file**: the path to the file with output, in case you need to inspect or otherwise interact with it.
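For example, a hook that simply logs the outcome might look like this sketch:

.. code-block:: python

   def on_phase_success(pkg, phase_name, log_file):
       # pkg.spec identifies what was built; log_file can be inspected
       # or archived for later analysis.
       print("{0}: phase '{1}' succeeded, log at {2}".format(
           pkg.spec.name, phase_name, log_file))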
"""""""""""""""""""""""""""""""""""""""""""""
``on_phase_error(pkg, phase_name, log_file)``
"""""""""""""""""""""""""""""""""""""""""""""
In the case of an error during a phase, we might want to trigger some event
with a hook, and this is the purpose of this particular hook. Akin to
``on_phase_success`` we require the same variables - the package that failed,
the name of the phase, and the log file where we might find errors.
^^^^^^^^^^^^^^^^^^^^^^
@@ -552,11 +620,11 @@ With either interpreter you can run a single command:
.. code-block:: console
$ spack python -c 'from spack.spec import Spec; Spec("python").concretized()'
...
$ spack python -c 'import distro; distro.linux_distribution()'
('Ubuntu', '18.04', 'Bionic Beaver')
$ spack python -i ipython -c 'from spack.spec import Spec; Spec("python").concretized()'
Out[1]: ...
$ spack python -i ipython -c 'import distro; distro.linux_distribution()'
Out[1]: ('Ubuntu', '18.04', 'Bionic Beaver')
or a file:
@@ -1071,9 +1139,9 @@ Announcing a release
We announce releases in all of the major Spack communication channels.
Publishing the release takes care of GitHub. The remaining channels are
X, Slack, and the mailing list. Here are the steps:
Twitter, Slack, and the mailing list. Here are the steps:
#. Announce the release on X.
#. Announce the release on Twitter.
* Compose the tweet on the ``@spackpm`` account per the
``spack-twitter`` slack channel.
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -142,17 +142,6 @@ user's prompt to begin with the environment name in brackets.
$ spack env activate -p myenv
[myenv] $ ...
The ``activate`` command can also be used to create a new environment if it does not already
exist.
.. code-block:: console
$ spack env activate --create -p myenv
# ...
# [creates if myenv does not exist yet]
# ...
[myenv] $ ...
To deactivate an environment, use the command:
.. code-block:: console
@@ -172,36 +161,21 @@ environment will remove the view from the user environment.
Anonymous Environments
^^^^^^^^^^^^^^^^^^^^^^
Apart from managed environments, Spack also supports anonymous environments.
Anonymous environments can be placed in any directory of your choice.
.. note::
When uninstalling packages, Spack asks the user to confirm the removal of packages
that are still used in a managed environment. This is not the case for anonymous
environments.
To create an anonymous environment, use one of the following commands:
Any directory can be treated as an environment if it contains a file
``spack.yaml``. To load an anonymous environment, use:
.. code-block:: console
$ spack env create --dir my_env
$ spack env create ./my_env
$ spack env activate -d /path/to/directory
As a shorthand, you can also create an anonymous environment upon activation if it does not
already exist:
Anonymous environments can be created in place using the command:
.. code-block:: console
$ spack env activate --create ./my_env
For convenience, Spack can also place an anonymous environment in a temporary directory for you:
.. code-block:: console
$ spack env activate --temp
$ spack env create -d .
In this case Spack simply creates a ``spack.yaml`` file in the requested
directory.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Environment Sensitive Commands
@@ -427,23 +401,6 @@ that git clone if ``foo`` is in the environment.
Further development on ``foo`` can be tested by reinstalling the environment,
and eventually committed and pushed to the upstream git repo.
If the package being developed supports out-of-source builds then users can use the
``--build_directory`` flag to control the location and name of the build directory.
This is a shortcut to set the ``package_attributes:build_directory`` in the
``packages`` configuration (see :ref:`assigning-package-attributes`).
The supplied location will become the build-directory for that package in all future builds.
.. warning::
Potential pitfalls of setting the build directory
Spack does not check for out-of-source build compatibility with the packages, and
so the onus of making sure the package supports out-of-source builds is on
the user.
For example, most ``autotools`` and ``makefile`` packages do not support out-of-source builds,
while all ``CMake`` packages do.
Understanding these nuances is up to the software developers, and we strongly encourage
developers to only redirect the build directory if they understand their package's
build system.
^^^^^^^
Loading
^^^^^^^
@@ -460,125 +417,6 @@ Sourcing that file in Bash will make the environment available to the
user; and can be included in ``.bashrc`` files, etc. The ``loads``
file may also be copied out of the environment, renamed, etc.
.. _environment_include_concrete:
------------------------------
Included Concrete Environments
------------------------------
Spack environments can create an environment based on information in already
established environments. You can think of it as a combination of existing
environments. It will gather information from the existing environment's
``spack.lock`` and use that during the creation of this included concrete
environment. When an included concrete environment is created it will generate
a ``spack.lock`` file for the newly created environment.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Creating included environments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To create a combined concrete environment, you must have at least one existing
concrete environment. You will use the command ``spack env create`` with the
argument ``--include-concrete`` followed by the name or path of the environment
you'd like to include. Here is an example of how to create a combined environment
from the command line.
.. code-block:: console
$ spack env create myenv
$ spack -e myenv add python
$ spack -e myenv concretize
$ spack env create --include-concrete myenv included_env
You can also include an environment directly in the ``spack.yaml`` file. It
involves adding the ``include_concrete`` heading in the yaml followed by the
absolute path to the independent environments.
.. code-block:: yaml
spack:
specs: []
concretizer:
unify: true
include_concrete:
- /absolute/path/to/environment1
- /absolute/path/to/environment2
Once the ``spack.yaml`` has been updated you must concretize the environment to
get the concrete specs from the included environments.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Updating an included environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If changes were made to the base environment and you want that reflected in the
included environment you will need to reconcretize both the base environment and the
included environment for the change to be implemented. For example:
.. code-block:: console
$ spack env create myenv
$ spack -e myenv add python
$ spack -e myenv concretize
$ spack env create --include-concrete myenv included_env
$ spack -e myenv find
==> In environment myenv
==> Root specs
python
==> 0 installed packages
$ spack -e included_env find
==> In environment included_env
==> No root specs
==> Included specs
python
==> 0 installed packages
Here we see that ``included_env`` has access to the python package through
the ``myenv`` environment. But if we were to add another spec to ``myenv``,
``included_env`` will not be able to access the new information.
.. code-block:: console
$ spack -e myenv add perl
$ spack -e myenv concretize
$ spack -e myenv find
==> In environment myenv
==> Root specs
perl python
==> 0 installed packages
$ spack -e included_env find
==> In environment included_env
==> No root specs
==> Included specs
python
==> 0 installed packages
It isn't until you run the ``spack concretize`` command that the combined
environment will get the updated information from the reconcretized base environment.
.. code-block:: console
$ spack -e included_env concretize
$ spack -e included_env find
==> In environment included_env
==> No root specs
==> Included specs
perl python
==> 0 installed packages
.. _environment-configuration:
------------------------
@@ -619,11 +457,11 @@ a ``packages.yaml`` file) could contain:
.. code-block:: yaml
spack:
# ...
...
packages:
all:
compiler: [intel]
# ...
...
This configuration sets the default compiler for all packages to
``intel``.
@@ -930,7 +768,6 @@ For example, the following environment has three root packages:
This allows for a much-needed reduction in redundancy between packages
and constraints.
----------------
Filesystem Views
----------------
@@ -970,7 +807,7 @@ directories.
.. code-block:: yaml
spack:
# ...
...
view:
mpis:
root: /path/to/view
@@ -1014,7 +851,7 @@ automatically named ``default``, so that
.. code-block:: yaml
spack:
# ...
...
view: True
is equivalent to
@@ -1022,7 +859,7 @@ is equivalent to
.. code-block:: yaml
spack:
# ...
...
view:
default:
root: .spack-env/view
@@ -1032,7 +869,7 @@ and
.. code-block:: yaml
spack:
# ...
...
view: /path/to/view
is equivalent to
@@ -1040,7 +877,7 @@ is equivalent to
.. code-block:: yaml
spack:
# ...
...
view:
default:
root: /path/to/view
@@ -1083,17 +920,6 @@ function, as shown in the example below:
^mpi: "{name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}"
all: "{name}-{version}/{compiler.name}-{compiler.version}"
Projections also permit environment and spack configuration variable
expansions as shown below:
.. code-block:: yaml
projections:
all: "{name}-{version}/{compiler.name}-{compiler.version}/$date/$SYSTEM_ENV_VARIBLE"
where ``$date`` is the spack configuration variable that will expand with the ``YYYY-MM-DD``
format and ``$SYSTEM_ENV_VARIABLE`` is an environment variable defined in the shell.
The entries in the projections configuration file must all be either
specs or the keyword ``all``. For each spec, the projection used will
be the first non-``all`` entry that the spec satisfies, or ``all`` if
@@ -1164,7 +990,7 @@ other targets to depend on the environment installation.
A typical workflow is as follows:
.. code-block:: console
.. code:: console
spack env create -d .
spack -e . add perl
@@ -1257,7 +1083,7 @@ its dependencies. This can be useful when certain flags should only apply to
dependencies. Below we show a use case where a spec is installed with verbose
output (``spack install --verbose``) while its dependencies are installed silently:
.. code-block:: console
.. code:: console
$ spack env depfile -o Makefile
@@ -1279,7 +1105,7 @@ This can be accomplished through the generated ``[<prefix>/]SPACK_PACKAGE_IDS``
variable. Assuming we have an active and concrete environment, we generate the
associated ``Makefile`` with a prefix ``example``:
.. code-block:: console
.. code:: console
$ spack env depfile -o env.mk --make-prefix example
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -9,42 +9,46 @@
Custom Extensions
=================
*Spack extensions* allow you to extend Spack capabilities by deploying your
*Spack extensions* permit you to extend Spack capabilities by deploying your
own custom commands or logic in an arbitrary location on your filesystem.
This might be extremely useful e.g. to develop and maintain a command whose purpose is
too specific to be considered for reintegration into the mainline or to
evolve a command through its early stages before starting a discussion to merge
it upstream.
From Spack's point of view an extension is any path in your filesystem which
respects the following naming and layout for files:
respects a prescribed naming and layout for files:
.. code-block:: console
spack-scripting/ # The top level directory must match the format 'spack-{extension_name}'
├── pytest.ini # Optional file if the extension ships its own tests
├── scripting # Folder that may contain modules that are needed for the extension commands
│   ├── cmd # Folder containing extension commands
│   │   └── filter.py # A new command that will be available
│   └── functions.py # Module with internal details
└── tests # Tests for this extension
│   ├── cmd # Folder containing extension commands
│   └── filter.py # A new command that will be available
├── tests # Tests for this extension
│ ├── conftest.py
│ └── test_filter.py
└── templates # Templates that may be needed by the extension
In the example above, the extension is named *scripting*. It adds an additional command
(``spack filter``) and unit tests to verify its behavior.
In the example above the extension named *scripting* adds an additional command (``filter``)
and unit tests to verify its behavior. The code for this example can be
obtained by cloning the corresponding git repository:
The extension can import any core Spack module in its implementation. When loaded by
the ``spack`` command, the extension itself is imported as a Python package in the
``spack.extensions`` namespace. In the example above, since the extension is named
"scripting", the corresponding Python module is ``spack.extensions.scripting``.
The code for this example extension can be obtained by cloning the corresponding git repository:
.. TODO: write an ad-hoc "hello world" extension and make it part of the spack organization
.. code-block:: console
$ git -C /tmp clone https://github.com/spack/spack-scripting.git
$ cd ~/
$ mkdir tmp && cd tmp
$ git clone https://github.com/alalazo/spack-scripting.git
Cloning into 'spack-scripting'...
remote: Counting objects: 11, done.
remote: Compressing objects: 100% (7/7), done.
remote: Total 11 (delta 0), reused 11 (delta 0), pack-reused 0
Receiving objects: 100% (11/11), done.
As you can see by inspecting the sources, Python modules that are part of the extension
can import any core Spack module.
---------------------------------
Configure Spack to Use Extensions
@@ -57,7 +61,7 @@ paths to ``config.yaml``. In the case of our example this means ensuring that:
config:
extensions:
- /tmp/spack-scripting
- ~/tmp/spack-scripting
is part of your configuration file. Once this is setup any command that the extension provides
will be available from the command line:
@@ -82,68 +86,37 @@ will be available from the command line:
--implicit select specs that are not installed or were installed implicitly
--output OUTPUT where to dump the result
The corresponding unit tests can be run giving the appropriate options to ``spack unit-test``:
The corresponding unit tests can be run giving the appropriate options
to ``spack unit-test``:
.. code-block:: console
$ spack unit-test --extension=scripting
========================================== test session starts ===========================================
platform linux -- Python 3.11.5, pytest-7.4.3, pluggy-1.3.0
rootdir: /home/culpo/github/spack-scripting
configfile: pytest.ini
testpaths: tests
plugins: xdist-3.5.0
============================================================== test session starts ===============================================================
platform linux2 -- Python 2.7.15rc1, pytest-3.2.5, py-1.4.34, pluggy-0.4.0
rootdir: /home/mculpo/tmp/spack-scripting, inifile: pytest.ini
collected 5 items
tests/test_filter.py ..... [100%]
tests/test_filter.py ...XX
============================================================ short test summary info =============================================================
XPASS tests/test_filter.py::test_filtering_specs[flags3-specs3-expected3]
XPASS tests/test_filter.py::test_filtering_specs[flags4-specs4-expected4]
========================================== slowest 30 durations ==========================================
2.31s setup tests/test_filter.py::test_filtering_specs[kwargs0-specs0-expected0]
0.57s call tests/test_filter.py::test_filtering_specs[kwargs2-specs2-expected2]
0.56s call tests/test_filter.py::test_filtering_specs[kwargs4-specs4-expected4]
0.54s call tests/test_filter.py::test_filtering_specs[kwargs3-specs3-expected3]
0.54s call tests/test_filter.py::test_filtering_specs[kwargs1-specs1-expected1]
0.48s call tests/test_filter.py::test_filtering_specs[kwargs0-specs0-expected0]
0.01s setup tests/test_filter.py::test_filtering_specs[kwargs4-specs4-expected4]
0.01s setup tests/test_filter.py::test_filtering_specs[kwargs2-specs2-expected2]
0.01s setup tests/test_filter.py::test_filtering_specs[kwargs1-specs1-expected1]
0.01s setup tests/test_filter.py::test_filtering_specs[kwargs3-specs3-expected3]
(5 durations < 0.005s hidden. Use -vv to show these durations.)
=========================================== 5 passed in 5.06s ============================================
---------------------------------------
Registering Extensions via Entry Points
---------------------------------------
.. note::
Python version >= 3.8 is required to register extensions via entry points.
Spack can be made aware of extensions that are installed as part of a python package. To do so, register a function that returns the extension path, or paths, to the ``"spack.extensions"`` entry point. Consider the Python package ``my_package`` that includes a Spack extension:
.. code-block:: console
my-package/
├── src
│   ├── my_package
│   │   └── __init__.py
│   └── spack-scripting/ # the spack extensions
└── pyproject.toml
adding the following to ``my_package``'s ``pyproject.toml`` will make the ``spack-scripting`` extension visible to Spack when ``my_package`` is installed:
.. code-block:: toml
[project.entry_points."spack.extensions"]
my_package = "my_package:get_extension_path"
The function ``my_package.get_extension_path`` in ``my_package/__init__.py`` might look like
.. code-block:: python
import importlib.resources
def get_extension_path():
dirname = importlib.resources.files("my_package").joinpath("spack-scripting")
if dirname.exists():
return str(dirname)
=========================================================== slowest 20 test durations ============================================================
3.74s setup tests/test_filter.py::test_filtering_specs[flags0-specs0-expected0]
0.17s call tests/test_filter.py::test_filtering_specs[flags3-specs3-expected3]
0.16s call tests/test_filter.py::test_filtering_specs[flags2-specs2-expected2]
0.15s call tests/test_filter.py::test_filtering_specs[flags1-specs1-expected1]
0.13s call tests/test_filter.py::test_filtering_specs[flags4-specs4-expected4]
0.08s call tests/test_filter.py::test_filtering_specs[flags0-specs0-expected0]
0.04s teardown tests/test_filter.py::test_filtering_specs[flags4-specs4-expected4]
0.00s setup tests/test_filter.py::test_filtering_specs[flags4-specs4-expected4]
0.00s setup tests/test_filter.py::test_filtering_specs[flags3-specs3-expected3]
0.00s setup tests/test_filter.py::test_filtering_specs[flags1-specs1-expected1]
0.00s setup tests/test_filter.py::test_filtering_specs[flags2-specs2-expected2]
0.00s teardown tests/test_filter.py::test_filtering_specs[flags2-specs2-expected2]
0.00s teardown tests/test_filter.py::test_filtering_specs[flags1-specs1-expected1]
0.00s teardown tests/test_filter.py::test_filtering_specs[flags0-specs0-expected0]
0.00s teardown tests/test_filter.py::test_filtering_specs[flags3-specs3-expected3]
====================================================== 3 passed, 2 xpassed in 4.51 seconds =======================================================
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
View File
@@ -1,77 +0,0 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
==========================
Frequently Asked Questions
==========================
This page contains answers to frequently asked questions about Spack.
If you have questions that are not answered here, feel free to ask on
`Slack <https://slack.spack.io>`_ or `GitHub Discussions
<https://github.com/spack/spack/discussions>`_. If you've learned the
answer to a question that you think should be here, please consider
contributing to this page.
.. _faq-concretizer-precedence:
-----------------------------------------------------
Why does Spack pick particular versions and variants?
-----------------------------------------------------
This question comes up in a variety of forms:
1. Why does Spack seem to ignore my package preferences from ``packages.yaml`` config?
2. Why does Spack toggle a variant instead of using the default from the ``package.py`` file?
The short answer is that Spack always picks an optimal configuration
based on a complex set of criteria\ [#f1]_. These criteria are more nuanced
than always choosing the latest versions or default variants.
.. note::
As a rule of thumb: requirements + constraints > reuse > preferences > defaults.
The following set of criteria (from lowest to highest precedence) explains
common cases where concretization output may seem surprising at first.
1. :ref:`Package preferences <package-preferences>` configured in ``packages.yaml``
override variant defaults from ``package.py`` files, and influence the optimal
ordering of versions. Preferences are specified as follows:
.. code-block:: yaml
packages:
foo:
version: [1.0, 1.1]
variants: ~mpi
2. :ref:`Reuse concretization <concretizer-options>` configured in ``concretizer.yaml``
overrides preferences, since it's typically faster to reuse an existing spec than to
build a preferred one from sources. When build caches are enabled, specs may be reused
from a remote location too. Reuse concretization is configured as follows:
.. code-block:: yaml
concretizer:
reuse: dependencies # other options are 'true' and 'false'
3. :ref:`Package requirements <package-requirements>` configured in ``packages.yaml``,
and constraints from the command line as well as ``package.py`` files override all
of the above. Requirements are specified as follows:
.. code-block:: yaml
packages:
foo:
require:
- "@1.2: +mpi"
Requirements and constraints restrict the set of possible solutions, while reuse
behavior and preferences influence what an optimal solution looks like.
.. rubric:: Footnotes
.. [#f1] The exact list of criteria can be retrieved with the ``spack solve`` command
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -250,10 +250,9 @@ Compiler configuration
Spack has the ability to build packages with multiple compilers and
compiler versions. Compilers can be made available to Spack by
specifying them manually in ``compilers.yaml`` or ``packages.yaml``,
or automatically by running ``spack compiler find``, but for
convenience Spack will automatically detect compilers the first time
it needs them.
specifying them manually in ``compilers.yaml``, or automatically by
running ``spack compiler find``, but for convenience Spack will
automatically detect compilers the first time it needs them.
.. _cmd-spack-compilers:
@@ -458,54 +457,6 @@ specification. The operations available to modify the environment are ``set``, `
prepend_path: # Similar for append|remove_path
LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh
.. note::
Spack is in the process of moving compilers from a separate
attribute to be handled like all other packages. As part of this
process, the ``compilers.yaml`` section will eventually be replaced
by configuration in the ``packages.yaml`` section. This new
configuration is now available, although it is not yet the default
behavior.
Compilers can also be configured as external packages in the
``packages.yaml`` config file. Any external package for a compiler
(e.g. ``gcc`` or ``llvm``) will be treated as a configured compiler
assuming the paths to the compiler executables are determinable from
the prefix.
If the paths to the compiler executable are not determinable from the
prefix, you can add them to the ``extra_attributes`` field. Similarly,
all other fields from the compilers config can be added to the
``extra_attributes`` field for an external representing a compiler.
Note that the format for the ``paths`` field in the
``extra_attributes`` section is different than in the ``compilers``
config. For compilers configured as external packages, the section is
named ``compilers`` and the dictionary maps language names (``c``,
``cxx``, ``fortran``) to paths, rather than using the names ``cc``,
``fc``, and ``f77``.
.. code-block:: yaml
packages:
gcc:
external:
- spec: gcc@12.2.0 arch=linux-rhel8-skylake
prefix: /usr
extra_attributes:
environment:
set:
GCC_ROOT: /usr
external:
- spec: llvm+clang@15.0.0 arch=linux-rhel8-skylake
prefix: /usr
extra_attributes:
compilers:
c: /usr/bin/clang-with-suffix
cxx: /usr/bin/clang++-with-extra-info
fortran: /usr/bin/gfortran
extra_rpaths:
- /usr/lib/llvm/
^^^^^^^^^^^^^^^^^^^^^^^
Build Your Own Compiler
@@ -672,7 +623,7 @@ Fortran.
compilers:
- compiler:
# ...
...
paths:
cc: /usr/bin/clang
cxx: /usr/bin/clang++
@@ -1578,8 +1529,6 @@ Microsoft Visual Studio
"""""""""""""""""""""""
Microsoft Visual Studio provides the only Windows C/C++ compiler that is currently supported by Spack.
Spack additionally requires the Windows SDK (including WGL) to be installed as part of your
Visual Studio installation, as it is required to build many packages from source.
We require several specific components to be included in the Visual Studio installation.
One is the C/C++ toolset, which can be selected as "Desktop development with C++" or "C++ build tools,"
@@ -1587,7 +1536,6 @@ depending on installation type (Professional, Build Tools, etc.) The other requ
"C++ CMake tools for Windows," which can be selected from among the optional packages.
This provides CMake and Ninja for use during Spack configuration.
If you already have Visual Studio installed, you can make sure these components are installed by
rerunning the installer. Next to your installation, select "Modify" and look at the
"Installation details" pane on the right.
View File
@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -111,28 +111,3 @@ CUDA is split into fewer components and is simpler to specify:
prefix: /opt/cuda/cuda-11.0.2/
where ``/opt/cuda/cuda-11.0.2/lib/`` contains ``libcudart.so``.
-----------------------------------
Using an External OpenGL API
-----------------------------------
Depending on whether we have a graphics card or not, we may choose to use OSMesa or GLX to implement the OpenGL API.
If a graphics card is unavailable, OSMesa is recommended and can typically be built with Spack.
However, if we prefer to utilize the system GLX tailored to our graphics card, we need to declare it as an external. Here's how to do it:
.. code-block:: yaml
packages:
libglx:
require: [opengl]
opengl:
buildable: false
externals:
- prefix: /usr/
spec: opengl@4.6
Note that the prefix has to be the root of both the libraries and the headers, i.e. /usr, not the path to the lib directory.
To find out which opengl spec is available, use ``cd /usr/include/GL && grep -Ri gl_version``.
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
getting_started
basic_usage
replace_conda_homebrew
frequently_asked_questions
.. toctree::
:maxdepth: 2
configuration
config_yaml
packages_yaml
bootstrapping
environments
containers
module_file_support
repositories
binary_caches
command_index
chain
extensions
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
======================
Modules (modules.yaml)
======================
The use of module systems to manage the user environment in a controlled way
is a common practice at HPC centers, one that is sometimes also embraced by
individual programmers on their development machines. To support this
common practice Spack integrates with `Environment Modules
<http://modules.sourceforge.net/>`_ and `Lmod
<http://lmod.readthedocs.io/en/latest/>`_ by providing post-install hooks that
generate module files, and commands to manipulate them.

Modules are one of several ways you can use Spack packages. For other
options that may fit your use case better, you should also look at
:ref:`spack load <spack-load>` and :ref:`environments <environments>`.
-----------
Quick start
-----------
In the current version of Spack, module files are not generated by default. To get started, you
can generate module files for all currently installed packages by running either
.. code-block:: console
$ spack module tcl refresh
or
.. code-block:: console
$ spack module lmod refresh
Spack can also generate module files for all future installations automatically through the
following configuration:
.. code-block:: console
$ spack config add modules:default:enable:[tcl]
or
.. code-block:: console
$ spack config add modules:default:enable:[lmod]
Assuming you have a module system installed, you should now be able to use the ``module`` command
to interact with the generated module files.
-------------------------
Module file customization
-------------------------
Module files are generated by post-install hooks after the successful
installation of a package.
.. note::
Spack only generates modulefiles when a package is installed. If
you attempt to install a package and it is already installed, Spack
will not regenerate modulefiles for the package. This may lead to
inconsistent modulefiles if the Spack module configuration has
changed since the package was installed, either by editing a file
or changing scopes or environments.
Later in this section there is a subsection on :ref:`regenerating
modules <cmd-spack-module-refresh>` that will allow you to bring
your modules to a consistent state.
The table below summarizes the essential information associated with
the different file formats that can be generated by Spack:
+-----------+--------------+------------------------------+----------------------------------------------+----------------------+
| | Hierarchical | **Default root directory** | **Default template file** | **Compatible tools** |
+===========+==============+==============================+==============================================+======================+
| ``tcl`` | No | share/spack/modules | share/spack/templates/modules/modulefile.tcl | Env. Modules/Lmod |
+-----------+--------------+------------------------------+----------------------------------------------+----------------------+
| ``lmod`` | Yes | share/spack/lmod | share/spack/templates/modules/modulefile.lua | Lmod |
+-----------+--------------+------------------------------+----------------------------------------------+----------------------+
Spack ships with sensible defaults for the generation of module files, but
In general you can override or extend the default behavior by:

1. overriding certain callback APIs in the Python packages
2. writing specific rules in the ``modules.yaml`` configuration file
3. writing your own templates to override or extend the defaults
The former method lets you express changes in the run-time environment
that are needed to use the installed software properly, e.g. injecting variables
from language interpreters into their extensions. The latter two instead let you
fine-tune the filesystem layout, content and creation of module files to meet
site-specific conventions.
.. _overide-api-calls-in-package-py:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting environment variables dynamically in ``package.py``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
There are two methods that you can implement in any ``package.py`` to dynamically affect the
content of the module files generated by Spack. The most important one is
``setup_run_environment``, which can be used to set environment variables in the module file that
depend on the spec:
.. code-block:: python
def setup_run_environment(self, env):
if self.spec.satisfies("+foo"):
env.set("FOO", "bar")
The second, less commonly used, is ``setup_dependent_run_environment(self, env, dependent_spec)``,
which allows a dependency to set variables in the module file of its dependents. This is typically
used in packages like ``python``, ``r``, or ``perl`` to prepend the dependent's prefix to the
search path of the interpreter (``PYTHONPATH``, ``R_LIBS``, and ``PERL5LIB``, respectively), so it can locate
the packages at runtime.
For example, a simplified version of the ``python`` package could look like this:
.. code-block:: python
def setup_dependent_run_environment(self, env, dependent_spec):
if dependent_spec.package.extends(self.spec):
env.prepend_path("PYTHONPATH", dependent_spec.prefix.lib.python)
and would make any package that ``extends("python")`` have its library directory added to the
``PYTHONPATH`` environment variable in the module file. It's much more convenient to set this
variable here, than to repeat it in every Python extension's ``setup_run_environment`` method.
.. admonition:: The ``r`` package and callback APIs
An example in which it is crucial to override both methods
is given by the ``r`` package. This package installs libraries and headers
in non-standard locations and it is possible to prepend the appropriate directory
to the corresponding environment variables:
================== =================================
LD_LIBRARY_PATH ``self.prefix/rlib/R/lib``
PKG_CONFIG_PATH ``self.prefix/rlib/pkgconfig``
================== =================================
with the following snippet:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/r/package.py
:pyobject: R.setup_run_environment
The ``r`` package also knows which environment variable should be modified
to make language extensions provided by other packages available, and modifies
it appropriately in the override of the second method:
.. literalinclude:: _spack_root/var/spack/repos/builtin/packages/r/package.py
:pyobject: R.setup_dependent_run_environment
.. _modules-yaml:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``modules.yaml`` config file and module sets
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The configuration files that control module generation behavior are named ``modules.yaml``. The
default configuration looks like this:
.. literalinclude:: _spack_root/etc/spack/defaults/modules.yaml
:language: yaml
You can define one or more **module sets**, each of which can be configured separately with regard
to install location, naming scheme, inclusion and exclusion, autoloading, et cetera.
The default module set is aptly named ``default``. All
:ref:`Spack commands that operate on modules <maintaining-module-files>` apply to the ``default``
module set, unless another module set is specified explicitly (with the ``--name`` flag).
^^^^^^^^^^^^^^^^^^^^^^^^^
Changing the modules root
^^^^^^^^^^^^^^^^^^^^^^^^^
As shown in the table above, the default module root for ``lmod`` is
``$spack/share/spack/lmod`` and the default root for ``tcl`` is
``$spack/share/spack/modules``. This can be overridden for any module
set by changing the ``roots`` key of the configuration:

.. code-block:: yaml

   modules:
     default:
       roots:
         tcl: /path/to/install/tcl/modules
     my_custom_lmod_modules:
       roots:
         lmod: /path/to/install/custom/lmod/modules

This configuration will create two module sets: the default module set, which
will install its ``tcl`` modules to ``/path/to/install/tcl/modules``, and
``my_custom_lmod_modules``, which will install its ``lmod`` modules to the
custom location. A module file installed to an unexpected
location could be confusing to users of your modules. In the next
section, we will discuss enabling and disabling module types (module
file generators) for each module set.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Automatically generating module files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""
Activate other hooks
""""""""""""""""""""
Spack can be configured to automatically generate module files as part of package installation.
This is done by adding the desired module systems to the ``enable`` list.
.. code-block:: yaml
modules:
default:
enable:
- tcl
- lmod
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configuring ``tcl`` and ``lmod`` modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can configure the behavior of either module system separately, under a key corresponding to
the generator being customized:
.. code-block:: yaml

   modules:
     default:
       tcl:
         # contains environment modules specific customizations
       lmod:
         # contains lmod specific customizations

In general, the configuration options that you can use in ``modules.yaml`` will
either change the layout of the module files on the filesystem, or they will affect
their content. For the latter point it is possible to use anonymous specs
to fine tune the set of packages on which the modifications should be applied.
.. _autoloading-dependencies:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Autoloading and hiding dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A module file should set the variables that are needed for an application to work. But since an
application often has many dependencies, where should all the environment variables for those be
set? In Spack the rule is that each package sets the runtime variables that are needed by the
package itself, and no more. This way, dependencies can be loaded standalone too, and duplication
of environment variables is avoided.
That means however that if you want to use an application, you need to load the modules for all its
dependencies. Of course this is not something you would want users to do manually.
Since Spack knows the dependency graph of every package, it can easily generate module files that
automatically load the modules for its dependencies recursively. It is enabled by default for both
Lmod and Environment Modules under the ``autoload: direct`` config option. The former system has
builtin support through the ``depends_on`` function, the latter simply uses a ``module load``
statement. Both module systems (at least in newer versions) do reference counting, so that if a
module is loaded by two different modules, it will only be unloaded after the others are.
The ``autoload`` key accepts the values:
* ``none``: no autoloading
* ``run``: autoload direct *run* type dependencies
* ``direct``: autoload direct *link and run* type dependencies
* ``all``: autoload all dependencies
In case of ``run`` and ``direct``, a ``module load`` triggers a recursive load.
The ``direct`` option is most correct: there are cases where pure link dependencies need to set
variables for themselves, or need to have variables of their own dependencies set.
In practice however, ``run`` is often sufficient, and may make ``module load`` snappier.
The ``all`` option is discouraged and seldom used.
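Autoloading can also be enabled conditionally, using an anonymous spec as the
selector. For instance, the following sketch only autoloads the direct
dependencies of packages that depend on ``python``:

.. code-block:: yaml

   modules:
     default:
       tcl:
         all:
           autoload: none
         ^python:
           autoload: direct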
A common complaint about autoloading is the large number of modules that are visible to the user.
Spack has a solution for this as well: ``hide_implicits: true``. This ensures that only those
packages you've explicitly installed are exposed by ``module avail``, but still allows for
autoloading of hidden dependencies. Lmod should support hiding implicits in general, while
Environment Modules requires version 4.7 or higher.
.. note::
If supported by your module system, we highly encourage the following configuration that enables
autoloading and hiding of implicits. It ensures all runtime variables are set correctly,
including those for dependencies, without overwhelming the user with a large number of available
modules. Further, it makes it easier to get readable module names without collisions; see the
section below on :ref:`modules-projections`.
.. code-block:: yaml
modules:
default:
tcl:
hide_implicits: true
all:
autoload: direct # or `run`
lmod:
hide_implicits: true
all:
autoload: direct # or `run`
.. _anonymous_specs:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting environment variables for selected packages in config
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""""""""""""""""""""
Selection by anonymous specs
""""""""""""""""""""""""""""
In the configuration file you can filter particular specs, and make further changes to the
environment variables that go into their module files. This is very powerful when you want to avoid
:ref:`modifying the package itself <overide-api-calls-in-package-py>`, or when you want to set
certain variables on multiple selected packages at once.
For instance, in the snippet below:
.. code-block:: yaml

   modules:
     default:
       tcl:
         all:
           environment:
             set:
               BAR: 'bar'
         zlib%gcc@4.8:
           environment:
             unset:
             - FOOBAR

the variable ``BAR`` will be set in every generated module file, while for
any package that satisfies ``zlib%gcc@4.8``
the variable ``FOOBAR`` will be unset.
.. note::
Order does matter
The modifications associated with the ``all`` keyword are always evaluated
first, no matter where they appear in the configuration file. All the other changes to
environment variables for matching specs are evaluated from top to bottom.
.. warning::
As general advice, it's best to avoid setting variables that are not strictly needed. For
example, the following seemingly innocent and potentially useful configuration
.. code-block:: yaml
all:
environment:
set:
"{name}_ROOT": "{prefix}"
sets ``BINUTILS_ROOT`` to its prefix in modules for ``binutils``, which happens to break
the ``gcc`` compiler: it uses this variable as its default search path for certain object
files and libraries, and by merely setting it, everything fails to link.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Exclude or include specific module files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can use anonymous specs also to prevent module files from being written, or
to force them to be written. Consider the case where you want to hide from users
all of the boilerplate software that you had to build to bootstrap a new
compiler: you can prevent the generation of module files for any package that
is compiled with ``gcc@4.4.7``, with the only exception of any ``gcc``
or any ``llvm`` installation, as sketched below.
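A minimal sketch of such a configuration, assuming ``gcc@4.4.7`` is the system
compiler used for bootstrapping (the ``include`` entries exempt ``gcc`` and
``llvm`` themselves from exclusion):

.. code-block:: yaml

   modules:
     default:
       tcl:
         include:
         - gcc
         - llvm
         exclude:
         - '%gcc@4.4.7'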
It is safe to combine ``exclude`` and the ``autoload`` feature
:ref:`mentioned above <autoloading-dependencies>`. When ``exclude`` prevents a module file from being
generated for a dependency, the ``autoload`` feature will simply not generate a statement to load
it.
.. _modules-projections:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Customize the naming of modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The names of environment modules generated by Spack are not always easy to
fully comprehend due to the long hash in the name. There are three module
configuration options to help with that. The first is a global setting to
adjust the hash length. It can be set anywhere from 0 to 32 and has a default
length of 7. Setting it to 0 removes the hash entirely. The following snippet
shows how to set hash length in the module file names:

.. code-block:: yaml

   modules:
     default:
       tcl:
         hash_length: 7
.. tip::
Using ``hide_implicits: true`` (see :ref:`autoloading-dependencies`) vastly reduces the number of
modules exposed to the user. The hidden modules always contain the hash in their name, and are
not influenced by the ``hash_length`` setting. Hidden implicits thus make it easier to use a
short hash length or no hash at all, without risking name conflicts.
To help make module names more readable, and to help alleviate name conflicts
with a short hash, one can use the ``suffixes`` option in the modules
configuration file. This option will add strings to modules that match a spec.
For instance, the following config options:

.. code-block:: yaml

   modules:
     default:
       tcl:
         all:
           suffixes:
             ^python@3.12: 'python-3.12'
             ^openblas: 'openblas'
will add a ``python-3.12`` version string to any packages compiled with
Python matching the spec, ``python@3.12``. This is useful to know which
version of Python a set of Python extensions is associated with. Likewise, the
``openblas`` string is attached to any program that has openblas in the spec,
most likely via the ``+blas`` variant specification.
For hierarchies that are deeper than three layers ``lmod spider`` may have some issues.
See `this discussion on the Lmod project <https://github.com/TACC/Lmod/issues/114>`_.
""""""""""""""""""""""
Select default modules
""""""""""""""""""""""
By default, when multiple modules of the same name share a directory,
the highest version number will be the default module. This behavior
of the ``module`` command can be overridden with a symlink named
``default`` to the desired default module. If you wish to configure
default modules with Spack, add a ``defaults`` key to your modules
configuration:
.. code-block:: yaml
modules:
my-module-set:
tcl:
defaults:
- gcc@10.2.1
- hdf5@1.2.10+mpi+hl%gcc
These defaults may be arbitrarily specific. For any package that
satisfies a default, Spack will generate the module file in the
appropriate path, and will generate a default symlink to the module
file as well.
.. warning::
If Spack is configured to generate multiple default packages in the
same directory, the last modulefile to be generated will be the
default module.
.. _customize-env-modifications:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Customize environment modifications
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can control which prefixes in a Spack package are added to
environment variables with the ``prefix_inspections`` section; this section
maps relative paths inside an installation prefix to the environment
variables they should be added to. This can be useful when publishing a software
stack to users who are likely to inspect the modules to find full
paths to software, when it is desirable to present the users with a
simpler set of paths than those generated by the Spack install tree.
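For instance, a minimal sketch of such a mapping (the prefixes and variables
shown are illustrative):

.. code-block:: yaml

   modules:
     prefix_inspections:
       bin:
       - PATH
       man:
       - MANPATH
       lib:
       - LIBRARY_PATH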
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Filter out environment modifications
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Modifications to certain environment variables in module files are there by
default, for instance because they are generated by prefix inspections.
If you want to prevent modifications to some environment variables, you can
do so by using the ``exclude_env_vars`` setting:
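A minimal sketch of such a filter (nested under a module set and generator as
in the previous examples):

.. code-block:: yaml

   modules:
     default:
       tcl:
         all:
           filter:
             exclude_env_vars: ['CPATH', 'LIBRARY_PATH']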
The configuration above will generate module files that will not contain
modifications to either ``CPATH`` or ``LIBRARY_PATH``.
.. note::
Tcl prerequisites
In the ``tcl`` section of the configuration file it is possible to use
the ``prerequisites`` directive that accepts the same values as
``autoload``. It will produce module files that have a ``prereq``
statement, which autoloads dependencies on Environment Modules when its
``auto_handling`` configuration option is enabled. If Environment Modules
is installed with Spack, ``auto_handling`` is enabled by default starting
with version 4.2; otherwise, it is enabled by default since version 5.0.
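A sketch of the directive (same nesting as the ``autoload`` examples above):

.. code-block:: yaml

   modules:
     default:
       tcl:
         all:
           prerequisites: direct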
.. _maintaining-module-files:

------------------------
Maintaining Module Files
------------------------
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. _packages-config:
================================
Package Settings (packages.yaml)
================================
Spack allows you to customize how your software is built through the
``packages.yaml`` file. Using it, you can make Spack prefer particular
implementations of virtual dependencies (e.g., MPI or BLAS/LAPACK),
or you can make it prefer to build with particular compilers. You can
also tell Spack to use *external* software installations already
present on your system.
At a high level, the ``packages.yaml`` file is structured like this:
.. code-block:: yaml
packages:
package1:
# settings for package1
package2:
# settings for package2
# ...
all:
# settings that apply to all packages.
So you can either set build preferences specifically for *one* package,
or you can specify that certain settings should apply to *all* packages.
The types of settings you can customize are described in detail below.
Spack's build defaults are in the default
``etc/spack/defaults/packages.yaml`` file. You can override them in
``~/.spack/packages.yaml`` or ``etc/spack/packages.yaml``. For more
details on how this works, see :ref:`configuration-scopes`.
.. _sec-external-packages:
-----------------
External Packages
-----------------
Spack can be configured to use externally-installed
packages rather than building its own packages. This may be desirable
if machines ship with system packages, such as a customized MPI
that should be used instead of Spack building its own MPI.
External packages are configured through the ``packages.yaml`` file.
Here's an example of an external configuration:
.. code-block:: yaml
packages:
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
This example lists three installations of OpenMPI, one built with GCC,
one built with GCC and debug information, and another built with Intel.
If Spack is asked to build a package that uses one of these MPIs as a
dependency, it will use the pre-installed OpenMPI in
the given directory. Note that the specified path is the top-level
install prefix, not the ``bin`` subdirectory.
``packages.yaml`` can also be used to specify modules to load instead
of the installation prefixes. The following example says that module
``CMake/3.7.2`` provides cmake version 3.7.2.
.. code-block:: yaml
cmake:
externals:
- spec: cmake@3.7.2
modules:
- CMake/3.7.2
Each ``packages.yaml`` begins with a ``packages:`` attribute, followed
by a list of package names. To specify externals, add an ``externals:``
attribute under the package name, which lists externals.
Each external should specify a ``spec:`` string that should be as
well-defined as reasonably possible. If a
package lacks a spec component, such as missing a compiler or
package version, then Spack will guess the missing component based
on its most-favored packages, and it may guess incorrectly.
Each package version and compiler listed in an external should
have entries in Spack's packages and compiler configuration, even
though the package and compiler may not ever be built.
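For instance, a sketch of a compiler entry matching the OpenMPI externals
above (paths, target and operating system are illustrative assumptions):

.. code-block:: yaml

   compilers:
   - compiler:
       spec: gcc@4.4.7
       operating_system: debian7
       target: x86_64
       modules: []
       paths:
         cc: /usr/bin/gcc
         cxx: /usr/bin/g++
         f77: /usr/bin/gfortran
         fc: /usr/bin/gfortran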
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Extra attributes for external packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Sometimes external packages require additional attributes to be used
effectively. This information can be defined on a per-package basis
and stored in the ``extra_attributes`` section of the external package
configuration. In addition to per-package information, this section
can be used to define environment modifications to be performed
whenever the package is used. For example, if an external package is
built without ``rpath`` support, it may require ``LD_LIBRARY_PATH``
settings to find its dependencies. This could be configured as
follows:
.. code-block:: yaml
packages:
mpich:
externals:
- spec: mpich@3.3 %clang@12.0.0 +hwloc
prefix: /path/to/mpich
extra_attributes:
environment:
prepend_path:
LD_LIBRARY_PATH: /path/to/hwloc/lib64
See :ref:`configuration_environment_variables` for more information on
how to configure environment modifications in Spack config files.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Prevent packages from being built from sources
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Adding an external spec in ``packages.yaml`` allows Spack to use an external location,
but it does not prevent Spack from building packages from sources. In the above example,
Spack might choose for many valid reasons to start building and linking with the
latest version of OpenMPI rather than continue using the pre-installed OpenMPI versions.
To prevent this, the ``packages.yaml`` configuration also allows packages
to be flagged as non-buildable. The previous example could be modified to
be:
.. code-block:: yaml
packages:
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
buildable: False
The addition of the ``buildable`` flag tells Spack that it should never build
its own version of OpenMPI from sources, and it will instead always rely on a pre-built
OpenMPI.
.. note::
If ``concretizer:reuse`` is on (see :ref:`concretizer-options` for more information on that flag)
pre-built specs include specs already available from a local store, an upstream store, a registered
buildcache or specs marked as externals in ``packages.yaml``. If ``concretizer:reuse`` is off, only
external specs in ``packages.yaml`` are included in the list of pre-built specs.
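   As a sketch, reuse can be disabled entirely in the concretizer
   configuration (``concretizer.yaml``):

   .. code-block:: yaml

      concretizer:
        reuse: false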
If an external module is specified as not buildable, then Spack will load the
external module into the build environment which can be used for linking.
The ``buildable`` flag does not need to be paired with external packages.
It can also be used alone to forbid packages that may be
buggy or otherwise undesirable.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Non-buildable virtual packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Virtual packages in Spack can also be specified as not buildable, and
external implementations can be provided. In the example above,
OpenMPI is configured as not buildable, but Spack will often prefer
other MPI implementations over the externally available OpenMPI. Spack
can be configured with every MPI provider not buildable individually,
but more conveniently:
.. code-block:: yaml
packages:
mpi:
buildable: False
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
Spack can then use any of the listed external implementations of MPI
to satisfy a dependency, and will choose depending on the compiler and
architecture.
In cases where the concretizer is configured to reuse specs, and other ``mpi`` providers
(available via stores or buildcaches) are not wanted, Spack can be configured to require
specs matching only the available externals:
.. code-block:: yaml
packages:
mpi:
buildable: False
require:
- one_of: [
"openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64",
"openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug",
"openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
]
openmpi:
externals:
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.4.3
- spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
prefix: /opt/openmpi-1.4.3-debug
- spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
prefix: /opt/openmpi-1.6.5-intel
This configuration prevents any spec using MPI and originating from stores or buildcaches from being reused,
unless it matches the requirements under ``packages:mpi:require``. For more information on requirements see
:ref:`package-requirements`.
.. _cmd-spack-external-find:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Automatically Find External Packages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can run the :ref:`spack external find <spack-external-find>` command
to search for system-provided packages and add them to ``packages.yaml``.
After running this command your ``packages.yaml`` may include new entries:
.. code-block:: yaml
packages:
cmake:
externals:
- spec: cmake@3.17.2
prefix: /usr
Generally this is useful for detecting a small set of commonly-used packages;
for now it is mostly limited to finding build-only dependencies.
Specific limitations include:
* Packages are not discoverable by default: For a package to be
discoverable with ``spack external find``, it needs to add special
logic. See :ref:`here <make-package-findable>` for more details.
* The logic does not search through module files, it can only detect
packages with executables defined in ``PATH``; you can help Spack locate
externals which use module files by loading any associated modules for
packages that you want Spack to know about before running
``spack external find``.
* Spack does not overwrite existing entries in the package configuration:
If there is an external defined for a spec at any configuration scope,
then Spack will not add a new external entry (``spack config blame packages``
can help locate all external entries).
.. _package-requirements:
--------------------
Package Requirements
--------------------
Spack can be configured to always use certain compilers, package
versions, and variants during concretization through package
requirements.
Package requirements are useful when you find yourself repeatedly
specifying the same constraints on the command line, and wish that
Spack respects these constraints whether you mention them explicitly
or not. Another use case is specifying constraints that should apply
to all root specs in an environment, without having to repeat the
constraint everywhere.
Apart from that, requirements config is more flexible than constraints
on the command line, because it can specify constraints on packages
*when they occur* as a dependency. In contrast, on the command line it
is not possible to specify constraints on dependencies while also keeping
those dependencies optional.
.. seealso::
FAQ: :ref:`Why does Spack pick particular versions and variants? <faq-concretizer-precedence>`
^^^^^^^^^^^^^^^^^^^
Requirements syntax
^^^^^^^^^^^^^^^^^^^
The package requirements configuration is specified in ``packages.yaml``,
keyed by package name and expressed using the Spec syntax. In the simplest
case you can specify attributes that you always want the package to have
by providing a single spec string to ``require``:
.. code-block:: yaml
packages:
libfabric:
require: "@1.13.2"
In the above example, ``libfabric`` will always build with version 1.13.2. If you
need to compose multiple configuration scopes ``require`` accepts a list of
strings:
.. code-block:: yaml
packages:
libfabric:
require:
- "@1.13.2"
- "%gcc"
In this case ``libfabric`` will always build with version 1.13.2 **and** using GCC
as a compiler.
For more complex use cases, ``require`` also accepts a list of objects. These objects
must have either an ``any_of`` or a ``one_of`` field containing a list of spec strings,
and they can optionally have a ``when`` and a ``message`` attribute:
.. code-block:: yaml
packages:
openmpi:
require:
- any_of: ["@4.1.5", "%gcc"]
message: "in this example only 4.1.5 can build with other compilers"
``any_of`` is a list of specs. One of those specs must be satisfied
and it is also allowed for the concretized spec to match more than one.
In the above example, that means you could build ``openmpi@4.1.5%gcc``,
``openmpi@4.1.5%clang`` or ``openmpi@3.9%gcc``, but
not ``openmpi@3.9%clang``.
If a custom message is provided, and the requirement is not satisfiable,
Spack will print the custom error message:
.. code-block:: console
$ spack spec openmpi@3.9%clang
==> Error: in this example only 4.1.5 can build with other compilers
We could express a similar requirement using the ``when`` attribute:
.. code-block:: yaml
packages:
openmpi:
require:
- any_of: ["%gcc"]
when: "@:4.1.4"
message: "in this example only 4.1.5 can build with other compilers"
In the example above, if the version turns out to be 4.1.4 or less, we require the compiler to be GCC.
For readability, Spack also allows a ``spec`` key accepting a string when there is only a single
constraint:
.. code-block:: yaml
packages:
openmpi:
require:
- spec: "%gcc"
when: "@:4.1.4"
message: "in this example only 4.1.5 can build with other compilers"
This code snippet and the one before it are semantically equivalent.
Finally, instead of ``any_of`` you can use ``one_of`` which also takes a list of specs. The final
concretized spec must match one and only one of them:
.. code-block:: yaml
packages:
mpich:
require:
- one_of: ["+cuda", "+rocm"]
In the example above, that means you could build ``mpich+cuda`` or ``mpich+rocm`` but not ``mpich+cuda+rocm``.
.. note::
For ``any_of`` and ``one_of``, the order of specs indicates a
preference: items that appear earlier in the list are preferred
(note that these preferences can be ignored in favor of others).
.. note::
When using a conditional requirement, Spack is allowed to actively avoid the triggering
condition (the ``when=...`` spec) if that leads to a concrete spec with better scores in
the optimization criteria. To check the current optimization criteria and their
priorities you can run ``spack solve zlib``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting default requirements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can also set default requirements for all packages under ``all``
like this:
.. code-block:: yaml
packages:
all:
require: '%clang'
which means every spec will be required to use ``clang`` as a compiler.
Requirements on variants for all packages are possible too, but note that they
are only enforced for those packages that define these variants, otherwise they
are disregarded. For example:
.. code-block:: yaml
packages:
all:
require:
- "+shared"
- "+cuda"
will just enforce ``+shared`` on ``zlib``, which has a boolean ``shared`` variant but
no ``cuda`` variant.
Constraints in a single spec literal are always considered as a whole, so in a case like:
.. code-block:: yaml
packages:
all:
require: "+shared +cuda"
the default requirement will be enforced only if a package has both a ``cuda`` and
a ``shared`` variant, and will never be partially enforced.
Finally, ``all`` represents a *default set of requirements* -
if there are specific package requirements, then the default requirements
under ``all`` are disregarded. For example, with a configuration like this:
.. code-block:: yaml
packages:
all:
require:
- 'build_type=Debug'
- '%clang'
cmake:
require:
- 'build_type=Debug'
- '%gcc'
Spack requires ``cmake`` to use ``gcc`` and all other nodes (including ``cmake``
dependencies) to use ``clang``. If enforcing ``build_type=Debug`` is needed also
on ``cmake``, it must be repeated in the specific ``cmake`` requirements.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting requirements on virtual specs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A requirement on a virtual spec applies whenever that virtual is present in the DAG.
This can be useful for fixing which virtual provider you want to use:
.. code-block:: yaml
packages:
mpi:
require: 'mvapich2 %gcc'
With the configuration above the only allowed ``mpi`` provider is ``mvapich2 %gcc``.
Requirements on the virtual spec and on the specific provider are both applied, if
present. For instance with a configuration like:
.. code-block:: yaml
packages:
mpi:
require: 'mvapich2 %gcc'
mvapich2:
require: '~cuda'
you will use ``mvapich2~cuda %gcc`` as an ``mpi`` provider.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Conflicts and strong preferences
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If the semantic of requirements is too strong, you can also express "strong preferences" and "conflicts"
from configuration files:
.. code-block:: yaml
packages:
all:
prefer:
- '%clang'
conflict:
- '+shared'
The ``prefer`` and ``conflict`` sections can be used whenever a ``require`` section is allowed.
The argument is always a list of constraints, and each constraint can be either a simple string,
or a more complex object:
.. code-block:: yaml
packages:
all:
conflict:
- spec: '%clang'
when: 'target=x86_64_v3'
message: 'reason why clang cannot be used'
The ``spec`` attribute is mandatory, while both ``when`` and ``message`` are optional.
.. note::
Requirements allow for expressing both "strong preferences" and "conflicts".
The syntax for doing so, though, may not be immediately clear. For
instance, if we want to prevent any package from using ``%clang``, we can set:
.. code-block:: yaml
packages:
all:
require:
- one_of: ['%clang', '@:']
Since only one of the requirements must hold, and ``@:`` is always true, the rule above is
equivalent to a conflict. For "strong preferences" we need to substitute the ``one_of`` policy
with ``any_of``.
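   For instance, a sketch of the "strong preference" form of the same rule:

   .. code-block:: yaml

      packages:
        all:
          require:
          - any_of: ['%clang', '@:']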
.. _package-preferences:
-------------------
Package Preferences
-------------------
In some cases package requirements can be too strong, and package
preferences are the better option. Package preferences do not impose
constraints on packages for particular versions or variant values;
they only set defaults. The concretizer is free to change
them if it must, due to other constraints, and it also prefers reusing
installed packages over building new ones that are a better match for
preferences.
.. seealso::
FAQ: :ref:`Why does Spack pick particular versions and variants? <faq-concretizer-precedence>`
Most package preferences (``compilers``, ``target`` and ``providers``)
can only be set globally under the ``all`` section of ``packages.yaml``:
.. code-block:: yaml
packages:
all:
compiler: [gcc@12.2.0, clang@12:, oneapi@2023:]
target: [x86_64_v3]
providers:
mpi: [mvapich2, mpich, openmpi]
These preferences override Spack's default and effectively reorder priorities
when looking for the best compiler, target or virtual package provider. Each
preference takes an ordered list of spec constraints, with earlier entries in
the list being preferred over later entries.
In the example above all packages prefer to be compiled with ``gcc@12.2.0``,
to target the ``x86_64_v3`` microarchitecture and to use ``mvapich2`` if they
depend on ``mpi``.
The ``variants`` and ``version`` preferences can be set under
package specific sections of the ``packages.yaml`` file:
.. code-block:: yaml
packages:
opencv:
variants: +debug
gperftools:
version: [2.2, 2.4, 2.3]
In this case, the preference for ``opencv`` is to build with debug options, while
``gperftools`` prefers version 2.2 over 2.4.
Any preference can be overwritten on the command line if explicitly requested.
Preferences cannot overcome explicit constraints, as they only set a preferred
ordering among homogeneous attribute values. Going back to the example, if
``gperftools@2.3:`` was requested, then Spack will install version 2.4
since the most preferred version 2.2 is prohibited by the version constraint.
.. _package_permissions:
-------------------
Package Permissions
-------------------
Spack can be configured to assign permissions to the files installed
by a package.
In the ``packages.yaml`` file under ``permissions``, the attributes
``read``, ``write``, and ``group`` control the package
permissions. These attributes can be set per-package, or for all
packages under ``all``. If permissions are set under ``all`` and for a
specific package, the package-specific settings take precedence.
The ``read`` and ``write`` attributes take one of ``user``, ``group``,
and ``world``.
.. code-block:: yaml
packages:
all:
permissions:
write: group
group: spack
my_app:
permissions:
read: group
group: my_team
The permissions settings describe the broadest level of access to
installations of the specified packages. The execute permissions of
the file are set to the same level as read permissions for those files
that are executable. The default setting for ``read`` is ``world``,
and for ``write`` is ``user``. In the example above, installations of
``my_app`` will be installed with user and group permissions but no
world permissions, and owned by the group ``my_team``. All other
packages will be installed with user and group write privileges, and
world read privileges. Those packages will be owned by the group
``spack``.
The ``group`` attribute assigns a Unix-style group to a package. All
files installed by the package will be owned by the assigned group,
and the sticky group bit will be set on the install prefix and all
directories inside the install prefix. This will ensure that even
manually placed files within the install prefix are owned by the
assigned group. If no group is assigned, Spack will allow the OS
default behavior to go as expected.
.. _assigning-package-attributes:
----------------------------
Assigning Package Attributes
----------------------------
You can assign class-level attributes in the configuration:
.. code-block:: yaml
packages:
mpileaks:
package_attributes:
# Override existing attributes
url: http://www.somewhereelse.com/mpileaks-1.0.tar.gz
# ... or add new ones
x: 1
Attributes set this way will be accessible to any method executed
in the ``package.py`` file (e.g. the ``install()`` method). Values for these
attributes may be any value that can be parsed by YAML.
These can only be applied to specific packages, not "all" or
virtual packages.
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. code-block:: python
:linenos:
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
as an option to the ``version()`` directive. Example situations would be a
"snapshot"-like Version Control System (VCS) tag, a VCS branch such as
``v6-16-00-patches``, or a URL specifying a regularly updated snapshot tarball.
.. _version-comparison:
^^^^^^^^^^^^^^^^^^
Version comparison
^^^^^^^^^^^^^^^^^^
Spack imposes a generic total ordering on the set of versions,
independently from the package they are associated with.
Most Spack versions are numeric, a tuple of integers; for example,
``0.1``, ``6.96`` or ``1.2.3.1``. In this very basic case, version
comparison is lexicographical on the numeric components:
``1.2 < 1.2.1 < 1.2.2 < 1.10``.
Spack also supports string components such as ``1.1.1a`` and
``1.y.0``. String components are considered less than numeric
components, so ``1.y.0 < 1.0``. This is for consistency with
`RPM <https://bugzilla.redhat.com/show_bug.cgi?id=50977>`_. String
components do not have to be separated by dots or any other delimiter.
So, the contrived version ``1y0`` is identical to ``1.y.0``.
Pre-release suffixes also contain string parts, but they are handled
in a special way. For example ``1.2.3alpha1`` is parsed as a pre-release
of the version ``1.2.3``. This allows Spack to order it before the
actual release: ``1.2.3alpha1 < 1.2.3``. Spack supports alpha, beta and
release candidate suffixes: ``1.2alpha1 < 1.2beta1 < 1.2rc1 < 1.2``. Any
suffix not recognized as a pre-release is treated as an ordinary
string component, so ``1.2 < 1.2-mysuffix``.
Finally, there are a few special string components that are considered
"infinity versions". They include ``develop``, ``main``, ``master``,
``head``, ``trunk``, and ``stable``. For example: ``1.2 < develop``.
These are useful for specifying the most recent development version of
a package (often a moving target like a git branch), without assigning
a specific version number. Infinity versions are not automatically used when determining the latest version of a package unless explicitly required by another package or user.
More formally, the order on versions is defined as follows. A version
string is split into a list of components based on delimiters such as
``.`` and ``-`` and string boundaries. The components are split into
the **release** and a possible **pre-release** (if the last component
is numeric and the second to last is a string ``alpha``, ``beta`` or ``rc``).
The release components are ordered lexicographically, with comparison
between different types of components as follows:
#. The following special strings are considered larger than any other
numeric or non-numeric version component, and satisfy the following
   order between themselves: ``develop > main > master > head > trunk > stable``.

#. Numeric components are ordered numerically, and are considered smaller
   than the special strings above.
#. All other non-numeric components are less than numeric components,
and are ordered alphabetically.
Finally, if the release components are equal, the pre-release components
are used to break the tie, in the obvious way.
The logic behind this sort order is two-fold:
#. Non-numeric versions are usually used for special cases while
   developing or debugging a piece of software, so ordering most of them
   below numeric versions ensures that Spack prefers numeric versions
   whenever possible.

Spack also supports running several installations in parallel: you can type
``spack install`` in one terminal
window while a batch job is running ``spack install`` on the same or
overlapping dependencies without any process trying to re-do the work of
another.
For example, if you are using Slurm, you could launch an installation
of ``mpich`` using the following command:
.. code-block:: console
implementation was selected for this build:

.. code-block:: python

   if "openmpi" in spec:
       configure_args.append("--with-openmpi")
   elif "mvapich" in spec:
       configure_args.append("--with-mvapich")
It's also a bit more concise than ``satisfies()``.
.. note::
The ``satisfies()`` method tests whether this spec has, at least, all the constraints of the argument spec,
while ``in`` tests whether a spec or any of its dependencies satisfy the provided spec.
If the provided spec is anonymous (e.g., ":1.2:", "+shared") or has the
same name as the spec being checked, then ``in`` works the same as
``satisfies()``; however, use of ``satisfies()`` is more intuitive.
Output identifying each test part is displayed when the tests are run.

Spack also provides a number of example packages whose stand-alone tests can
be combined with those implemented in the package itself, including:
* - `Cxx
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/cxx>`_
- Compiles and runs several ``hello`` programs
* - `Fortran
<https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/fortran>`_
- Compiles and runs ``hello`` programs (``F`` and ``f90``)
* - `Mpi
the ``paths`` attribute:

.. code-block:: yaml

   paths:
   - layout:
     - executables:
       - "bin/clang-3.9"
       script: |
         echo "clang version 3.9.1 (tags/RELEASE_391/final)"
         echo "Target: x86_64-pc-linux-gnu"
         echo "Thread model: posix"
         echo "InstalledDir: /usr/bin"
     platforms: ["linux", "darwin"]
     results:
     - spec: 'llvm@3.9.1 +clang~lld~lldb'
If the ``platforms`` attribute is present, tests are run only if the current host
matches one of the listed platforms.
Each test is performed by first creating a temporary directory structure as
specified in the corresponding ``layout`` and by then running
package detection and checking that the outcome matches the expected
``results``.

Among the keys supported in ``detection_test.yaml`` are:

* - ``results:[0]:spec``
  - A spec that is expected from detection
  - Any valid spec
  - Yes
* - ``results:[0]:extra_attributes``
- Extra attributes expected on the associated Spec
- Nested dictionary with string as keys, and regular expressions as leaf values
- No
"""""""""""""""""""""""""""""""
Reuse tests from other packages
@@ -7013,18 +6973,3 @@ you probably care most about are:
You may also care about `license exceptions
<https://spdx.org/licenses/exceptions-index.html>`_ that use the ``WITH`` operator,
e.g. ``Apache-2.0 WITH LLVM-exception``.
Many of the licenses that are currently in the Spack repositories have been
automatically determined. While this is great for bulk adding license
information and is most likely correct, there are sometimes edge cases that
require manual intervention. To determine which licenses are validated and
which are not, there is the `checked_by` parameter in the license directive:
.. code-block:: python
license("<license>", when="<when>", checked_by="<github username>")
When you have validated a license, either explicitly or as part of packaging
a new package, please set the `checked_by` parameter to your GitHub username
to signal that the license has been manually verified.
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
generated by ``spack ci generate``. You also want your generated rebuild jobs
.. code-block:: yaml
spack:
# ...
ci:
pipeline-gen:
- build-job:
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -17,7 +17,7 @@ experimental software separately from the built-in repository. Spack
allows you to configure local repositories using either the
``repos.yaml`` or the ``spack repo`` command.
A package repository is a directory structured like this::
repo/
repo.yaml

View File

@@ -1,13 +1,13 @@
sphinx==7.2.6
sphinxcontrib-programoutput==0.17
sphinx_design==0.5.0
sphinx-rtd-theme==2.0.0
python-levenshtein==0.25.1
docutils==0.20.1
pygments==2.18.0
urllib3==2.2.1
pytest==8.2.0
isort==5.13.2
black==24.4.2
flake8==7.0.0
mypy==1.10.0
sphinx-rtd-theme==1.3.0
python-levenshtein==0.23.0
docutils==0.18.1
pygments==2.16.1
urllib3==2.0.7
pytest==7.4.3
isort==5.12.0
black==23.11.0
flake8==6.1.0
mypy==1.7.0

View File

@@ -1,4 +1,4 @@
.. Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
.. Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
Spack Project Developers. See the top-level COPYRIGHT file for details.
SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -142,7 +142,7 @@ Reputational Key
----------------
The Reputational Key is the public facing key used to sign complete groups of
development and release packages. Only one key pair exists in this class of
keys. In contrast to the Intermediate CI Key the Reputational Key *should* be
used to verify package integrity. At the end of develop and release pipeline a
final pipeline job pulls down all signed package metadata built by the pipeline,
@@ -272,7 +272,7 @@ Internal Implementation
The technical implementation of the pipeline signing process includes components
defined in Amazon Web Services, the Kubernetes cluster, at affiliated
institutions, and the GitLab/GitLab Runner deployment. We present the technical
implementation in two interdependent sections. The first addresses how secrets
are managed through the lifecycle of a develop or release pipeline. The second
section describes how Gitlab Runner and pipelines are configured and managed to
@@ -295,7 +295,7 @@ infrastructure.
-----------------------
Multiple intermediate CI signing keys exist, one Intermediate CI Key for jobs
run in AWS, and one key for each affiliated institution (e.g. University of
Oregon). Here we describe how the Intermediate CI Key is managed in AWS:
The Intermediate CI Key (including the Signing Intermediate CI Private Key) is
@@ -305,7 +305,7 @@ contains an ASCII-armored export of just the *public* components of the
Reputational Key. This secret also contains the *public* components of each of
the affiliated institutions' Intermediate CI Key. These are potentially needed
to verify dependent packages which may have been found in the public mirror or
built by a protected job running on an affiliated institution's infrastructure
in an earlier stage of the pipeline.
Procedurally the ``spack-intermediate-ci-signing-key`` secret is used in

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

257
lib/spack/env/cc vendored
View File

@@ -1,7 +1,7 @@
#!/bin/sh -f
# shellcheck disable=SC2034 # evals in this script fool shellcheck
#
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -47,8 +47,7 @@ SPACK_F77_RPATH_ARG
SPACK_FC_RPATH_ARG
SPACK_LINKER_ARG
SPACK_SHORT_SPEC
SPACK_SYSTEM_DIRS
SPACK_MANAGED_DIRS"
SPACK_SYSTEM_DIRS"
# Optional parameters that aren't required to be set
@@ -174,6 +173,22 @@ preextend() {
unset IFS
}
# system_dir PATH
# test whether a path is a system directory
system_dir() {
IFS=':' # SPACK_SYSTEM_DIRS is colon-separated
path="$1"
for sd in $SPACK_SYSTEM_DIRS; do
if [ "${path}" = "${sd}" ] || [ "${path}" = "${sd}/" ]; then
# success if path starts with a system prefix
unset IFS
return 0
fi
done
unset IFS
return 1 # fail if the path does not start with a system prefix
}
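# Illustrative behaviour (hypothetical paths): with
# SPACK_SYSTEM_DIRS="/usr/lib:/usr/local/lib", `system_dir /usr/lib` returns 0
# (an exact match on a system directory), while `system_dir /opt/view/lib`
# returns 1.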
# Fail with a clear message if the input contains any bell characters.
if eval "[ \"\${*#*${lsep}}\" != \"\$*\" ]"; then
die "Compiler command line contains our separator ('${lsep}'). Cannot parse."
@@ -186,18 +201,6 @@ for param in $params; do
fi
done
# eval this because SPACK_MANAGED_DIRS and SPACK_SYSTEM_DIRS are inputs we don't wanna loop over.
# moving the eval inside the function would eval it every call.
eval "\
path_order() {
case \"\$1\" in
$SPACK_MANAGED_DIRS) return 0 ;;
$SPACK_SYSTEM_DIRS) return 2 ;;
/*) return 1 ;;
esac
}
"
# Check if optional parameters are defined
# If we aren't asking for debug flags, don't add them
if [ -z "${SPACK_ADD_DEBUG_FLAGS:-}" ]; then
@@ -245,7 +248,7 @@ case "$command" in
lang_flags=C
debug_flags="-g"
;;
c++|CC|g++|clang++|armclang++|icpc|icpx|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC)
c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC)
command="$SPACK_CXX"
language="C++"
comp="CXX"
@@ -417,12 +420,11 @@ input_command="$*"
parse_Wl() {
while [ $# -ne 0 ]; do
if [ "$wl_expect_rpath" = yes ]; then
path_order "$1"
case $? in
0) append return_spack_store_rpath_dirs_list "$1" ;;
1) append return_rpath_dirs_list "$1" ;;
2) append return_system_rpath_dirs_list "$1" ;;
esac
if system_dir "$1"; then
append return_system_rpath_dirs_list "$1"
else
append return_rpath_dirs_list "$1"
fi
wl_expect_rpath=no
else
case "$1" in
@@ -430,25 +432,21 @@ parse_Wl() {
arg="${1#-rpath=}"
if [ -z "$arg" ]; then
shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
;;
--rpath=*)
arg="${1#--rpath=}"
if [ -z "$arg" ]; then
shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
;;
-rpath|--rpath)
wl_expect_rpath=yes
@@ -475,20 +473,12 @@ categorize_arguments() {
return_other_args_list=""
return_isystem_was_used=""
return_isystem_spack_store_include_dirs_list=""
return_isystem_system_include_dirs_list=""
return_isystem_include_dirs_list=""
return_spack_store_include_dirs_list=""
return_system_include_dirs_list=""
return_include_dirs_list=""
return_spack_store_lib_dirs_list=""
return_system_lib_dirs_list=""
return_lib_dirs_list=""
return_spack_store_rpath_dirs_list=""
return_system_rpath_dirs_list=""
return_rpath_dirs_list=""
@@ -536,7 +526,7 @@ categorize_arguments() {
continue
fi
replaced="$after$stripped"
# it matched, remove it
shift
@@ -556,32 +546,29 @@ categorize_arguments() {
arg="${1#-isystem}"
return_isystem_was_used=true
if [ -z "$arg" ]; then shift; arg="$1"; fi
path_order "$arg"
case $? in
0) append return_isystem_spack_store_include_dirs_list "$arg" ;;
1) append return_isystem_include_dirs_list "$arg" ;;
2) append return_isystem_system_include_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_isystem_system_include_dirs_list "$arg"
else
append return_isystem_include_dirs_list "$arg"
fi
;;
-I*)
arg="${1#-I}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
path_order "$arg"
case $? in
0) append return_spack_store_include_dirs_list "$arg" ;;
1) append return_include_dirs_list "$arg" ;;
2) append return_system_include_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_system_include_dirs_list "$arg"
else
append return_include_dirs_list "$arg"
fi
;;
-L*)
arg="${1#-L}"
if [ -z "$arg" ]; then shift; arg="$1"; fi
path_order "$arg"
case $? in
0) append return_spack_store_lib_dirs_list "$arg" ;;
1) append return_lib_dirs_list "$arg" ;;
2) append return_system_lib_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_system_lib_dirs_list "$arg"
else
append return_lib_dirs_list "$arg"
fi
;;
-l*)
# -loopopt=0 is generated erroneously in autoconf <= 2.69,
@@ -614,32 +601,29 @@ categorize_arguments() {
break
elif [ "$xlinker_expect_rpath" = yes ]; then
# Register the path of -Xlinker -rpath <other args> -Xlinker <path>
path_order "$1"
case $? in
0) append return_spack_store_rpath_dirs_list "$1" ;;
1) append return_rpath_dirs_list "$1" ;;
2) append return_system_rpath_dirs_list "$1" ;;
esac
if system_dir "$1"; then
append return_system_rpath_dirs_list "$1"
else
append return_rpath_dirs_list "$1"
fi
xlinker_expect_rpath=no
else
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
;;
--rpath=*)
arg="${1#--rpath=}"
path_order "$arg"
case $? in
0) append return_spack_store_rpath_dirs_list "$arg" ;;
1) append return_rpath_dirs_list "$arg" ;;
2) append return_system_rpath_dirs_list "$arg" ;;
esac
if system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
;;
-rpath|--rpath)
xlinker_expect_rpath=yes
@@ -677,25 +661,16 @@ categorize_arguments() {
}
categorize_arguments "$@"
spack_store_include_dirs_list="$return_spack_store_include_dirs_list"
system_include_dirs_list="$return_system_include_dirs_list"
include_dirs_list="$return_include_dirs_list"
spack_store_lib_dirs_list="$return_spack_store_lib_dirs_list"
system_lib_dirs_list="$return_system_lib_dirs_list"
lib_dirs_list="$return_lib_dirs_list"
spack_store_rpath_dirs_list="$return_spack_store_rpath_dirs_list"
system_rpath_dirs_list="$return_system_rpath_dirs_list"
rpath_dirs_list="$return_rpath_dirs_list"
isystem_spack_store_include_dirs_list="$return_isystem_spack_store_include_dirs_list"
isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
isystem_include_dirs_list="$return_isystem_include_dirs_list"
isystem_was_used="$return_isystem_was_used"
other_args_list="$return_other_args_list"
include_dirs_list="$return_include_dirs_list"
lib_dirs_list="$return_lib_dirs_list"
rpath_dirs_list="$return_rpath_dirs_list"
system_include_dirs_list="$return_system_include_dirs_list"
system_lib_dirs_list="$return_system_lib_dirs_list"
system_rpath_dirs_list="$return_system_rpath_dirs_list"
isystem_was_used="$return_isystem_was_used"
isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
isystem_include_dirs_list="$return_isystem_include_dirs_list"
other_args_list="$return_other_args_list"
#
# Add flags from Spack's cppflags, cflags, cxxflags, fcflags, fflags, and
@@ -755,7 +730,7 @@ esac
# Linker flags
case "$mode" in
ccld)
ld|ccld)
extend spack_flags_list SPACK_LDFLAGS
;;
esac
@@ -763,25 +738,16 @@ esac
IFS="$lsep"
categorize_arguments $spack_flags_list
unset IFS
spack_flags_isystem_spack_store_include_dirs_list="$return_isystem_spack_store_include_dirs_list"
spack_flags_isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
spack_flags_isystem_include_dirs_list="$return_isystem_include_dirs_list"
spack_flags_spack_store_include_dirs_list="$return_spack_store_include_dirs_list"
spack_flags_system_include_dirs_list="$return_system_include_dirs_list"
spack_flags_include_dirs_list="$return_include_dirs_list"
spack_flags_spack_store_lib_dirs_list="$return_spack_store_lib_dirs_list"
spack_flags_system_lib_dirs_list="$return_system_lib_dirs_list"
spack_flags_lib_dirs_list="$return_lib_dirs_list"
spack_flags_spack_store_rpath_dirs_list="$return_spack_store_rpath_dirs_list"
spack_flags_system_rpath_dirs_list="$return_system_rpath_dirs_list"
spack_flags_rpath_dirs_list="$return_rpath_dirs_list"
spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_other_args_list="$return_other_args_list"
spack_flags_include_dirs_list="$return_include_dirs_list"
spack_flags_lib_dirs_list="$return_lib_dirs_list"
spack_flags_rpath_dirs_list="$return_rpath_dirs_list"
spack_flags_system_include_dirs_list="$return_system_include_dirs_list"
spack_flags_system_lib_dirs_list="$return_system_lib_dirs_list"
spack_flags_system_rpath_dirs_list="$return_system_rpath_dirs_list"
spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
spack_flags_isystem_include_dirs_list="$return_isystem_include_dirs_list"
spack_flags_other_args_list="$return_other_args_list"
# On macOS insert headerpad_max_install_names linker flag
@@ -801,13 +767,11 @@ if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
# Append RPATH directories. Note that in the case of the
# top-level package these directories may not exist yet. For dependencies
# it is assumed that paths have already been confirmed.
extend spack_store_rpath_dirs_list SPACK_STORE_RPATH_DIRS
extend rpath_dirs_list SPACK_RPATH_DIRS
fi
fi
if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
extend spack_store_lib_dirs_list SPACK_STORE_LINK_DIRS
extend lib_dirs_list SPACK_LINK_DIRS
fi
@@ -834,50 +798,38 @@ case "$mode" in
;;
esac
case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ] || [ "$isystem_was_used" = "true" ]; then
extend isystem_spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend isystem_include_dirs_list SPACK_INCLUDE_DIRS
else
extend spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend include_dirs_list SPACK_INCLUDE_DIRS
fi
;;
esac
#
# Finally, reassemble the command line.
#
args_list="$flags_list"
# Include search paths partitioned by (in store, non-system, system)
# Insert include directories just prior to any system include directories
# NOTE: adding ${lsep} to the prefix here turns every added element into two
extend args_list spack_flags_spack_store_include_dirs_list -I
extend args_list spack_store_include_dirs_list -I
extend args_list spack_flags_include_dirs_list -I
extend args_list include_dirs_list -I
extend args_list spack_flags_isystem_spack_store_include_dirs_list "-isystem${lsep}"
extend args_list isystem_spack_store_include_dirs_list "-isystem${lsep}"
extend args_list spack_flags_include_dirs_list "-I"
extend args_list include_dirs_list "-I"
extend args_list spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list isystem_include_dirs_list "-isystem${lsep}"
case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
elif [ "$isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
else
extend args_list SPACK_INCLUDE_DIRS "-I"
fi
;;
esac
extend args_list spack_flags_system_include_dirs_list -I
extend args_list system_include_dirs_list -I
extend args_list spack_flags_isystem_system_include_dirs_list "-isystem${lsep}"
extend args_list isystem_system_include_dirs_list "-isystem${lsep}"
# Library search paths partitioned by (in store, non-system, system)
extend args_list spack_flags_spack_store_lib_dirs_list "-L"
extend args_list spack_store_lib_dirs_list "-L"
# Library search paths
extend args_list spack_flags_lib_dirs_list "-L"
extend args_list lib_dirs_list "-L"
extend args_list spack_flags_system_lib_dirs_list "-L"
extend args_list system_lib_dirs_list "-L"
@@ -887,12 +839,8 @@ case "$mode" in
if [ -n "$dtags_to_add" ] ; then
append args_list "$linker_arg$dtags_to_add"
fi
extend args_list spack_flags_spack_store_rpath_dirs_list "$rpath"
extend args_list spack_store_rpath_dirs_list "$rpath"
extend args_list spack_flags_rpath_dirs_list "$rpath"
extend args_list rpath_dirs_list "$rpath"
extend args_list spack_flags_system_rpath_dirs_list "$rpath"
extend args_list system_rpath_dirs_list "$rpath"
;;
@@ -900,12 +848,8 @@ case "$mode" in
if [ -n "$dtags_to_add" ] ; then
append args_list "$dtags_to_add"
fi
extend args_list spack_flags_spack_store_rpath_dirs_list "-rpath${lsep}"
extend args_list spack_store_rpath_dirs_list "-rpath${lsep}"
extend args_list spack_flags_rpath_dirs_list "-rpath${lsep}"
extend args_list rpath_dirs_list "-rpath${lsep}"
extend args_list spack_flags_system_rpath_dirs_list "-rpath${lsep}"
extend args_list system_rpath_dirs_list "-rpath${lsep}"
;;
@@ -969,3 +913,4 @@ fi
# Execute the full command, preserving spaces with IFS set
# to the alarm bell separator.
IFS="$lsep"; exec $full_command_list

View File

@@ -1,4 +1,4 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.4 (commit 48b92512b9ce203ded0ebd1ac41b42593e931f7c)
* Version: 0.2.2 (commit 1dc58a5776dd77e6fc6e4ba5626af5b1fb24996e)
astunparse
----------------

View File

@@ -497,7 +497,7 @@ def copy_attributes(self, t, memo=None):
Tag.attrib, merge_attrib]:
if hasattr(self, a):
if memo is not None:
setattr(t, a, copy.deepcopy(getattr(self, a), memo))
setattr(t, a, copy.deepcopy(getattr(self, a, memo)))
else:
setattr(t, a, getattr(self, a))
# fmt: on

View File

@@ -1,3 +1,2 @@
"""Init file to avoid namespace packages"""
__version__ = "0.2.4"
__version__ = "0.2.2"

View File

@@ -3,7 +3,6 @@
"""
import sys
from .cli import main
sys.exit(main())

View File

@@ -46,11 +46,7 @@ def _make_parser() -> argparse.ArgumentParser:
def cpu() -> int:
"""Run the `archspec cpu` subcommand."""
try:
print(archspec.cpu.host())
except FileNotFoundError as exc:
print(exc)
return 1
print(archspec.cpu.host())
return 0

View File

@@ -5,23 +5,16 @@
"""The "cpu" package permits to query and compare different
CPU microarchitectures.
"""
from .detect import brand_string, host
from .microarchitecture import (
TARGETS,
InvalidCompilerVersion,
Microarchitecture,
UnsupportedMicroarchitecture,
generic_microarchitecture,
version_components,
)
from .microarchitecture import Microarchitecture, UnsupportedMicroarchitecture
from .microarchitecture import TARGETS, generic_microarchitecture
from .microarchitecture import version_components
from .detect import host
__all__ = [
"brand_string",
"host",
"TARGETS",
"InvalidCompilerVersion",
"Microarchitecture",
"UnsupportedMicroarchitecture",
"TARGETS",
"generic_microarchitecture",
"host",
"version_components",
]
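# A minimal usage sketch of the API exported above (new side of the diff);
# the printed values are illustrative only:
#   >>> import archspec.cpu
#   >>> archspec.cpu.host().name      # e.g. 'skylake'
#   >>> archspec.cpu.brand_string()   # e.g. 'Intel(R) Xeon(R) ...' or None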

View File

@@ -4,17 +4,15 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Detection of CPU microarchitectures"""
import collections
import functools
import os
import platform
import re
import struct
import subprocess
import warnings
from typing import Dict, List, Optional, Set, Tuple, Union
from ..vendor.cpuid.cpuid import CPUID
from .microarchitecture import TARGETS, Microarchitecture, generic_microarchitecture
from .schema import CPUID_JSON, TARGETS_JSON
from .microarchitecture import generic_microarchitecture, TARGETS
from .schema import TARGETS_JSON
#: Mapping from operating systems to chain of commands
#: to obtain a dictionary of raw info on the current cpu
@@ -24,46 +22,43 @@
#: functions checking the compatibility of the host with a given target
COMPATIBILITY_CHECKS = {}
# Constants for commonly used architectures
X86_64 = "x86_64"
AARCH64 = "aarch64"
PPC64LE = "ppc64le"
PPC64 = "ppc64"
RISCV64 = "riscv64"
def detection(operating_system: str):
"""Decorator to mark functions that are meant to return partial information on the current cpu.
def info_dict(operating_system):
"""Decorator to mark functions that are meant to return raw info on
the current cpu.
Args:
operating_system: operating system where this function can be used.
operating_system (str or tuple): operating system for which the marked
function is a viable factory of raw info dictionaries.
"""
def decorator(factory):
INFO_FACTORY[operating_system].append(factory)
return factory
@functools.wraps(factory)
def _impl():
info = factory()
# Check that info contains a few mandatory fields
msg = 'field "{0}" is missing from raw info dictionary'
assert "vendor_id" in info, msg.format("vendor_id")
assert "flags" in info, msg.format("flags")
assert "model" in info, msg.format("model")
assert "model_name" in info, msg.format("model_name")
return info
return _impl
return decorator
def partial_uarch(
name: str = "", vendor: str = "", features: Optional[Set[str]] = None, generation: int = 0
) -> Microarchitecture:
"""Construct a partial microarchitecture, from information gathered during system scan."""
return Microarchitecture(
name=name,
parents=[],
vendor=vendor,
features=features or set(),
compilers={},
generation=generation,
)
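# Hypothetical example of the helper above: a partial result for an Intel
# host scanned from /proc/cpuinfo could be built as
#   partial_uarch(vendor="GenuineIntel", features={"sse4_2", "avx2"})
# leaving name, parents, compilers, and generation at neutral defaults.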
@detection(operating_system="Linux")
def proc_cpuinfo() -> Microarchitecture:
"""Returns a partial Microarchitecture, obtained from scanning ``/proc/cpuinfo``"""
data = {}
@info_dict(operating_system="Linux")
def proc_cpuinfo():
"""Returns a raw info dictionary by parsing the first entry of
``/proc/cpuinfo``
"""
info = {}
with open("/proc/cpuinfo") as file: # pylint: disable=unspecified-encoding
for line in file:
key, separator, value = line.partition(":")
@@ -75,121 +70,11 @@ def proc_cpuinfo() -> Microarchitecture:
#
# we are on a blank line separating two cpus. Exit early as
# we want to read just the first entry in /proc/cpuinfo
if separator != ":" and data:
if separator != ":" and info:
break
data[key.strip()] = value.strip()
architecture = _machine()
if architecture == X86_64:
return partial_uarch(
vendor=data.get("vendor_id", "generic"), features=_feature_set(data, key="flags")
)
if architecture == AARCH64:
return partial_uarch(
vendor=_canonicalize_aarch64_vendor(data),
features=_feature_set(data, key="Features"),
)
if architecture in (PPC64LE, PPC64):
generation_match = re.search(r"POWER(\d+)", data.get("cpu", ""))
try:
generation = int(generation_match.group(1))
except AttributeError:
# There might be no match under emulated environments. For instance
# emulating a ppc64le with QEMU and Docker still reports the host
# /proc/cpuinfo and not a Power
generation = 0
return partial_uarch(generation=generation)
if architecture == RISCV64:
if data.get("uarch") == "sifive,u74-mc":
data["uarch"] = "u74mc"
return partial_uarch(name=data.get("uarch", RISCV64))
return generic_microarchitecture(architecture)
class CpuidInfoCollector:
"""Collects the information we need on the host CPU from cpuid"""
# pylint: disable=too-few-public-methods
def __init__(self):
self.cpuid = CPUID()
registers = self.cpuid.registers_for(**CPUID_JSON["vendor"]["input"])
self.highest_basic_support = registers.eax
self.vendor = struct.pack("III", registers.ebx, registers.edx, registers.ecx).decode(
"utf-8"
)
registers = self.cpuid.registers_for(**CPUID_JSON["highest_extension_support"]["input"])
self.highest_extension_support = registers.eax
self.features = self._features()
def _features(self):
result = set()
def check_features(data):
registers = self.cpuid.registers_for(**data["input"])
for feature_check in data["bits"]:
current = getattr(registers, feature_check["register"])
if self._is_bit_set(current, feature_check["bit"]):
result.add(feature_check["name"])
for call_data in CPUID_JSON["flags"]:
if call_data["input"]["eax"] > self.highest_basic_support:
continue
check_features(call_data)
for call_data in CPUID_JSON["extension-flags"]:
if call_data["input"]["eax"] > self.highest_extension_support:
continue
check_features(call_data)
return result
def _is_bit_set(self, register: int, bit: int) -> bool:
mask = 1 << bit
return register & mask > 0
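# For example, _is_bit_set(0b1010, 1) is True while _is_bit_set(0b1010, 2)
# is False, since only bits 1 and 3 are set in 0b1010.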
def brand_string(self) -> Optional[str]:
"""Returns the brand string, if available."""
if self.highest_extension_support < 0x80000004:
return None
r1 = self.cpuid.registers_for(eax=0x80000002, ecx=0)
r2 = self.cpuid.registers_for(eax=0x80000003, ecx=0)
r3 = self.cpuid.registers_for(eax=0x80000004, ecx=0)
result = struct.pack(
"IIIIIIIIIIII",
r1.eax,
r1.ebx,
r1.ecx,
r1.edx,
r2.eax,
r2.ebx,
r2.ecx,
r2.edx,
r3.eax,
r3.ebx,
r3.ecx,
r3.edx,
).decode("utf-8")
return result.strip("\x00")
@detection(operating_system="Windows")
def cpuid_info():
"""Returns a partial Microarchitecture, obtained from running the cpuid instruction"""
architecture = _machine()
if architecture == X86_64:
data = CpuidInfoCollector()
return partial_uarch(vendor=data.vendor, features=data.features)
return generic_microarchitecture(architecture)
info[key.strip()] = value.strip()
return info
def _check_output(args, env):
@@ -198,25 +83,14 @@ def _check_output(args, env):
return str(output.decode("utf-8"))
WINDOWS_MAPPING = {
"AMD64": X86_64,
"ARM64": AARCH64,
}
def _machine():
"""Return the machine architecture we are on"""
""" "Return the machine architecture we are on"""
operating_system = platform.system()
# If we are not on Darwin or Windows, trust what Python tells us
if operating_system not in ("Darwin", "Windows"):
# If we are not on Darwin, trust what Python tells us
if operating_system != "Darwin":
return platform.machine()
# Normalize windows specific names
if operating_system == "Windows":
platform_machine = platform.machine()
return WINDOWS_MAPPING.get(platform_machine, platform_machine)
# On Darwin it might happen that we are on M1, but using an interpreter
# built for x86_64. In that case "platform.machine() == 'x86_64'", so we
# need to fix that.
@@ -229,47 +103,54 @@ def _machine():
if "Apple" in output:
# Note that a native Python interpreter on Apple M1 would return
# "arm64" instead of "aarch64". Here we normalize to the latter.
return AARCH64
return "aarch64"
return X86_64
return "x86_64"
@detection(operating_system="Darwin")
def sysctl_info() -> Microarchitecture:
@info_dict(operating_system="Darwin")
def sysctl_info_dict():
"""Returns a raw info dictionary parsing the output of sysctl."""
child_environment = _ensure_bin_usrbin_in_path()
def sysctl(*args: str) -> str:
def sysctl(*args):
return _check_output(["sysctl"] + list(args), env=child_environment).strip()
if _machine() == X86_64:
features = (
f'{sysctl("-n", "machdep.cpu.features").lower()} '
f'{sysctl("-n", "machdep.cpu.leaf7_features").lower()}'
if _machine() == "x86_64":
flags = (
sysctl("-n", "machdep.cpu.features").lower()
+ " "
+ sysctl("-n", "machdep.cpu.leaf7_features").lower()
)
features = set(features.split())
info = {
"vendor_id": sysctl("-n", "machdep.cpu.vendor"),
"flags": flags,
"model": sysctl("-n", "machdep.cpu.model"),
"model name": sysctl("-n", "machdep.cpu.brand_string"),
}
else:
model = "unknown"
model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
if "m2" in model_str:
model = "m2"
elif "m1" in model_str:
model = "m1"
elif "apple" in model_str:
model = "m1"
# Flags detected on Darwin turned to their linux counterpart
for darwin_flag, linux_flag in TARGETS_JSON["conversions"]["darwin_flags"].items():
if darwin_flag in features:
features.update(linux_flag.split())
return partial_uarch(vendor=sysctl("-n", "machdep.cpu.vendor"), features=features)
model = "unknown"
model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
if "m2" in model_str:
model = "m2"
elif "m1" in model_str:
model = "m1"
elif "apple" in model_str:
model = "m1"
return partial_uarch(name=model, vendor="Apple")
info = {
"vendor_id": "Apple",
"flags": [],
"model": model,
"CPU implementer": "Apple",
"model name": sysctl("-n", "machdep.cpu.brand_string"),
}
return info
def _ensure_bin_usrbin_in_path():
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is usually found there
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"):
@@ -279,10 +160,22 @@ def _ensure_bin_usrbin_in_path():
return child_environment
def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str:
"""Adjust the vendor field to make it human-readable"""
if "CPU implementer" not in data:
return "generic"
def adjust_raw_flags(info):
"""Adjust the flags detected on the system to homogenize
slightly different representations.
"""
# Flags detected on Darwin turned to their linux counterpart
flags = info.get("flags", [])
d2l = TARGETS_JSON["conversions"]["darwin_flags"]
for darwin_flag, linux_flag in d2l.items():
if darwin_flag in flags:
info["flags"] += " " + linux_flag
def adjust_raw_vendor(info):
"""Adjust the vendor field to make it human readable"""
if "CPU implementer" not in info:
return
# Mapping numeric codes to vendor (ARM). This list is a merge from
# different sources:
@@ -292,37 +185,43 @@ def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str:
# https://github.com/gcc-mirror/gcc/blob/master/gcc/config/aarch64/aarch64-cores.def
# https://patchwork.kernel.org/patch/10524949/
arm_vendors = TARGETS_JSON["conversions"]["arm_vendors"]
arm_code = data["CPU implementer"]
return arm_vendors.get(arm_code, arm_code)
arm_code = info["CPU implementer"]
if arm_code in arm_vendors:
info["CPU implementer"] = arm_vendors[arm_code]
def _feature_set(data: Dict[str, str], key: str) -> Set[str]:
return set(data.get(key, "").split())
def raw_info_dictionary():
"""Returns a dictionary with information on the cpu of the current host.
def detected_info() -> Microarchitecture:
"""Returns a partial Microarchitecture with information on the CPU of the current host.
This function calls all the viable factories one after the other until there's one that is
able to produce the requested information. Falls back to a generic microarchitecture if none
of the calls succeed.
This function calls all the viable factories one after the other until
there's one that is able to produce the requested information.
"""
# pylint: disable=broad-except
info = {}
for factory in INFO_FACTORY[platform.system()]:
try:
return factory()
info = factory()
except Exception as exc:
warnings.warn(str(exc))
return generic_microarchitecture(_machine())
if info:
adjust_raw_flags(info)
adjust_raw_vendor(info)
break
return info
def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitecture]:
"""Returns an unordered list of known micro-architectures that are compatible with the
partial Microarchitecture passed as input.
def compatible_microarchitectures(info):
"""Returns an unordered list of known micro-architectures that are
compatible with the info dictionary passed as argument.
Args:
info (dict): dictionary containing information on the host cpu
"""
architecture_family = _machine()
# If a tester is not registered, assume no known target is compatible with the host
# If a tester is not registered, be conservative and assume no known
# target is compatible with the host
tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
return [x for x in TARGETS.values() if tester(info, x)] or [
generic_microarchitecture(architecture_family)
@@ -331,8 +230,8 @@ def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitec
def host():
"""Detects the host micro-architecture and returns it."""
# Retrieve information on the host's cpu
info = detected_info()
# Retrieve a dictionary with raw information on the host's cpu
info = raw_info_dictionary()
# Get a list of possible candidates for this micro-architecture
candidates = compatible_microarchitectures(info)
@@ -359,15 +258,16 @@ def sorting_fn(item):
return max(candidates, key=sorting_fn)
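# Hedged interactive sketch of the resolution above (output illustrative):
#   >>> import archspec.cpu
#   >>> archspec.cpu.host().name
#   'icelake'   # the best-scoring compatible candidate on that machine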
def compatibility_check(architecture_family: Union[str, Tuple[str, ...]]):
def compatibility_check(architecture_family):
"""Decorator to register a function as a proper compatibility check.
A compatibility check function takes a partial Microarchitecture object as a first argument,
and an arbitrary target Microarchitecture as the second argument. It returns True if the
target is compatible with first argument, False otherwise.
A compatibility check function takes the raw info dictionary as a first
argument and an arbitrary target as the second argument. It returns True
if the target is compatible with the info dictionary, False otherwise.
Args:
architecture_family: architecture family for which this test can be used
architecture_family (str or tuple): architecture family for which
this test can be used, e.g. x86_64 or ppc64le etc.
"""
# Turn the argument into something iterable
if isinstance(architecture_family, str):
@@ -380,70 +280,86 @@ def decorator(func):
return decorator
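# Sketch of registering an additional check (hypothetical family name):
#   @compatibility_check(architecture_family="mips64")
#   def compatibility_check_for_mips64(info, target):
#       return target.vendor == "generic"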
@compatibility_check(architecture_family=(PPC64LE, PPC64))
@compatibility_check(architecture_family=("ppc64le", "ppc64"))
def compatibility_check_for_power(info, target):
"""Compatibility check for PPC64 and PPC64LE architectures."""
basename = platform.machine()
generation_match = re.search(r"POWER(\d+)", info.get("cpu", ""))
try:
generation = int(generation_match.group(1))
except AttributeError:
# There might be no match under emulated environments. For instance
# emulating a ppc64le with QEMU and Docker still reports the host
# /proc/cpuinfo and not a Power
generation = 0
# We can use a target if it descends from our machine type and our
# generation (9 for POWER9, etc) is at least its generation.
arch_root = TARGETS[_machine()]
arch_root = TARGETS[basename]
return (
target == arch_root or arch_root in target.ancestors
) and target.generation <= info.generation
) and target.generation <= generation
@compatibility_check(architecture_family=X86_64)
@compatibility_check(architecture_family="x86_64")
def compatibility_check_for_x86_64(info, target):
"""Compatibility check for x86_64 architectures."""
basename = "x86_64"
vendor = info.get("vendor_id", "generic")
features = set(info.get("flags", "").split())
# We can use a target if it descends from our machine type, is from our
# vendor, and we have all of its features
arch_root = TARGETS[X86_64]
arch_root = TARGETS[basename]
return (
(target == arch_root or arch_root in target.ancestors)
and target.vendor in (info.vendor, "generic")
and target.features.issubset(info.features)
and target.vendor in (vendor, "generic")
and target.features.issubset(features)
)
@compatibility_check(architecture_family=AARCH64)
@compatibility_check(architecture_family="aarch64")
def compatibility_check_for_aarch64(info, target):
"""Compatibility check for AARCH64 architectures."""
# At the moment, it's not clear how to detect compatibility with
basename = "aarch64"
features = set(info.get("Features", "").split())
vendor = info.get("CPU implementer", "generic")
# At the moment it's not clear how to detect compatibility with
# a specific version of the architecture
if target.vendor == "generic" and target.name != AARCH64:
if target.vendor == "generic" and target.name != "aarch64":
return False
arch_root = TARGETS[AARCH64]
arch_root = TARGETS[basename]
arch_root_and_vendor = arch_root == target.family and target.vendor in (
info.vendor,
vendor,
"generic",
)
# On macOS it seems impossible to get all the CPU features
# with syctl info, but for ARM we can get the exact model
if platform.system() == "Darwin":
model = TARGETS[info.name]
model_key = info.get("model", basename)
model = TARGETS[model_key]
return arch_root_and_vendor and (target == model or target in model.ancestors)
return arch_root_and_vendor and target.features.issubset(info.features)
return arch_root_and_vendor and target.features.issubset(features)
@compatibility_check(architecture_family=RISCV64)
@compatibility_check(architecture_family="riscv64")
def compatibility_check_for_riscv64(info, target):
"""Compatibility check for riscv64 architectures."""
arch_root = TARGETS[RISCV64]
basename = "riscv64"
uarch = info.get("uarch")
# sifive unmatched board
if uarch == "sifive,u74-mc":
uarch = "u74mc"
# catch-all for unknown uarchs
else:
uarch = "riscv64"
arch_root = TARGETS[basename]
return (target == arch_root or arch_root in target.ancestors) and (
target.name == info.name or target.vendor == "generic"
target == uarch or target.vendor == "generic"
)
def brand_string() -> Optional[str]:
"""Returns the brand string of the host, if detected, or None."""
if platform.system() == "Darwin":
return _check_output(
["sysctl", "-n", "machdep.cpu.brand_string"], env=_ensure_bin_usrbin_in_path()
).strip()
if host().family == X86_64:
return CpuidInfoCollector().brand_string()
return None

View File

@@ -13,7 +13,6 @@
import archspec
import archspec.cpu.alias
import archspec.cpu.schema
from .alias import FEATURE_ALIASES
from .schema import LazyDictionary
@@ -48,7 +47,7 @@ class Microarchitecture:
which has "broadwell" as a parent, supports running binaries
optimized for "broadwell".
vendor (str): vendor of the micro-architecture
features (set of str): supported CPU flags. Note that the semantic
features (list of str): supported CPU flags. Note that the semantic
of the flags in this field might vary among architectures, if
at all present. For instance x86_64 processors will list all
the flags supported by a given CPU while Arm processors will
@@ -181,35 +180,29 @@ def generic(self):
generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
return max(generics, key=lambda x: len(x.ancestors))
def to_dict(self):
"""Returns a dictionary representation of this object."""
return {
"name": str(self.name),
"vendor": str(self.vendor),
"features": sorted(str(x) for x in self.features),
"generation": self.generation,
"parents": [str(x) for x in self.parents],
"compilers": self.compilers,
}
def to_dict(self, return_list_of_items=False):
"""Returns a dictionary representation of this object.
@staticmethod
def from_dict(data) -> "Microarchitecture":
"""Construct a microarchitecture from a dictionary representation."""
return Microarchitecture(
name=data["name"],
parents=[TARGETS[x] for x in data["parents"]],
vendor=data["vendor"],
features=set(data["features"]),
compilers=data.get("compilers", {}),
generation=data.get("generation", 0),
)
Args:
return_list_of_items (bool): if True returns an ordered list of
items instead of the dictionary
"""
list_of_items = [
("name", str(self.name)),
("vendor", str(self.vendor)),
("features", sorted(str(x) for x in self.features)),
("generation", self.generation),
("parents", [str(x) for x in self.parents]),
]
if return_list_of_items:
return list_of_items
return dict(list_of_items)
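# Illustrative round trip; note that from_dict exists only on the new side
# of this diff:
#   data = TARGETS["haswell"].to_dict()
#   assert Microarchitecture.from_dict(data).name == "haswell"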
def optimization_flags(self, compiler, version):
"""Returns a string containing the optimization flags that needs
to be used to produce code optimized for this micro-architecture.
The version is expected to be a string of dot separated digits.
If there is no information on the compiler passed as argument the
function returns an empty string. If it is known that the compiler
version we want to use does not support this architecture the function
@@ -218,11 +211,6 @@ def optimization_flags(self, compiler, version):
Args:
compiler (str): name of the compiler to be used
version (str): version of the compiler to be used
Raises:
UnsupportedMicroarchitecture: if the requested compiler does not support
this micro-architecture.
ValueError: if the version doesn't match the expected format
"""
# If we don't have information on compiler at all return an empty string
if compiler not in self.family.compilers:
@@ -239,14 +227,6 @@ def optimization_flags(self, compiler, version):
msg = msg.format(compiler, best_target, best_target.family)
raise UnsupportedMicroarchitecture(msg)
# Check that the version matches the expected format
if not re.match(r"^(?:\d+\.)*\d+$", version):
msg = (
"invalid format for the compiler version argument. "
"Only dot separated digits are allowed."
)
raise InvalidCompilerVersion(msg)
# If we have information on this compiler we need to check the
# version being used
compiler_info = self.compilers[compiler]
@@ -291,7 +271,9 @@ def tuplify(ver):
flags = flags_fmt.format(**compiler_entry)
return flags
msg = "cannot produce optimized binary for micro-architecture '{0}' with {1}@{2}"
msg = (
"cannot produce optimized binary for micro-architecture '{0}' with {1}@{2}"
)
if compiler_info:
versions = [x["versions"] for x in compiler_info]
msg += f' [supported compiler versions are {", ".join(versions)}]'
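# Illustrative call; the exact flag string depends on the JSON data shipped
# with the library:
#   TARGETS["broadwell"].optimization_flags("gcc", "12.2.0")
#   # e.g. '-march=broadwell -mtune=broadwell'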
@@ -307,7 +289,9 @@ def generic_microarchitecture(name):
Args:
name (str): name of the micro-architecture
"""
return Microarchitecture(name, parents=[], vendor="generic", features=set(), compilers={})
return Microarchitecture(
name, parents=[], vendor="generic", features=[], compilers={}
)
def version_components(version):
@@ -361,7 +345,9 @@ def fill_target_from_dict(name, data, targets):
compilers = values.get("compilers", {})
generation = values.get("generation", 0)
targets[name] = Microarchitecture(name, parents, vendor, features, compilers, generation)
targets[name] = Microarchitecture(
name, parents, vendor, features, compilers, generation
)
known_targets = {}
data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"]
@@ -382,15 +368,7 @@ def fill_target_from_dict(name, data, targets):
TARGETS = LazyDictionary(_known_microarchitectures)
class ArchspecError(Exception):
"""Base class for errors within archspec"""
class UnsupportedMicroarchitecture(ArchspecError, ValueError):
class UnsupportedMicroarchitecture(ValueError):
"""Raised if a compiler version does not support optimization for a given
micro-architecture.
"""
class InvalidCompilerVersion(ArchspecError, ValueError):
"""Raised when an invalid format is used for compiler versions in archspec."""

View File

@@ -7,9 +7,7 @@
"""
import collections.abc
import json
import os
import pathlib
from typing import Tuple
import os.path
class LazyDictionary(collections.abc.MutableMapping):
@@ -48,65 +46,21 @@ def __len__(self):
return len(self.data)
#: Environment variable that might point to a directory with a user defined JSON file
DIR_FROM_ENVIRONMENT = "ARCHSPEC_CPU_DIR"
def _load_json_file(json_file):
json_dir = os.path.join(os.path.dirname(__file__), "..", "json", "cpu")
json_dir = os.path.abspath(json_dir)
#: Environment variable that might point to a directory with extensions to JSON files
EXTENSION_DIR_FROM_ENVIRONMENT = "ARCHSPEC_EXTENSION_CPU_DIR"
def _factory():
filename = os.path.join(json_dir, json_file)
with open(filename, "r", encoding="utf-8") as file:
return json.load(file)
def _json_file(filename: str, allow_custom: bool = False) -> Tuple[pathlib.Path, pathlib.Path]:
"""Given a filename, returns the absolute path for the main JSON file, and an
optional absolute path for an extension JSON file.
Args:
filename: filename for the JSON file
allow_custom: if True, allows overriding the location where the file resides
"""
json_dir = pathlib.Path(__file__).parent / ".." / "json" / "cpu"
if allow_custom and DIR_FROM_ENVIRONMENT in os.environ:
json_dir = pathlib.Path(os.environ[DIR_FROM_ENVIRONMENT])
json_dir = json_dir.absolute()
json_file = json_dir / filename
extension_file = None
if allow_custom and EXTENSION_DIR_FROM_ENVIRONMENT in os.environ:
extension_dir = pathlib.Path(os.environ[EXTENSION_DIR_FROM_ENVIRONMENT])
extension_dir = extension_dir.absolute()
extension_file = extension_dir / filename
return json_file, extension_file
def _load(json_file: pathlib.Path, extension_file: pathlib.Path):
with open(json_file, "r", encoding="utf-8") as file:
data = json.load(file)
if not extension_file or not extension_file.exists():
return data
with open(extension_file, "r", encoding="utf-8") as file:
extension_data = json.load(file)
top_level_sections = list(data.keys())
for key in top_level_sections:
if key not in extension_data:
continue
data[key].update(extension_data[key])
return data
return _factory
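# Hypothetical override (new side): pointing ARCHSPEC_CPU_DIR at a directory
# holding a custom microarchitectures.json makes _json_file pick it up, while
# ARCHSPEC_EXTENSION_CPU_DIR lets _load merge extra entries per top-level key:
#   os.environ["ARCHSPEC_CPU_DIR"] = "/opt/custom/archspec/json"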
#: In memory representation of the data in microarchitectures.json,
#: loaded on first access
TARGETS_JSON = LazyDictionary(_load, *_json_file("microarchitectures.json", allow_custom=True))
TARGETS_JSON = LazyDictionary(_load_json_file("microarchitectures.json"))
#: JSON schema for microarchitectures.json, loaded on first access
TARGETS_JSON_SCHEMA = LazyDictionary(_load, *_json_file("microarchitectures_schema.json"))
#: Information on how to call 'cpuid' to get information on the HOST CPU
CPUID_JSON = LazyDictionary(_load, *_json_file("cpuid.json", allow_custom=True))
#: JSON schema for cpuid.json, loaded on first access
CPUID_JSON_SCHEMA = LazyDictionary(_load, *_json_file("cpuid_schema.json"))
SCHEMA = LazyDictionary(_load_json_file("microarchitectures_schema.json"))

View File

@@ -9,11 +9,11 @@ language specific APIs.
Currently the repository contains the following JSON files:
```console
cpu/
├── cpuid.json # Contains information on CPUID calls to retrieve vendor and features on x86_64
├── cpuid_schema.json # Schema for the file above
├── microarchitectures.json # Contains information on CPU microarchitectures
└── microarchitectures_schema.json # Schema for the file above
.
├── COPYRIGHT
└── cpu
    ├── microarchitectures.json # Contains information on CPU microarchitectures
    └── microarchitectures_schema.json # Schema for the file above
```

File diff suppressed because it is too large

View File

@@ -1,134 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Schema for microarchitecture definitions and feature aliases",
"type": "object",
"additionalProperties": false,
"properties": {
"vendor": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
}
}
},
"highest_extension_support": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
}
}
},
"flags": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
},
"bits": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"register": {
"type": "string"
},
"bit": {
"type": "integer"
}
}
}
}
}
}
},
"extension-flags": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
},
"bits": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"register": {
"type": "string"
},
"bit": {
"type": "integer"
}
}
}
}
}
}
}
}
}

View File

@@ -2937,6 +2937,8 @@
"ilrcpc",
"flagm",
"ssbs",
"paca",
"pacg",
"dcpodp",
"svei8mm",
"svebf16",
@@ -3064,6 +3066,8 @@
"flagm",
"ssbs",
"sb",
"paca",
"pacg",
"dcpodp",
"sve2",
"sveaes",
@@ -3077,7 +3081,8 @@
"svebf16",
"i8mm",
"bf16",
"dgh"
"dgh",
"bti"
],
"compilers" : {
"gcc": [

View File

@@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Anders Høst
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Some files were not shown because too many files have changed in this diff