Compare commits


4 Commits

Author           SHA1        Message                                       Date
Harmen Stoppels  eb77658b8f  manual changes                                2024-04-03 09:57:43 +02:00
Harmen Stoppels  430b41efc6  fix ordering                                  2024-04-02 19:10:01 +02:00
Harmen Stoppels  b628c989cb  undo: py-geopmdpy                             2024-04-02 17:40:37 +02:00
Harmen Stoppels  8f898a9cdc  python ecosystem: sdist -> universal wheels   2024-04-02 17:38:42 +02:00
7258 changed files with 73267 additions and 112546 deletions


@@ -5,7 +5,7 @@ coverage:
   status:
     project:
       default:
-        threshold: 2.0%
+        threshold: 0.2%
 ignore:
 - lib/spack/spack/test/.*


@@ -1,5 +1,4 @@
 {
-    "name": "Ubuntu 20.04",
     "image": "ghcr.io/spack/ubuntu20.04-runner-amd64-gcc-11.4:2023.08.01",
     "postCreateCommand": "./.devcontainer/postCreateCommand.sh"
 }


@@ -1,5 +0,0 @@
-{
-    "name": "Ubuntu 22.04",
-    "image": "ghcr.io/spack/ubuntu-22.04:v2024-05-07",
-    "postCreateCommand": "./.devcontainer/postCreateCommand.sh"
-}


@@ -5,10 +5,13 @@ updates:
     directory: "/"
     schedule:
       interval: "daily"
-  # Requirements to run style checks and build documentation
+  # Requirements to build documentation
   - package-ecosystem: "pip"
-    directories:
-    - "/.github/workflows/requirements/style/*"
-    - "/lib/spack/docs"
+    directory: "/lib/spack/docs"
+    schedule:
+      interval: "daily"
+  # Requirements to run style checks
+  - package-ecosystem: "pip"
+    directory: "/.github/workflows/style"
     schedule:
       interval: "daily"


@@ -17,58 +17,35 @@ concurrency:
 jobs:
   # Run audits on all the packages in the built-in repository
   package-audits:
-    runs-on: ${{ matrix.system.os }}
+    runs-on: ${{ matrix.operating_system }}
     strategy:
       matrix:
-        system:
-        - { os: windows-latest, shell: 'powershell Invoke-Expression -Command "./share/spack/qa/windows_test_setup.ps1"; {0}' }
-        - { os: ubuntu-latest, shell: bash }
-        - { os: macos-latest, shell: bash }
-    defaults:
-      run:
-        shell: ${{ matrix.system.shell }}
+        operating_system: ["ubuntu-latest", "macos-latest"]
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
         with:
           python-version: ${{inputs.python_version}}
       - name: Install Python packages
         run: |
           pip install --upgrade pip setuptools pytest coverage[toml]
-      - name: Setup for Windows run
-        if: runner.os == 'Windows'
-        run: |
-          python -m pip install --upgrade pywin32
       - name: Package audits (with coverage)
-        env:
-          COVERAGE_FILE: coverage/.coverage-audits-${{ matrix.system.os }}
-        if: ${{ inputs.with_coverage == 'true' && runner.os != 'Windows' }}
+        if: ${{ inputs.with_coverage == 'true' }}
         run: |
           . share/spack/setup-env.sh
           coverage run $(which spack) audit packages
-          coverage run $(which spack) audit configs
-          coverage run $(which spack) -d audit externals
+          coverage run $(which spack) audit externals
           coverage combine
+          coverage xml
       - name: Package audits (without coverage)
-        if: ${{ inputs.with_coverage == 'false' && runner.os != 'Windows' }}
+        if: ${{ inputs.with_coverage == 'false' }}
         run: |
           . share/spack/setup-env.sh
-          spack -d audit packages
-          spack -d audit configs
-          spack -d audit externals
-      - name: Package audits (without coverage)
-        if: ${{ runner.os == 'Windows' }}
-        run: |
-          . share/spack/setup-env.sh
-          spack -d audit packages
-          ./share/spack/qa/validate_last_exit.ps1
-          spack -d audit configs
-          ./share/spack/qa/validate_last_exit.ps1
-          spack -d audit externals
-          ./share/spack/qa/validate_last_exit.ps1
-      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
-        if: ${{ inputs.with_coverage == 'true' && runner.os != 'Windows' }}
+          $(which spack) audit packages
+          $(which spack) audit externals
+      - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
+        if: ${{ inputs.with_coverage == 'true' }}
         with:
-          name: coverage-audits-${{ matrix.system.os }}
-          path: coverage
-          include-hidden-files: true
+          flags: unittests,audits
+          token: ${{ secrets.CODECOV_TOKEN }}
+          verbose: true


@@ -1,8 +1,7 @@
 #!/bin/bash
-set -e
+set -ex
 source share/spack/setup-env.sh
-$PYTHON bin/spack bootstrap disable github-actions-v0.5
 $PYTHON bin/spack bootstrap disable spack-install
-$PYTHON bin/spack $SPACK_FLAGS solve zlib
+$PYTHON bin/spack -d solve zlib
 tree $BOOTSTRAP/store
 exit 0


@@ -13,22 +13,118 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  distros-clingo-sources:
+  fedora-clingo-sources:
     runs-on: ubuntu-latest
-    container: ${{ matrix.image }}
-    strategy:
-      matrix:
-        image: ["fedora:latest", "opensuse/leap:latest"]
+    container: "fedora:latest"
     steps:
-      - name: Setup Fedora
-        if: ${{ matrix.image == 'fedora:latest' }}
+      - name: Install dependencies
         run: |
           dnf install -y \
-            bzip2 curl file gcc-c++ gcc gcc-gfortran git gzip \
+            bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
             make patch unzip which xz python3 python3-devel tree \
             cmake bison bison-devel libstdc++-static
-      - name: Setup OpenSUSE
-        if: ${{ matrix.image == 'opensuse/leap:latest' }}
+      - name: Checkout
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+        with:
+          fetch-depth: 0
+      - name: Setup non-root user
+        run: |
+          # See [1] below
+          git config --global --add safe.directory /__w/spack/spack
+          useradd spack-test && mkdir -p ~spack-test
+          chown -R spack-test . ~spack-test
+      - name: Setup repo
+        shell: runuser -u spack-test -- bash {0}
+        run: |
+          git --version
+          . .github/workflows/setup_git.sh
+      - name: Bootstrap clingo
+        shell: runuser -u spack-test -- bash {0}
+        run: |
+          source share/spack/setup-env.sh
+          spack bootstrap disable github-actions-v0.5
+          spack bootstrap disable github-actions-v0.4
+          spack external find cmake bison
+          spack -d solve zlib
+          tree ~/.spack/bootstrap/store/
+  ubuntu-clingo-sources:
+    runs-on: ubuntu-latest
+    container: "ubuntu:latest"
+    steps:
+      - name: Install dependencies
+        env:
+          DEBIAN_FRONTEND: noninteractive
+        run: |
+          apt-get update -y && apt-get upgrade -y
+          apt-get install -y \
+            bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
+            make patch unzip xz-utils python3 python3-dev tree \
+            cmake bison
+      - name: Checkout
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+        with:
+          fetch-depth: 0
+      - name: Setup non-root user
+        run: |
+          # See [1] below
+          git config --global --add safe.directory /__w/spack/spack
+          useradd spack-test && mkdir -p ~spack-test
+          chown -R spack-test . ~spack-test
+      - name: Setup repo
+        shell: runuser -u spack-test -- bash {0}
+        run: |
+          git --version
+          . .github/workflows/setup_git.sh
+      - name: Bootstrap clingo
+        shell: runuser -u spack-test -- bash {0}
+        run: |
+          source share/spack/setup-env.sh
+          spack bootstrap disable github-actions-v0.5
+          spack bootstrap disable github-actions-v0.4
+          spack external find cmake bison
+          spack -d solve zlib
+          tree ~/.spack/bootstrap/store/
+  ubuntu-clingo-binaries-and-patchelf:
+    runs-on: ubuntu-latest
+    container: "ubuntu:latest"
+    steps:
+      - name: Install dependencies
+        env:
+          DEBIAN_FRONTEND: noninteractive
+        run: |
+          apt-get update -y && apt-get upgrade -y
+          apt-get install -y \
+            bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
+            make patch unzip xz-utils python3 python3-dev tree
+      - name: Checkout
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+        with:
+          fetch-depth: 0
+      - name: Setup non-root user
+        run: |
+          # See [1] below
+          git config --global --add safe.directory /__w/spack/spack
+          useradd spack-test && mkdir -p ~spack-test
+          chown -R spack-test . ~spack-test
+      - name: Setup repo
+        shell: runuser -u spack-test -- bash {0}
+        run: |
+          git --version
+          . .github/workflows/setup_git.sh
+      - name: Bootstrap clingo
+        shell: runuser -u spack-test -- bash {0}
+        run: |
+          source share/spack/setup-env.sh
+          spack -d solve zlib
+          tree ~/.spack/bootstrap/store/
+  opensuse-clingo-sources:
+    runs-on: ubuntu-latest
+    container: "opensuse/leap:latest"
+    steps:
+      - name: Install dependencies
         run: |
           # Harden CI by applying the workaround described here: https://www.suse.com/support/kb/doc/?id=000019505
           zypper update -y || zypper update -y
@@ -37,117 +133,101 @@ jobs:
           make patch unzip which xz python3 python3-devel tree \
           cmake bison
       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
         with:
           fetch-depth: 0
+      - name: Setup repo
+        run: |
+          # See [1] below
+          git config --global --add safe.directory /__w/spack/spack
+          git --version
+          . .github/workflows/setup_git.sh
       - name: Bootstrap clingo
         run: |
           source share/spack/setup-env.sh
-          spack bootstrap disable github-actions-v0.6
           spack bootstrap disable github-actions-v0.5
+          spack bootstrap disable github-actions-v0.4
           spack external find cmake bison
           spack -d solve zlib
           tree ~/.spack/bootstrap/store/
-  clingo-sources:
-    runs-on: ${{ matrix.runner }}
-    strategy:
-      matrix:
-        runner: ['macos-13', 'macos-14', "ubuntu-latest"]
+  macos-clingo-sources:
+    runs-on: macos-latest
     steps:
-      - name: Setup macOS
-        if: ${{ matrix.runner != 'ubuntu-latest' }}
+      - name: Install dependencies
         run: |
-          brew install cmake bison tree
+          brew install cmake bison@2.7 tree
       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-        with:
-          fetch-depth: 0
-      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
         with:
           python-version: "3.12"
       - name: Bootstrap clingo
         run: |
           source share/spack/setup-env.sh
-          spack bootstrap disable github-actions-v0.6
+          export PATH=/usr/local/opt/bison@2.7/bin:$PATH
           spack bootstrap disable github-actions-v0.5
+          spack bootstrap disable github-actions-v0.4
           spack external find --not-buildable cmake bison
           spack -d solve zlib
-          tree $HOME/.spack/bootstrap/store/
-  gnupg-sources:
-    runs-on: ${{ matrix.runner }}
-    strategy:
-      matrix:
-        runner: [ 'macos-13', 'macos-14', "ubuntu-latest" ]
-    steps:
-      - name: Setup macOS
-        if: ${{ matrix.runner != 'ubuntu-latest' }}
-        run: brew install tree gawk
-      - name: Remove system executables
-        run: |
-          while [ -n "$(command -v gpg gpg2 patchelf)" ]; do
-            sudo rm $(command -v gpg gpg2 patchelf)
-          done
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-        with:
-          fetch-depth: 0
-      - name: Bootstrap GnuPG
-        run: |
-          source share/spack/setup-env.sh
-          spack solve zlib
-          spack bootstrap disable github-actions-v0.6
-          spack bootstrap disable github-actions-v0.5
-          spack -d gpg list
           tree ~/.spack/bootstrap/store/
-  from-binaries:
-    runs-on: ${{ matrix.runner }}
+  macos-clingo-binaries:
+    runs-on: ${{ matrix.macos-version }}
     strategy:
       matrix:
-        runner: ['macos-13', 'macos-14', "ubuntu-latest"]
+        macos-version: ['macos-11', 'macos-12']
     steps:
-      - name: Setup macOS
-        if: ${{ matrix.runner != 'ubuntu-latest' }}
-        run: brew install tree
-      - name: Remove system executables
+      - name: Install dependencies
         run: |
-          while [ -n "$(command -v gpg gpg2 patchelf)" ]; do
-            sudo rm $(command -v gpg gpg2 patchelf)
-          done
+          brew install tree
       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-        with:
-          fetch-depth: 0
-      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
-        with:
-          python-version: |
-            3.8
-            3.9
-            3.10
-            3.11
-            3.12
-            3.13
-      - name: Set bootstrap sources
-        run: |
-          source share/spack/setup-env.sh
-          spack bootstrap disable github-actions-v0.5
-          spack bootstrap disable spack-install
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
       - name: Bootstrap clingo
         run: |
-          set -e
-          for ver in '3.8' '3.9' '3.10' '3.11' '3.12' '3.13'; do
+          set -ex
+          for ver in '3.7' '3.8' '3.9' '3.10' '3.11' ; do
             not_found=1
             ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
+            echo "Testing $ver_dir"
             if [[ -d "$ver_dir" ]] ; then
-              echo "Testing $ver_dir"
               if $ver_dir/python --version ; then
                 export PYTHON="$ver_dir/python"
                 not_found=0
                 old_path="$PATH"
                 export PATH="$ver_dir:$PATH"
-                ./bin/spack-tmpconfig -b ./.github/workflows/bin/bootstrap-test.sh
+                ./bin/spack-tmpconfig -b ./.github/workflows/bootstrap-test.sh
+                export PATH="$old_path"
+              fi
+            fi
+            # NOTE: test all pythons that exist, not all do on 12
+          done
+  ubuntu-clingo-binaries:
+    runs-on: ubuntu-20.04
+    steps:
+      - name: Checkout
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+        with:
+          fetch-depth: 0
+      - name: Setup repo
+        run: |
+          git --version
+          . .github/workflows/setup_git.sh
+      - name: Bootstrap clingo
+        run: |
+          set -ex
+          for ver in '3.7' '3.8' '3.9' '3.10' '3.11' ; do
+            not_found=1
+            ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)"
+            echo "Testing $ver_dir"
+            if [[ -d "$ver_dir" ]] ; then
+              if $ver_dir/python --version ; then
+                export PYTHON="$ver_dir/python"
+                not_found=0
+                old_path="$PATH"
+                export PATH="$ver_dir:$PATH"
+                ./bin/spack-tmpconfig -b ./.github/workflows/bootstrap-test.sh
                 export PATH="$old_path"
               fi
             fi
@@ -156,49 +236,122 @@ jobs:
             exit 1
           fi
         done
-      - name: Bootstrap GnuPG
-        run: |
-          source share/spack/setup-env.sh
-          spack -d gpg list
-          tree $HOME/.spack/bootstrap/store/
-      - name: Bootstrap File
-        run: |
-          source share/spack/setup-env.sh
-          spack -d python share/spack/qa/bootstrap-file.py
-          tree $HOME/.spack/bootstrap/store/
-  windows:
-    runs-on: "windows-latest"
-    steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-        with:
-          fetch-depth: 0
-      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
-        with:
-          python-version: "3.12"
-      - name: Setup Windows
-        run: |
-          Remove-Item -Path (Get-Command gpg).Path
-          Remove-Item -Path (Get-Command file).Path
-      - name: Bootstrap clingo
-        run: |
-          ./share/spack/setup-env.ps1
-          spack bootstrap disable github-actions-v0.6
-          spack bootstrap disable github-actions-v0.5
-          spack external find --not-buildable cmake bison
-          spack -d solve zlib
-          ./share/spack/qa/validate_last_exit.ps1
-          tree $env:userprofile/.spack/bootstrap/store/
-      - name: Bootstrap GnuPG
-        run: |
-          ./share/spack/setup-env.ps1
-          spack -d gpg list
-          ./share/spack/qa/validate_last_exit.ps1
-          tree $env:userprofile/.spack/bootstrap/store/
-      - name: Bootstrap File
-        run: |
-          ./share/spack/setup-env.ps1
-          spack -d python share/spack/qa/bootstrap-file.py
-          ./share/spack/qa/validate_last_exit.ps1
-          tree $env:userprofile/.spack/bootstrap/store/
+  ubuntu-gnupg-binaries:
+    runs-on: ubuntu-latest
+    container: "ubuntu:latest"
+    steps:
+      - name: Install dependencies
+        env:
+          DEBIAN_FRONTEND: noninteractive
+        run: |
+          apt-get update -y && apt-get upgrade -y
+          apt-get install -y \
+            bzip2 curl file g++ gcc patchelf gfortran git gzip \
+            make patch unzip xz-utils python3 python3-dev tree
+      - name: Checkout
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+        with:
+          fetch-depth: 0
+      - name: Setup non-root user
+        run: |
+          # See [1] below
+          git config --global --add safe.directory /__w/spack/spack
+          useradd spack-test && mkdir -p ~spack-test
+          chown -R spack-test . ~spack-test
+      - name: Setup repo
+        shell: runuser -u spack-test -- bash {0}
+        run: |
+          git --version
+          . .github/workflows/setup_git.sh
+      - name: Bootstrap GnuPG
+        shell: runuser -u spack-test -- bash {0}
+        run: |
+          source share/spack/setup-env.sh
+          spack bootstrap disable github-actions-v0.4
+          spack bootstrap disable spack-install
+          spack -d gpg list
+          tree ~/.spack/bootstrap/store/
+  ubuntu-gnupg-sources:
+    runs-on: ubuntu-latest
+    container: "ubuntu:latest"
+    steps:
+      - name: Install dependencies
+        env:
+          DEBIAN_FRONTEND: noninteractive
+        run: |
+          apt-get update -y && apt-get upgrade -y
+          apt-get install -y \
+            bzip2 curl file g++ gcc patchelf gfortran git gzip \
+            make patch unzip xz-utils python3 python3-dev tree \
+            gawk
+      - name: Checkout
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+        with:
+          fetch-depth: 0
+      - name: Setup non-root user
+        run: |
+          # See [1] below
+          git config --global --add safe.directory /__w/spack/spack
+          useradd spack-test && mkdir -p ~spack-test
+          chown -R spack-test . ~spack-test
+      - name: Setup repo
+        shell: runuser -u spack-test -- bash {0}
+        run: |
+          git --version
+          . .github/workflows/setup_git.sh
+      - name: Bootstrap GnuPG
+        shell: runuser -u spack-test -- bash {0}
+        run: |
+          source share/spack/setup-env.sh
+          spack solve zlib
+          spack bootstrap disable github-actions-v0.5
+          spack bootstrap disable github-actions-v0.4
+          spack -d gpg list
+          tree ~/.spack/bootstrap/store/
+  macos-gnupg-binaries:
+    runs-on: macos-latest
+    steps:
+      - name: Install dependencies
+        run: |
+          brew install tree
+          # Remove GnuPG since we want to bootstrap it
+          sudo rm -rf /usr/local/bin/gpg
+      - name: Checkout
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      - name: Bootstrap GnuPG
+        run: |
+          source share/spack/setup-env.sh
+          spack bootstrap disable github-actions-v0.4
+          spack bootstrap disable spack-install
+          spack -d gpg list
+          tree ~/.spack/bootstrap/store/
+  macos-gnupg-sources:
+    runs-on: macos-latest
+    steps:
+      - name: Install dependencies
+        run: |
+          brew install gawk tree
+          # Remove GnuPG since we want to bootstrap it
+          sudo rm -rf /usr/local/bin/gpg
+      - name: Checkout
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      - name: Bootstrap GnuPG
+        run: |
+          source share/spack/setup-env.sh
+          spack solve zlib
+          spack bootstrap disable github-actions-v0.5
+          spack bootstrap disable github-actions-v0.4
+          spack -d gpg list
+          tree ~/.spack/bootstrap/store/
+# [1] Distros that have patched git to resolve CVE-2022-24765 (e.g. Ubuntu patching v2.25.1)
+# introduce breaking behaviors, so we have to set `safe.directory` in gitconfig ourselves.
+# See:
+# - https://github.blog/2022-04-12-git-security-vulnerability-announced/
+# - https://github.com/actions/checkout/issues/760
+# - http://changelogs.ubuntu.com/changelogs/pool/main/g/git/git_2.25.1-1ubuntu3.3/changelog


@@ -40,22 +40,22 @@ jobs:
         # 1: Platforms to build for
         # 2: Base image (e.g. ubuntu:22.04)
         dockerfile: [[amazon-linux, 'linux/amd64,linux/arm64', 'amazonlinux:2'],
-                     [centos-stream9, 'linux/amd64,linux/arm64,linux/ppc64le', 'centos:stream9'],
+                     [centos7, 'linux/amd64,linux/arm64,linux/ppc64le', 'centos:7'],
+                     [centos-stream, 'linux/amd64,linux/arm64,linux/ppc64le', 'centos:stream'],
                      [leap15, 'linux/amd64,linux/arm64,linux/ppc64le', 'opensuse/leap:15'],
                      [ubuntu-focal, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:20.04'],
                      [ubuntu-jammy, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:22.04'],
-                     [ubuntu-noble, 'linux/amd64,linux/arm64,linux/ppc64le', 'ubuntu:24.04'],
                      [almalinux8, 'linux/amd64,linux/arm64,linux/ppc64le', 'almalinux:8'],
                      [almalinux9, 'linux/amd64,linux/arm64,linux/ppc64le', 'almalinux:9'],
                      [rockylinux8, 'linux/amd64,linux/arm64', 'rockylinux:8'],
                      [rockylinux9, 'linux/amd64,linux/arm64', 'rockylinux:9'],
-                     [fedora39, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:39'],
-                     [fedora40, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:40']]
+                     [fedora37, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:37'],
+                     [fedora38, 'linux/amd64,linux/arm64,linux/ppc64le', 'fedora:38']]
     name: Build ${{ matrix.dockerfile[0] }}
     if: github.repository == 'spack/spack'
     steps:
       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
       - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
         id: docker_meta

@@ -76,7 +76,7 @@ jobs:
         env:
           SPACK_YAML_OS: "${{ matrix.dockerfile[2] }}"
         run: |
-          .github/workflows/bin/generate_spack_yaml_containerize.sh
+          .github/workflows/generate_spack_yaml_containerize.sh
           . share/spack/setup-env.sh
           mkdir -p dockerfiles/${{ matrix.dockerfile[0] }}
           spack containerize --last-stage=bootstrap | tee dockerfiles/${{ matrix.dockerfile[0] }}/Dockerfile

@@ -87,19 +87,19 @@ jobs:
           fi
       - name: Upload Dockerfile
-        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
+        uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
         with:
-          name: dockerfiles_${{ matrix.dockerfile[0] }}
+          name: dockerfiles
           path: dockerfiles
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf
+        uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349
+        uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20
       - name: Log in to GitHub Container Registry
-        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
+        uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
         with:
           registry: ghcr.io
           username: ${{ github.actor }}

@@ -107,27 +107,16 @@ jobs:
       - name: Log in to DockerHub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
+        uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Build & Deploy ${{ matrix.dockerfile[0] }}
-        uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75
+        uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0
         with:
           context: dockerfiles/${{ matrix.dockerfile[0] }}
           platforms: ${{ matrix.dockerfile[1] }}
           push: ${{ github.event_name != 'pull_request' }}
           tags: ${{ steps.docker_meta.outputs.tags }}
           labels: ${{ steps.docker_meta.outputs.labels }}
-  merge-dockerfiles:
-    runs-on: ubuntu-latest
-    needs: deploy-images
-    steps:
-      - name: Merge Artifacts
-        uses: actions/upload-artifact/merge@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
-        with:
-          name: dockerfiles
-          pattern: dockerfiles_*
-          delete-merged: true


@@ -15,6 +15,18 @@ concurrency:
   cancel-in-progress: true
 jobs:
+  prechecks:
+    needs: [ changes ]
+    uses: ./.github/workflows/valid-style.yml
+    secrets: inherit
+    with:
+      with_coverage: ${{ needs.changes.outputs.core }}
+  all-prechecks:
+    needs: [ prechecks ]
+    runs-on: ubuntu-latest
+    steps:
+      - name: Success
+        run: "true"
   # Check which files have been updated by the PR
   changes:
     runs-on: ubuntu-latest

@@ -24,7 +36,7 @@ jobs:
       core: ${{ steps.filter.outputs.core }}
       packages: ${{ steps.filter.outputs.packages }}
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
         if: ${{ github.event_name == 'push' }}
         with:
           fetch-depth: 0

@@ -41,13 +53,6 @@ jobs:
             - 'var/spack/repos/builtin/packages/clingo/**'
             - 'var/spack/repos/builtin/packages/python/**'
             - 'var/spack/repos/builtin/packages/re2c/**'
-            - 'var/spack/repos/builtin/packages/gnupg/**'
-            - 'var/spack/repos/builtin/packages/libassuan/**'
-            - 'var/spack/repos/builtin/packages/libgcrypt/**'
-            - 'var/spack/repos/builtin/packages/libgpg-error/**'
-            - 'var/spack/repos/builtin/packages/libksba/**'
-            - 'var/spack/repos/builtin/packages/npth/**'
-            - 'var/spack/repos/builtin/packages/pinentry/**'
             - 'lib/spack/**'
             - 'share/spack/**'
             - '.github/workflows/bootstrap.yml'

@@ -67,53 +72,19 @@ jobs:
     needs: [ prechecks, changes ]
     uses: ./.github/workflows/bootstrap.yml
     secrets: inherit
   unit-tests:
     if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
     needs: [ prechecks, changes ]
     uses: ./.github/workflows/unit_tests.yaml
     secrets: inherit
-  prechecks:
-    needs: [ changes ]
-    uses: ./.github/workflows/valid-style.yml
-    secrets: inherit
-    with:
-      with_coverage: ${{ needs.changes.outputs.core }}
-  all-prechecks:
+  windows:
+    if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
     needs: [ prechecks ]
-    if: ${{ always() }}
+    uses: ./.github/workflows/windows_python.yml
+    secrets: inherit
+  all:
+    needs: [ windows, unit-tests, bootstrap ]
     runs-on: ubuntu-latest
     steps:
       - name: Success
-        run: |
-          if [ "${{ needs.prechecks.result }}" == "failure" ] || [ "${{ needs.prechecks.result }}" == "canceled" ]; then
-            echo "Unit tests failed."
-            exit 1
-          else
-            exit 0
-          fi
-  coverage:
-    needs: [ unit-tests, prechecks ]
-    uses: ./.github/workflows/coverage.yml
-    secrets: inherit
-  all:
-    needs: [ unit-tests, coverage, bootstrap ]
-    if: ${{ always() }}
-    runs-on: ubuntu-latest
-    # See https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/accessing-contextual-information-about-workflow-runs#needs-context
-    steps:
-      - name: Status summary
-        run: |
-          if [ "${{ needs.unit-tests.result }}" == "failure" ] || [ "${{ needs.unit-tests.result }}" == "canceled" ]; then
-            echo "Unit tests failed."
-            exit 1
-          elif [ "${{ needs.bootstrap.result }}" == "failure" ] || [ "${{ needs.bootstrap.result }}" == "canceled" ]; then
-            echo "Bootstrap tests failed."
-            exit 1
-          else
-            exit 0
-          fi
+        run: "true"


@@ -1,34 +0,0 @@
-name: coverage
-on:
-  workflow_call:
-jobs:
-  # Upload coverage reports to codecov once as a single bundle
-  upload:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
-        with:
-          python-version: '3.11'
-          cache: 'pip'
-      - name: Install python dependencies
-        run: pip install -r .github/workflows/requirements/coverage/requirements.txt
-      - name: Download coverage artifact files
-        uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16
-        with:
-          pattern: coverage-*
-          path: coverage
-          merge-multiple: true
-      - run: ls -la coverage
-      - run: coverage combine -a coverage/.coverage*
-      - run: coverage xml
-      - name: "Upload coverage report to CodeCov"
-        uses: codecov/codecov-action@5c47607acb93fed5485fdbf7232e8a31425f672a
-        with:
-          verbose: true

.github/workflows/install_spack.sh (new executable file)

@@ -0,0 +1,8 @@
+#!/usr/bin/env sh
+. share/spack/setup-env.sh
+echo -e "config:\n build_jobs: 2" > etc/spack/config.yaml
+spack config add "packages:all:target:[x86_64]"
+spack compiler find
+spack compiler info apple-clang
+spack debug report
+spack solve zlib


@@ -14,10 +14,10 @@ jobs:
   build-paraview-deps:
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
         with:
           fetch-depth: 0
-      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
+      - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
         with:
           python-version: 3.9
       - name: Install Python packages


@@ -1 +0,0 @@
-coverage==7.6.1


@@ -1,7 +0,0 @@
-black==24.10.0
-clingo==5.7.1
-flake8==7.1.1
-isort==5.13.2
-mypy==1.8.0
-types-six==1.16.21.20241105
-vermin==1.6.0


@@ -0,0 +1,7 @@
+black==24.3.0
+clingo==5.7.1
+flake8==7.0.0
+isort==5.13.2
+mypy==1.8.0
+types-six==1.16.21.9
+vermin==1.6.0


@@ -16,34 +16,45 @@ jobs:
       matrix:
         os: [ubuntu-latest]
         python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12']
+        concretizer: ['clingo']
         on_develop:
         - ${{ github.ref == 'refs/heads/develop' }}
         include:
+        - python-version: '3.11'
+          os: ubuntu-latest
+          concretizer: original
+          on_develop: ${{ github.ref == 'refs/heads/develop' }}
         - python-version: '3.6'
           os: ubuntu-20.04
+          concretizer: clingo
           on_develop: ${{ github.ref == 'refs/heads/develop' }}
         exclude:
         - python-version: '3.7'
           os: ubuntu-latest
+          concretizer: 'clingo'
           on_develop: false
         - python-version: '3.8'
           os: ubuntu-latest
+          concretizer: 'clingo'
           on_develop: false
         - python-version: '3.9'
           os: ubuntu-latest
+          concretizer: 'clingo'
           on_develop: false
         - python-version: '3.10'
           os: ubuntu-latest
+          concretizer: 'clingo'
           on_develop: false
         - python-version: '3.11'
           os: ubuntu-latest
+          concretizer: 'clingo'
           on_develop: false
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
         with:
           fetch-depth: 0
-      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
+      - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
         with:
           python-version: ${{ matrix.python-version }}
       - name: Install System packages

@@ -61,7 +72,7 @@ jobs:
         run: |
           # Need this for the git tests to succeed.
           git --version
-          . .github/workflows/bin/setup_git.sh
+          . .github/workflows/setup_git.sh
       - name: Bootstrap clingo
         if: ${{ matrix.concretizer == 'clingo' }}
         env:

@@ -74,25 +85,25 @@ jobs:
       - name: Run unit tests
         env:
           SPACK_PYTHON: python
+          SPACK_TEST_SOLVER: ${{ matrix.concretizer }}
           SPACK_TEST_PARALLEL: 2
           COVERAGE: true
-          COVERAGE_FILE: coverage/.coverage-${{ matrix.os }}-python${{ matrix.python-version }}
           UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
         run: |
           share/spack/qa/run-unit-tests
-      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
+      - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
         with:
-          name: coverage-${{ matrix.os }}-python${{ matrix.python-version }}
-          path: coverage
-          include-hidden-files: true
+          flags: unittests,linux,${{ matrix.concretizer }}
+          token: ${{ secrets.CODECOV_TOKEN }}
+          verbose: true
   # Test shell integration
   shell:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
         with:
           fetch-depth: 0
-      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
+      - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
         with:
           python-version: '3.11'
       - name: Install System packages

@@ -107,17 +118,17 @@ jobs:
         run: |
           # Need this for the git tests to succeed.
           git --version
-          . .github/workflows/bin/setup_git.sh
+          . .github/workflows/setup_git.sh
       - name: Run shell tests
         env:
           COVERAGE: true
         run: |
           share/spack/qa/run-shell-tests
-      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
+      - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
         with:
-          name: coverage-shell
-          path: coverage
-          include-hidden-files: true
+          flags: shelltests,linux
+          token: ${{ secrets.CODECOV_TOKEN }}
+          verbose: true
   # Test RHEL8 UBI with platform Python. This job is run
   # only on PRs modifying core Spack

@@ -130,13 +141,13 @@ jobs:
           dnf install -y \
             bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
             make patch tcl unzip which xz
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
       - name: Setup repo and non-root user
         run: |
           git --version
           git config --global --add safe.directory /__w/spack/spack
           git fetch --unshallow
-          . .github/workflows/bin/setup_git.sh
+          . .github/workflows/setup_git.sh
           useradd spack-test
           chown -R spack-test .
       - name: Run unit tests

@@ -149,49 +160,48 @@ jobs:
   clingo-cffi:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
         with:
           fetch-depth: 0
-      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
+      - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
         with:
-          python-version: '3.13'
+          python-version: '3.11'
       - name: Install System packages
         run: |
           sudo apt-get -y update
-          sudo apt-get -y install coreutils gfortran graphviz gnupg2
+          sudo apt-get -y install coreutils cvs gfortran graphviz gnupg2 mercurial ninja-build kcov
       - name: Install Python packages
         run: |
-          pip install --upgrade pip setuptools pytest coverage[toml] pytest-cov clingo
+          pip install --upgrade pip setuptools pytest coverage[toml] pytest-cov clingo pytest-xdist
           pip install --upgrade flake8 "isort>=4.3.5" "mypy>=0.900" "click" "black"
+      - name: Setup git configuration
+        run: |
+          # Need this for the git tests to succeed.
+          git --version
+          . .github/workflows/setup_git.sh
       - name: Run unit tests (full suite with coverage)
         env:
           COVERAGE: true
-          COVERAGE_FILE: coverage/.coverage-clingo-cffi
+          SPACK_TEST_SOLVER: clingo
         run: |
-          . share/spack/setup-env.sh
-          spack bootstrap disable spack-install
-          spack bootstrap disable github-actions-v0.5
-          spack bootstrap disable github-actions-v0.6
-          spack bootstrap status
-          spack solve zlib
-          spack unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml lib/spack/spack/test/concretization/core.py
-      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
+          share/spack/qa/run-unit-tests
+      - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
         with:
-          name: coverage-clingo-cffi
-          path: coverage
-          include-hidden-files: true
+          flags: unittests,linux,clingo
+          token: ${{ secrets.CODECOV_TOKEN }}
+          verbose: true
   # Run unit tests on MacOS
   macos:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [macos-13, macos-14]
+        os: [macos-latest, macos-14]
         python-version: ["3.11"]
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
         with:
           fetch-depth: 0
-      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
+      - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
         with:
           python-version: ${{ matrix.python-version }}
       - name: Install Python packages

@@ -200,52 +210,21 @@ jobs:
           pip install --upgrade pytest coverage[toml] pytest-xdist pytest-cov
       - name: Setup Homebrew packages
         run: |
-          brew install dash fish gcc gnupg kcov
+          brew install dash fish gcc gnupg2 kcov
       - name: Run unit tests
         env:
+          SPACK_TEST_SOLVER: clingo
           SPACK_TEST_PARALLEL: 4
-          COVERAGE_FILE: coverage/.coverage-${{ matrix.os }}-python${{ matrix.python-version }}
         run: |
           git --version
-          . .github/workflows/bin/setup_git.sh
+          . .github/workflows/setup_git.sh
           . share/spack/setup-env.sh
           $(which spack) bootstrap disable spack-install
           $(which spack) solve zlib
           common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
           $(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
-      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
+      - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
         with:
-          name: coverage-${{ matrix.os }}-python${{ matrix.python-version }}
-          path: coverage
-          include-hidden-files: true
+          flags: unittests,macos
+          token: ${{ secrets.CODECOV_TOKEN }}
+          verbose: true
-  # Run unit tests on Windows
-  windows:
-    defaults:
-      run:
-        shell:
-          powershell Invoke-Expression -Command "./share/spack/qa/windows_test_setup.ps1"; {0}
-    runs-on: windows-latest
-    steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-        with:
-          fetch-depth: 0
-      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
-        with:
-          python-version: 3.9
-      - name: Install Python packages
-        run: |
-          python -m pip install --upgrade pip pywin32 setuptools pytest-cov clingo
-      - name: Create local develop
-        run: |
-          ./.github/workflows/bin/setup_git.ps1
-      - name: Unit Test
-        env:
-          COVERAGE_FILE: coverage/.coverage-windows
-        run: |
-          spack unit-test -x --verbose --cov --cov-config=pyproject.toml
-          ./share/spack/qa/validate_last_exit.ps1
-      - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882
-        with:
-          name: coverage-windows
-          path: coverage
-          include-hidden-files: true


@@ -18,15 +18,15 @@ jobs:
   validate:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
         with:
           python-version: '3.11'
           cache: 'pip'
       - name: Install Python Packages
         run: |
           pip install --upgrade pip setuptools
-          pip install -r .github/workflows/requirements/style/requirements.txt
+          pip install -r .github/workflows/style/requirements.txt
       - name: vermin (Spack's Core)
         run: vermin --backport importlib --backport argparse --violations --backport typing -t=3.6- -vvv lib/spack/spack/ lib/spack/llnl/ bin/
       - name: vermin (Repositories)

@@ -35,22 +35,22 @@ jobs:
   style:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
         with:
           fetch-depth: 0
-      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b
+      - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
         with:
           python-version: '3.11'
           cache: 'pip'
       - name: Install Python packages
         run: |
           pip install --upgrade pip setuptools
-          pip install -r .github/workflows/requirements/style/requirements.txt
+          pip install -r .github/workflows/style/requirements.txt
       - name: Setup git configuration
         run: |
           # Need this for the git tests to succeed.
           git --version
-          . .github/workflows/bin/setup_git.sh
+          . .github/workflows/setup_git.sh
       - name: Run style tests
         run: |
           share/spack/qa/run-style-tests

@@ -70,13 +70,13 @@ jobs:
           dnf install -y \
             bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
             make patch tcl unzip which xz
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
       - name: Setup repo and non-root user
         run: |
           git --version
           git config --global --add safe.directory /__w/spack/spack
           git fetch --unshallow
-          . .github/workflows/bin/setup_git.sh
+          . .github/workflows/setup_git.sh
           useradd spack-test
           chown -R spack-test .
       - name: Bootstrap Spack development environment

@@ -85,64 +85,5 @@ jobs:
           source share/spack/setup-env.sh
           spack debug report
           spack -d bootstrap now --dev
-          spack -d style -t black
+          spack style -t black
           spack unit-test -V
-  import-check:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: julia-actions/setup-julia@v2
-        with:
-          version: '1.10'
-      - uses: julia-actions/cache@v2
-      # PR: use the base of the PR as the old commit
-      - name: Checkout PR base commit
-        if: github.event_name == 'pull_request'
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-        with:
-          ref: ${{ github.event.pull_request.base.sha }}
-          path: old
-      # not a PR: use the previous commit as the old commit
-      - name: Checkout previous commit
-        if: github.event_name != 'pull_request'
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-        with:
-          fetch-depth: 2
-          path: old
-      - name: Checkout previous commit
-        if: github.event_name != 'pull_request'
-        run: git -C old reset --hard HEAD^
-      - name: Checkout new commit
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-        with:
-          path: new
-      - name: Install circular import checker
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-        with:
-          repository: haampie/circular-import-fighter
-          ref: 9f60f51bc7134e0be73f27623f1b0357d1718427
-          path: circular-import-fighter
-      - name: Install dependencies
-        working-directory: circular-import-fighter
-        run: make -j dependencies
-      - name: Import cycles before
-        working-directory: circular-import-fighter
-        run: make SPACK_ROOT=../old && cp solution solution.old
-      - name: Import cycles after
-        working-directory: circular-import-fighter
-        run: make clean-graph && make SPACK_ROOT=../new && cp solution solution.new
-      - name: Compare import cycles
-        working-directory: circular-import-fighter
-        run: |
-          edges_before="$(grep -oP 'edges to delete: \K\d+' solution.old)"
-          edges_after="$(grep -oP 'edges to delete: \K\d+' solution.new)"
-          if [ "$edges_after" -gt "$edges_before" ]; then
-            printf '\033[1;31mImport check failed: %s imports need to be deleted, ' "$edges_after"
-            printf 'previously this was %s\033[0m\n' "$edges_before"
-            printf 'Compare \033[1;97m"Import cycles before"\033[0m and '
-            printf '\033[1;97m"Import cycles after"\033[0m to see problematic imports.\n'
-            exit 1
-          else
-            printf '\033[1;32mImport check passed: %s <= %s\033[0m\n' "$edges_after" "$edges_before"
-          fi

.github/workflows/windows_python.yml (new file)

@@ -0,0 +1,83 @@
+name: windows
+on:
+  workflow_call:
+concurrency:
+  group: windows-${{github.ref}}-${{github.event.pull_request.number || github.run_number}}
+  cancel-in-progress: true
+defaults:
+  run:
+    shell:
+      powershell Invoke-Expression -Command "./share/spack/qa/windows_test_setup.ps1"; {0}
+jobs:
+  unit-tests:
+    runs-on: windows-latest
+    steps:
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+        with:
+          fetch-depth: 0
+      - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+        with:
+          python-version: 3.9
+      - name: Install Python packages
+        run: |
+          python -m pip install --upgrade pip pywin32 setuptools pytest-cov clingo
+      - name: Create local develop
+        run: |
+          ./.github/workflows/setup_git.ps1
+      - name: Unit Test
+        run: |
+          spack unit-test -x --verbose --cov --cov-config=pyproject.toml --ignore=lib/spack/spack/test/cmd
+          ./share/spack/qa/validate_last_exit.ps1
+          coverage combine -a
+          coverage xml
+      - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
+        with:
+          flags: unittests,windows
+          token: ${{ secrets.CODECOV_TOKEN }}
+          verbose: true
+  unit-tests-cmd:
+    runs-on: windows-latest
+    steps:
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+        with:
+          fetch-depth: 0
+      - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+        with:
+          python-version: 3.9
+      - name: Install Python packages
+        run: |
+          python -m pip install --upgrade pip pywin32 setuptools coverage pytest-cov clingo
+      - name: Create local develop
+        run: |
+          ./.github/workflows/setup_git.ps1
+      - name: Command Unit Test
+        run: |
+          spack unit-test -x --verbose --cov --cov-config=pyproject.toml lib/spack/spack/test/cmd
+          ./share/spack/qa/validate_last_exit.ps1
+          coverage combine -a
+          coverage xml
+      - uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8
+        with:
+          flags: unittests,windows
+          token: ${{ secrets.CODECOV_TOKEN }}
+          verbose: true
+  build-abseil:
+    runs-on: windows-latest
+    steps:
+      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+        with:
+          fetch-depth: 0
+      - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d
+        with:
+          python-version: 3.9
+      - name: Install Python packages
+        run: |
+          python -m pip install --upgrade pip pywin32 setuptools coverage
+      - name: Build Test
+        run: |
+          spack compiler find
+          spack -d external find cmake ninja
+          spack -d install abseil-cpp


@@ -14,26 +14,3 @@ sphinx:
 python:
   install:
     - requirements: lib/spack/docs/requirements.txt
-search:
-  ranking:
-    spack.html: -10
-    spack.*.html: -10
-    llnl.html: -10
-    llnl.*.html: -10
-    _modules/*: -10
-    command_index.html: -9
-    basic_usage.html: 5
-    configuration.html: 5
-    config_yaml.html: 5
-    packages_yaml.html: 5
-    build_settings.html: 5
-    environments.html: 5
-    containers.html: 5
-    mirrors.html: 5
-    module_file_support.html: 5
-    repositories.html: 5
-    binary_caches.html: 5
-    chain.html: 5
-    pipelines.html: 5
-    packaging_guide.html: 5


@@ -1,440 +1,3 @@
# v0.22.2 (2024-09-21)
## Bugfixes
- Forward compatibility with Spack 0.23 packages with language dependencies (#45205, #45191)
- Forward compatibility with `urllib` from Python 3.12.6+ (#46453, #46483)
- Bump vendored `archspec` for better aarch64 support (#45721, #46445)
- Support macOS Sequoia (#45018, #45127)
- Fix regression in `{variants.X}` and `{variants.X.value}` format strings (#46206)
- Ensure shell escaping of environment variable values in load and activate commands (#42780)
- Fix an issue where `spec[pkg]` considers specs outside the current DAG (#45090)
- Do not halt concretization on unknown variants in externals (#45326)
- Improve validation of `develop` config section (#46485)
- Explicitly disable `ccache` if turned off in config, to avoid cache pollution (#45275)
- Improve backwards compatibility in `include_concrete` (#45766)
- Fix issue where package tags were sometimes repeated (#45160)
- Make `setup-env.sh` "sourced only" by dropping execution bits (#45641)
- Make certain source/binary fetch errors recoverable instead of a hard error (#45683)
- Remove debug statements in package hash computation (#45235)
- Remove redundant clingo warnings (#45269)
- Remove hard-coded layout version (#45645)
- Do not initialize previous store state in `use_store` (#45268)
- Docs improvements (#46475)
## Package updates
- `chapel` major update (#42197, #44931, #45304)
# v0.22.1 (2024-07-04)
## Bugfixes
- Fix reuse of externals on Linux (#44316)
- Ensure parent gcc-runtime version >= child (#44834, #44870)
- Ensure the latest gcc-runtime is rpath'ed when multiple exist among link deps (#44219)
- Improve version detection of glibc (#44154)
- Improve heuristics for solver (#44893, #44976, #45023)
- Make strong preferences override reuse (#44373)
- Reduce verbosity when C compiler is missing (#44182)
- Make missing ccache executable an error when required (#44740)
- Make every environment view containing `python` a `venv` (#44382)
- Fix external detection for compilers with os but no target (#44156)
- Fix version optimization for roots (#44272)
- Handle common implementations of pagination of tags in OCI build caches (#43136)
- Apply fetched patches to develop specs (#44950)
- Avoid Windows wrappers for filesystem utilities on non-Windows (#44126)
- Fix issue with long filenames in build caches on Windows (#43851)
- Fix formatting issue in `spack audit` (#45045)
- CI fixes (#44582, #43965, #43967, #44279, #44213)
## Package updates
- protobuf: fix 3.4:3.21 patch checksum (#44443)
- protobuf: update hash for patch needed when="@3.4:3.21" (#44210)
- git: bump v2.39 to 2.45; deprecate unsafe versions (#44248)
- gcc: use -rpath {rpath_dir} not -rpath={rpath dir} (#44315)
- Remove mesa18 and libosmesa (#44264)
- Enforce consistency of `gl` providers (#44307)
- Require libiconv for iconv (#44335, #45026).
Notice that glibc/musl also provide iconv, but are not guaranteed to be
complete. Set `packages:iconv:require:[glibc]` to restore the old behavior.
- py-matplotlib: qualify when to do a post install (#44191)
- rust: fix v1.78.0 instructions (#44127)
- suite-sparse: improve setting of the `libs` property (#44214)
- netlib-lapack: provide blas and lapack together (#44981)
# v0.22.0 (2024-05-12)
`v0.22.0` is a major feature release.
## Features in this release
1. **Compiler dependencies**
We are in the process of making compilers proper dependencies in Spack, and a number
of changes in `v0.22` support that effort. You may notice nodes in your dependency
graphs for compiler runtime libraries like `gcc-runtime` or `libgfortran`, and you
may notice that Spack graphs now include `libc`. We've also begun moving compiler
configuration from `compilers.yaml` to `packages.yaml` to make it consistent with
other externals. We are trying to do this with the least disruption possible, so
your existing `compilers.yaml` files should still work. We expect to be done with
this transition by the `v0.23` release in November.
* #41104: Packages compiled with `%gcc` on Linux, macOS and FreeBSD now depend on a
new package `gcc-runtime`, which contains a copy of the shared compiler runtime
libraries. This enables gcc runtime libraries to be installed and relocated when
using a build cache. When building minimal Spack-generated container images it is
no longer necessary to install libgfortran, libgomp etc. using the system package
manager.
* #42062: Packages compiled with `%oneapi` now depend on a new package
`intel-oneapi-runtime`. This is similar to `gcc-runtime`, and the runtimes can
provide virtuals and compilers can inject dependencies on virtuals into compiled
packages. This allows us to model library soname compatibility and allows
compilers like `%oneapi` to provide virtuals like `sycl` (which can also be
provided by standalone libraries). Note that until we have an agreement in place
with intel, Intel packages are marked `redistribute(source=False, binary=False)`
and must be downloaded outside of Spack.
* #43272: changes to the optimization criteria of the solver improve the hit-rate of
buildcaches by a fair amount. The solver uses more relaxed compatibility rules and will
not try to strictly match compilers or targets of reused specs. Users can still
enforce the previous strict behavior with `require:` sections in `packages.yaml`.
Note that to enforce correct linking, Spack will *not* reuse old `%gcc` and
`%oneapi` specs that do not have the runtime libraries as a dependency.
* #43539: Spack will reuse specs built with compilers that are *not* explicitly
configured in `compilers.yaml`. Because we can now keep runtime libraries in build
cache, we do not require you to also have a local configured compiler to *use* the
runtime libraries. This improves reuse in buildcaches and avoids conflicts with OS
updates that happen underneath Spack.
* #43190: binary compatibility on `linux` is now based on the `libc` version,
instead of on the `os` tag. Spack builds now detect the host `libc` (`glibc` or
`musl`) and add it as an implicit external node in the dependency graph. Binaries
with a `libc` with the same name and a version less than or equal to that of the
detected `libc` can be reused. This is only on `linux`, not `macos` or `Windows`.
* #43464: each package that can provide a compiler is now detectable using `spack
external find`. External packages defining compiler paths are effectively used as
compilers, and `spack external find -t compiler` can be used as a substitute for
`spack compiler find`. More details on this transition are in
[the docs](https://spack.readthedocs.io/en/latest/getting_started.html#manual-compiler-configuration)
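As an illustrative sketch of that detection flow (hedged: the exact `packages.yaml` entries written depend on which toolchains exist on your machine), compiler detection can now go through the regular externals machinery:
```
spack external find -t compiler   # record compilers as external packages
spack config get packages         # inspect the generated entries
```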
2. **Improved `spack find` UI for Environments**
If you're working in an environment, you likely care about:
* What are the roots
* Which ones are installed / not installed
* What's been added that still needs to be concretized
We've tweaked `spack find` in environments to show this information much more
clearly. Installation status is shown next to each root, so you can see what is
installed. Roots are also shown in bold in the list of installed packages. There is
also a new option, `spack find -r` / `--only-roots`, that shows only the environment
roots if you don't want to look at all the installed specs.
More details in #42334.
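As a quick illustration (the environment name is hypothetical):
```
$ spack env activate myenv
$ spack find --only-roots   # or: spack find -r
```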
3. **Improved command-line string quoting**
We are making some breaking changes to how Spack parses specs on the CLI in order to
respect shell quoting instead of trying to fight it. If you (sadly) had to write
something like this on the command line:
```
spack install zlib cflags=\"-O2 -g\"
```
That will now result in an error, but you can now write what you probably expected
to work in the first place:
```
spack install zlib cflags="-O2 -g"
```
Quoted strings can now also include special characters, so you can supply flags like:
```
spack install zlib ldflags='-Wl,-rpath=$ORIGIN/_libs'
```
To reduce ambiguity in parsing, we now require that you *not* put spaces around `=`
and `==` when specifying flags or variants. This would not have broken before but will now
result in an error:
```
spack install zlib cflags = "-O2 -g"
```
More details and discussion in #30634.
4. **Revert default `spack install` behavior to `--reuse`**
We changed the default concretizer behavior from `--reuse` to `--reuse-deps` in
#30990 (in `v0.20`), which meant that *every* `spack install` invocation would
attempt to build a new version of the requested package / any environment roots.
While this is a common ask for *upgrading* and for *developer* workflows, we don't
think it should be the default for a package manager.
We are going to try to stick to this policy:
1. Prioritize reuse and build as little as possible by default.
2. Only upgrade or install duplicates if they are explicitly asked for, or if there
is a known security issue that necessitates an upgrade.
With the install command you now have three options:
* `--reuse` (default): reuse as many existing installations as possible.
* `--reuse-deps` / `--fresh-roots`: upgrade (freshen) roots but reuse dependencies if possible.
* `--fresh`: install fresh versions of requested packages (roots) and their dependencies.
We've also introduced `--fresh-roots` as an alias for `--reuse-deps` to make it more clear
that it may give you fresh versions. More details in #41302 and #43988.
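Concretely, the three modes look like this on the command line (`hdf5` is just an
example spec):
```
$ spack install hdf5                # --reuse is the default: reuse roots and dependencies
$ spack install --fresh-roots hdf5  # fresh roots, but reuse dependencies where possible
$ spack install --fresh hdf5        # fresh roots and fresh dependencies
```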
5. **More control over reused specs**
You can now control which packages to reuse and how. There is a new
`concretizer:reuse` config option, which accepts the following properties:
- `roots`: `true` to reuse roots, `false` to reuse just dependencies
- `exclude`: list of constraints used to select which specs *not* to reuse
- `include`: list of constraints used to select which specs *to* reuse
- `from`: list of sources for reused specs (some combination of `local`,
`buildcache`, or `external`)
For example, to reuse only specs compiled with GCC, you could write:
```yaml
concretizer:
reuse:
roots: true
include:
- "%gcc"
```
Or, if `openmpi` must be used from externals, and it must be the only external used:
```yaml
concretizer:
reuse:
roots: true
from:
- type: local
exclude: ["openmpi"]
- type: buildcache
exclude: ["openmpi"]
- type: external
include: ["openmpi"]
```
6. **New `redistribute()` directive**
Some packages can't be redistributed in source or binary form. We need an explicit
way to say that in a package.
Now there is a `redistribute()` directive so that package authors can write:
```python
class MyPackage(Package):
redistribute(source=False, binary=False)
```
Like other directives, this works with `when=`:
```python
class MyPackage(Package):
# 12.0 and higher are proprietary
redistribute(source=False, binary=False, when="@12.0:")
# can't redistribute when we depend on some proprietary dependency
redistribute(source=False, binary=False, when="^proprietary-dependency")
```
More in #20185.
7. **New `conflict:` and `prefer:` syntax for package preferences**
Previously, you could express conflicts and preferences in `packages.yaml` through
some contortions with `require:`:
```yaml
packages:
zlib-ng:
require:
- one_of: ["%clang", "@:"] # conflict on %clang
- any_of: ["+shared", "@:"] # strong preference for +shared
```
You can now use `conflict:` and `prefer:` for a much more readable configuration:
```yaml
packages:
zlib-ng:
conflict:
- "%clang"
prefer:
- "+shared"
```
See [the documentation](https://spack.readthedocs.io/en/latest/packages_yaml.html#conflicts-and-strong-preferences)
and #41832 for more details.
8. **`include_concrete` in environments**
You may want to build on the *concrete* contents of another environment without
changing that environment. You can now include the concrete specs from another
environment's `spack.lock` with `include_concrete`:
```yaml
spack:
specs: []
concretizer:
unify: true
include_concrete:
- /path/to/environment1
- /path/to/environment2
```
Now, when *this* environment is concretized, it will bring in the already concrete
specs from `environment1` and `environment2`, and build on top of them without
changing them. This is useful if you have phased deployments, where old deployments
should not be modified but you want to use as many of them as possible. More details
in #33768.
9. **`python-venv` isolation**
Spack has unique requirements for Python because it:
1. installs every package in its own independent directory, and
2. allows users to register *external* python installations.
External installations may contain their own installed packages that can interfere
with Spack installations, and some distributions (Debian and Ubuntu, for example) even
change the `sysconfig` in ways that alter the installation layout of installed Python
packages (e.g., by adding a `/local` prefix). To isolate Spack
from these and other issues, we now insert a small `python-venv` package in between
`python` and packages that need to install Python code. This isolates Spack's build
environment, isolates Spack from any issues with an external python, and resolves a
large number of issues we've had with Python installations.
See #40773 for further details.
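If you want to see the new node for yourself, a sketch like the following (package
name illustrative, exact output varies) should show `python-venv` in the dependency
graph of a Python extension:
```
$ spack spec py-numpy | grep python-venv
```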
## New commands, options, and directives
* Allow packages to be pushed to build cache after install from source (#42423)
* `spack develop`: stage build artifacts in same root as non-dev builds #41373
* Don't delete `spack develop` build artifacts after install (#43424)
* `spack find`: add options for local/upstream only (#42999)
* `spack logs`: print log files for packages (either partially built or installed) (#42202)
* `patch`: support reversing patches (#43040)
* `develop`: Add -b/--build-directory option to set build_directory package attribute (#39606)
* `spack list`: add `--namespace` / `--repo` option (#41948)
* directives: add `checked_by` field to `license()`, add some license checks
* `spack gc`: add options for environments and build dependencies (#41731)
* Add `--create` to `spack env activate` (#40896)
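As a sketch of a short session combining a couple of these (the environment path and
spec are illustrative):
```
$ spack env activate --create ./demo   # creates the environment if it doesn't exist
$ spack install --add zlib
$ spack logs zlib                      # print the build log of the installed package
```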
## Performance improvements
* environment.py: fix excessive re-reads (#43746)
* ruamel yaml: fix quadratic complexity bug (#43745)
* Refactor to improve `spec format` speed (#43712)
* Do not acquire a write lock on the env post install if no views (#43505)
* asp.py: fewer calls to `spec.copy()` (#43715)
* spec.py: early return in `__str__`
* avoid `jinja2` import at startup unless needed (#43237)
## Other new features of note
* `archspec`: update to `v0.2.4`: support for Windows, bugfixes for `neoverse-v1` and
`neoverse-v2` detection.
* `spack config get`/`blame`: with no args, show entire config
* `spack env create <env>`: dir if dir-like (#44024)
* ASP-based solver: update os compatibility for macOS (#43862)
* Add handling of custom ssl certs in urllib ops (#42953)
* Add ability to rename environments (#43296)
* Add config option and compiler support to reuse across OS's (#42693)
* Support for prereleases (#43140)
* Only reuse externals when configured (#41707)
* Environments: Add support for including views (#42250)
## Binary caches
* Build cache: make signed/unsigned a mirror property (#41507)
* tools stack
## Removals, deprecations, and syntax changes
* remove `dpcpp` compiler and package (#43418)
* spack load: remove --only argument (#42120)
## Notable Bugfixes
* repo.py: drop deleted packages from provider cache (#43779)
* Allow `+` in module file names (#41999)
* `cmd/python`: use runpy to allow multiprocessing in scripts (#41789)
* Show extension commands with spack -h (#41726)
* Support environment variable expansion inside module projections (#42917)
* Alert user to failed concretizations (#42655)
* shell: fix zsh color formatting for PS1 in environments (#39497)
* spack mirror create --all: include patches (#41579)
## Spack community stats
* 7,994 total packages; 525 since `v0.21.0`
* 178 new Python packages, 5 new R packages
* 358 people contributed to this release
* 344 committers to packages
* 45 committers to core
# v0.21.3 (2024-10-02)
## Bugfixes
- Forward compatibility with Spack 0.23 packages with language dependencies (#45205, #45191)
- Forward compatibility with `urllib` from Python 3.12.6+ (#46453, #46483)
- Bump `archspec` to 0.2.5-dev for better aarch64 and Windows support (#42854, #44005,
#45721, #46445)
- Support macOS Sequoia (#45018, #45127, #43862)
- CI and test maintenance (#42909, #42728, #46711, #41943, #43363)
# v0.21.2 (2024-03-01)
## Bugfixes
- Containerize: accommodate nested or pre-existing spack-env paths (#41558)
- Fix setup-env script, when going back and forth between instances (#40924)
- Fix using fully-qualified namespaces from root specs (#41957)
- Fix a bug when a required provider is requested for multiple virtuals (#42088)
- OCI buildcaches:
- only push in parallel when forking (#42143)
- use pickleable errors (#42160)
- Fix using sticky variants in externals (#42253)
- Fix a rare issue with conditional requirements and multi-valued variants (#42566)
## Package updates
- rust: add v1.75, rework a few variants (#41161,#41903)
- py-transformers: add v4.35.2 (#41266)
- mgard: fix OpenMP on AppleClang (#42933)
# v0.21.1 (2024-01-11)
## New features
- Add support for reading buildcaches created by Spack v0.22 (#41773)
## Bugfixes
- spack graph: fix coloring with environments (#41240)
- spack info: sort variants in --variants-by-name (#41389)
- Spec.format: error on old style format strings (#41934)
- ASP-based solver:
- fix infinite recursion when computing concretization errors (#41061)
- don't error for type mismatch on preferences (#41138)
- don't emit spurious debug output (#41218)
- Improve the error message for deprecated preferences (#41075)
- Fix MSVC preview version breaking clingo build on Windows (#41185)
- Fix multi-word aliases (#41126)
- Add a warning for unconfigured compiler (#41213)
- environment: fix an issue with deconcretization/reconcretization of specs (#41294)
- buildcache: don't error if a patch is missing, when installing from binaries (#41986)
- Multiple improvements to unit-tests (#41215,#41369,#41495,#41359,#41361,#41345,#41342,#41308,#41226)
## Package updates
- root: add a webgui patch to address security issue (#41404)
- BerkeleyGW: update source urls (#38218)
# v0.21.0 (2023-11-11)
`v0.21.0` is a major feature release.
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@
 Spack is a multi-platform package manager that builds and installs
 multiple versions and configurations of software. It works on Linux,
-macOS, Windows, and many supercomputers. Spack is non-destructive: installing a
+macOS, and many supercomputers. Spack is non-destructive: installing a
 new version of a package does not break existing installations, so many
 configurations of the same package can coexist.
@@ -46,18 +46,13 @@ See the
 [Feature Overview](https://spack.readthedocs.io/en/latest/features.html)
 for examples and highlights.
 
-To install spack and your first package, make sure you have Python & Git.
+To install spack and your first package, make sure you have Python.
 Then:
 
-    $ git clone -c feature.manyFiles=true --depth=2 https://github.com/spack/spack.git
+    $ git clone -c feature.manyFiles=true https://github.com/spack/spack.git
     $ cd spack/bin
     $ ./spack install zlib
 
-> [!TIP]
-> `-c feature.manyFiles=true` improves git's performance on repositories with 1,000+ files.
->
-> `--depth=2` prunes the git history to reduce the size of the Spack installation.
-
 Documentation
 ----------------
@@ -93,7 +88,7 @@ Resources:
   [bridged](https://github.com/matrix-org/matrix-appservice-slack#matrix-appservice-slack) to Slack.
 * [**Github Discussions**](https://github.com/spack/spack/discussions):
   for Q&A and discussions. Note the pinned discussions for announcements.
-* **X**: [@spackpm](https://twitter.com/spackpm). Be sure to
+* **Twitter**: [@spackpm](https://twitter.com/spackpm). Be sure to
   `@mention` us!
 * **Mailing list**: [groups.google.com/d/forum/spack](https://groups.google.com/d/forum/spack):
   only for announcements. Please use other venues for discussions.
--- a/bin/spack-python
+++ b/bin/spack-python
@@ -22,4 +22,4 @@
 #
 # This is compatible across platforms.
 #
-exec spack python "$@"
+exec /usr/bin/env spack python "$@"
--- a/bin/spack.bat
+++ b/bin/spack.bat
@@ -188,27 +188,25 @@ if NOT "%_sp_args%"=="%_sp_args:--help=%" (
 goto :end_switch
 
 :case_load
-if NOT defined _sp_args (
-    exit /B 0
-)
-
-:: If args contain --bat, or -h/--help: just execute.
-if NOT "%_sp_args%"=="%_sp_args:--help=%" (
-    goto :default_case
-) else if NOT "%_sp_args%"=="%_sp_args:-h=%" (
-    goto :default_case
-) else if NOT "%_sp_args%"=="%_sp_args:--bat=%" (
-    goto :default_case
-) else if NOT "%_sp_args%"=="%_sp_args:--list=%" (
-    goto :default_case
+:: If args contain --sh, --csh, or -h/--help: just execute.
+if defined _sp_args (
+    if NOT "%_sp_args%"=="%_sp_args:--help=%" (
+        goto :default_case
+    ) else if NOT "%_sp_args%"=="%_sp_args:-h=%" (
+        goto :default_case
+    ) else if NOT "%_sp_args%"=="%_sp_args:--bat=%" (
+        goto :default_case
+    )
 )
 
 for /f "tokens=* USEBACKQ" %%I in (
-    `python "%spack%" %_sp_flags% %_sp_subcommand% --bat %_sp_args%`
-) do %%I
+    `python "%spack%" %_sp_flags% %_sp_subcommand% --bat %_sp_args%`) do %%I
 
 goto :end_switch
 
-:case_unload
-goto :case_load
-
 :default_case
 python "%spack%" %_sp_flags% %_sp_subcommand% %_sp_args%
 goto :end_switch
--- a/bin/spack.ps1
+++ b/bin/spack.ps1
@@ -144,5 +144,3 @@ switch($SpackSubCommand)
     "unload" {Invoke-SpackLoad}
     default {python "$Env:SPACK_ROOT/bin/spack" $SpackCMD_params $SpackSubCommand $SpackSubCommandArgs}
 }
-
-exit $LASTEXITCODE
--- a/bin/spack_cmd.bat
+++ b/bin/spack_cmd.bat
@@ -1,11 +1,71 @@
 @ECHO OFF
+setlocal EnableDelayedExpansion
 :: (c) 2021 Lawrence Livermore National Laboratory
 :: To use this file independently of Spack's installer, execute this script in its directory, or add the
 :: associated bin directory to your PATH. Invoke to launch Spack Shell.
 ::
 :: source_dir/spack/bin/spack_cmd.bat
 ::
-call "%~dp0..\share\spack\setup-env.bat"
-pushd %SPACK_ROOT%
-%comspec% /K
+pushd %~dp0..
+set SPACK_ROOT=%CD%
+pushd %CD%\..
+set spackinstdir=%CD%
+popd
+
+:: Check if Python is on the PATH
+if not defined python_pf_ver (
+    (for /f "delims=" %%F in ('where python.exe') do (
+        set "python_pf_ver=%%F"
+        goto :found_python
+    ) ) 2> NUL
+)
+:found_python
+if not defined python_pf_ver (
+    :: If not, look for Python from the Spack installer
+    :get_builtin
+    (for /f "tokens=*" %%g in ('dir /b /a:d "!spackinstdir!\Python*"') do (
+        set "python_ver=%%g")) 2> NUL
+
+    if not defined python_ver (
+        echo Python was not found on your system.
+        echo Please install Python or add Python to your PATH.
+    ) else (
+        set "py_path=!spackinstdir!\!python_ver!"
+        set "py_exe=!py_path!\python.exe"
+    )
+    goto :exitpoint
+) else (
+    :: Python is already on the path
+    set "py_exe=!python_pf_ver!"
+    (for /F "tokens=* USEBACKQ" %%F in (
+        `"!py_exe!" --version`) do (set "output=%%F")) 2>NUL
+    if not "!output:Microsoft Store=!"=="!output!" goto :get_builtin
+    goto :exitpoint
+)
+:exitpoint
+
+set "PATH=%SPACK_ROOT%\bin\;%PATH%"
+if defined py_path (
+    set "PATH=%py_path%;%PATH%"
+)
+
+if defined py_exe (
+    "%py_exe%" "%SPACK_ROOT%\bin\haspywin.py"
+)
+
+set "EDITOR=notepad"
+DOSKEY spacktivate=spack env activate $*
+@echo **********************************************************************
+@echo ** Spack Package Manager
+@echo **********************************************************************
+
+IF "%1"=="" GOTO CONTINUE
+set
+GOTO:EOF
+
+:continue
+set PROMPT=[spack] %PROMPT%
+%comspec% /k
--- a/etc/spack/defaults/bootstrap.yaml
+++ b/etc/spack/defaults/bootstrap.yaml
@@ -9,15 +9,15 @@ bootstrap:
   # may not be able to bootstrap all the software that Spack needs,
   # depending on its type.
   sources:
-  - name: github-actions-v0.6
-    metadata: $spack/share/spack/bootstrap/github-actions-v0.6
-  - name: github-actions-v0.5
+  - name: 'github-actions-v0.5'
     metadata: $spack/share/spack/bootstrap/github-actions-v0.5
-  - name: spack-install
+  - name: 'github-actions-v0.4'
+    metadata: $spack/share/spack/bootstrap/github-actions-v0.4
+  - name: 'spack-install'
     metadata: $spack/share/spack/bootstrap/spack-install
   trusted:
     # By default we trust bootstrapping from sources and from binaries
     # produced on Github via the workflow
-    github-actions-v0.6: true
    github-actions-v0.5: true
+    github-actions-v0.4: true
    spack-install: true
--- a/etc/spack/defaults/concretizer.yaml
+++ b/etc/spack/defaults/concretizer.yaml
@@ -15,7 +15,7 @@ concretizer:
   # as possible, rather than building. If `false`, we'll always give you a fresh
   # concretization. If `dependencies`, we'll only reuse dependencies but
   # give you a fresh concretization for your root specs.
-  reuse: true
+  reuse: dependencies
   # Options that tune which targets are considered for concretization. The
   # concretization process is very sensitive to the number targets, and the time
   # needed to reach a solution increases noticeably with the number of targets
@@ -39,19 +39,11 @@ concretizer:
   # Option to deal with possible duplicate nodes (i.e. different nodes from the same package) in the DAG.
   duplicates:
     # "none": allows a single node for any package in the DAG.
-    # "minimal": allows the duplication of 'build-tools' nodes only
-    # (e.g. py-setuptools, cmake etc.)
+    # "minimal": allows the duplication of 'build-tools' nodes only (e.g. py-setuptools, cmake etc.)
     # "full" (experimental): allows separation of the entire build-tool stack (e.g. the entire "cmake" subDAG)
     strategy: minimal
-  # Option to specify compatibility between operating systems for reuse of compilers and packages
+  # Option to specify compatiblity between operating systems for reuse of compilers and packages
   # Specified as a key: [list] where the key is the os that is being targeted, and the list contains the OS's
   # it can reuse. Note this is a directional compatibility so mutual compatibility between two OS's
   # requires two entries i.e. os_compatible: {sonoma: [monterey], monterey: [sonoma]}
   os_compatible: {}
-  # Option to specify whether to support splicing. Splicing allows for
-  # the relinking of concrete package dependencies in order to better
-  # reuse already built packages with ABI compatible dependencies
-  splice:
-    explicit: []
-    automatic: false
--- a/etc/spack/defaults/config.yaml
+++ b/etc/spack/defaults/config.yaml
@@ -115,6 +115,12 @@ config:
   suppress_gpg_warnings: false
 
+  # If set to true, Spack will attempt to build any compiler on the spec
+  # that is not already available. If set to False, Spack will only use
+  # compilers already configured in compilers.yaml
+  install_missing_compilers: false
+
   # If set to true, Spack will always check checksums after downloading
   # archives. If false, Spack skips the checksum step.
   checksum: true
@@ -164,6 +170,23 @@ config:
   # If set to true, Spack will use ccache to cache C compiles.
   ccache: false
 
+  # The concretization algorithm to use in Spack. Options are:
+  #
+  # 'clingo': Uses a logic solver under the hood to solve DAGs with full
+  #           backtracking and optimization for user preferences. Spack will
+  #           try to bootstrap the logic solver, if not already available.
+  #
+  # 'original': Spack's original greedy, fixed-point concretizer. This
+  #             algorithm can make decisions too early and will not backtrack
+  #             sufficiently for many specs. This will soon be deprecated in
+  #             favor of clingo.
+  #
+  # See `concretizer.yaml` for more settings you can fine-tune when
+  # using clingo.
+  concretizer: clingo
+
   # How long to wait to lock the Spack installation database. This lock is used
   # when Spack needs to manage its own package metadata and all operations are
   # expected to complete within the default time limit. The timeout should
--- /dev/null
+++ b/etc/spack/defaults/modules.yaml
@@ -0,0 +1,16 @@
+# -------------------------------------------------------------------------
+# This is the default configuration for Spack's module file generation.
+#
+# Settings here are versioned with Spack and are intended to provide
+# sensible defaults out of the box. Spack maintainers should edit this
+# file to keep it current.
+#
+# Users can override these settings by editing the following files.
+#
+# Per-spack-instance settings (overrides defaults):
+#   $SPACK_ROOT/etc/spack/modules.yaml
+#
+# Per-user settings (overrides default and site settings):
+#   ~/.spack/modules.yaml
+# -------------------------------------------------------------------------
+modules: {}
--- a/etc/spack/defaults/darwin/packages.yaml
+++ b/etc/spack/defaults/darwin/packages.yaml
@@ -19,6 +19,7 @@ packages:
     - apple-clang
     - clang
     - gcc
+    - intel
     providers:
       elf: [libelf]
       fuse: [macfuse]
--- a/etc/spack/defaults/packages.yaml
+++ b/etc/spack/defaults/packages.yaml
@@ -15,19 +15,15 @@
 # -------------------------------------------------------------------------
 packages:
   all:
-    compiler: [gcc, clang, oneapi, xl, nag, fj, aocc]
+    compiler: [gcc, intel, pgi, clang, xl, nag, fj, aocc]
     providers:
       awk: [gawk]
-      armci: [armcimpi]
       blas: [openblas, amdblis]
-      c: [gcc]
-      cxx: [gcc]
       D: [ldc]
       daal: [intel-oneapi-daal]
       elf: [elfutils]
       fftw-api: [fftw, amdfftw]
       flame: [libflame, amdlibflame]
-      fortran: [gcc]
       fortran-rt: [gcc-runtime, intel-oneapi-runtime]
       fuse: [libfuse]
       gl: [glx, osmesa]
@@ -39,11 +35,11 @@ packages:
       java: [openjdk, jdk, ibm-java]
       jpeg: [libjpeg-turbo, libjpeg]
       lapack: [openblas, amdlibflame]
-      libc: [glibc, musl]
-      libgfortran: [gcc-runtime]
-      libglx: [mesa+glx]
-      libifcore: [intel-oneapi-runtime]
+      libgfortran: [ gcc-runtime ]
+      libglx: [mesa+glx, mesa18+glx]
+      libifcore: [ intel-oneapi-runtime ]
       libllvm: [llvm]
+      libosmesa: [mesa+osmesa, mesa18+osmesa]
       lua-lang: [lua, lua-luajit-openresty, lua-luajit]
       luajit: [lua-luajit-openresty, lua-luajit]
       mariadb-client: [mariadb-c-client, mariadb]
@@ -64,7 +60,6 @@ packages:
       tbb: [intel-tbb]
       unwind: [libunwind]
       uuid: [util-linux-uuid, libuuid]
-      wasi-sdk: [wasi-sdk-prebuilt]
       xxd: [xxd-standalone, vim]
       yacc: [bison, byacc]
       ziglang: [zig]
@@ -72,13 +67,3 @@ packages:
   permissions:
     read: world
     write: user
-  cray-mpich:
-    buildable: false
-  cray-mvapich2:
-    buildable: false
-  fujitsu-mpi:
-    buildable: false
-  hpcx-mpi:
-    buildable: false
-  spectrum-mpi:
-    buildable: false
@@ -1,5 +1,6 @@
 config:
   locks: false
+  concretizer: clingo
   build_stage::
   - '$spack/.staging'
   stage_name: '{name}-{version}-{hash:7}'
--- a/lib/spack/docs/_templates/layout.html
+++ /dev/null
@@ -1,12 +0,0 @@
-{% extends "!layout.html" %}
-{%- block extrahead %}
-<!-- Google tag (gtag.js) -->
-<script async src="https://www.googletagmanager.com/gtag/js?id=G-S0PQ7WV75K"></script>
-<script>
-  window.dataLayer = window.dataLayer || [];
-  function gtag(){dataLayer.push(arguments);}
-  gtag('js', new Date());
-  gtag('config', 'G-S0PQ7WV75K');
-</script>
-{% endblock %}
--- a/lib/spack/docs/basic_usage.rst
+++ b/lib/spack/docs/basic_usage.rst
@@ -865,7 +865,7 @@ There are several different ways to use Spack packages once you have
 installed them. As you've seen, spack packages are installed into long
 paths with hashes, and you need a way to get them into your path. The
 easiest way is to use :ref:`spack load <cmd-spack-load>`, which is
-described in this section.
+described in the next section.
 
 Some more advanced ways to use Spack packages include:
@@ -959,86 +959,7 @@ use ``spack find --loaded``.
 You can also use ``spack load --list`` to get the same output, but it
 does not have the full set of query options that ``spack find`` offers.
 
-We'll learn more about Spack's spec syntax in :ref:`a later section <sec-specs>`.
-
-.. _extensions:
-
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Python packages and virtual environments
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Spack can install a large number of Python packages. Their names are
-typically prefixed with ``py-``. Installing and using them is no
-different from any other package:
-
-.. code-block:: console
-
-   $ spack install py-numpy
-   $ spack load py-numpy
-   $ python3
-   >>> import numpy
-
-The ``spack load`` command sets the ``PATH`` variable so that the right Python
-executable is used, and makes sure that ``numpy`` and its dependencies can be
-located in the ``PYTHONPATH``.
-
-Spack is different from other Python package managers in that it installs
-every package into its *own* prefix. This is in contrast to ``pip``, which
-installs all packages into the same prefix, be it in a virtual environment
-or not.
-
-For many users, **virtual environments** are more convenient than repeated
-``spack load`` commands, particularly when working with multiple Python
-packages. Fortunately Spack supports environments itself, which together
-with a view are no different from Python virtual environments.
-
-The recommended way of working with Python extensions such as ``py-numpy``
-is through :ref:`Environments <environments>`. The following example creates
-a Spack environment with ``numpy`` in the current working directory. It also
-puts a filesystem view in ``./view``, which is a more traditional combined
-prefix for all packages in the environment.
-
-.. code-block:: console
-
-   $ spack env create --with-view view --dir .
-   $ spack -e . add py-numpy
-   $ spack -e . concretize
-   $ spack -e . install
-
-Now you can activate the environment and start using the packages:
-
-.. code-block:: console
-
-   $ spack env activate .
-   $ python3
-   >>> import numpy
-
-The environment view is also a virtual environment, which is useful if you are
-sharing the environment with others who are unfamiliar with Spack. They can
-either use the Python executable directly:
-
-.. code-block:: console
-
-   $ ./view/bin/python3
-   >>> import numpy
-
-or use the activation script:
-
-.. code-block:: console
-
-   $ source ./view/bin/activate
-   $ python3
-   >>> import numpy
-
-In general, there should not be much difference between ``spack env activate``
-and using the virtual environment. The main advantage of ``spack env activate``
-is that it knows about more packages than just Python packages, and it may set
-additional runtime variables that are not covered by the virtual environment
-activation script.
-
-See :ref:`environments` for a more in-depth description of Spack
-environments and customizations to views.
+We'll learn more about Spack's spec syntax in the next section.
 
 .. _sec-specs:
@@ -1175,17 +1096,6 @@ unspecified version, but packages can depend on other packages with
 could depend on ``mpich@1.2:`` if it can only build with version
 ``1.2`` or higher of ``mpich``.
 
-.. note:: Windows Spec Syntax Caveats
-
-   Windows has a few idiosyncrasies when it comes to the Spack spec syntax and the use of certain shells
-   Spack's spec dependency syntax uses the carat (``^``) character, however this is an escape string in CMD
-   so it must be escaped with an additional carat (i.e. ``^^``).
-   CMD also will attempt to interpret strings with ``=`` characters in them. Any spec including this symbol
-   must double quote the string.
-
-   Note: All of these issues are unique to CMD, they can be avoided by using Powershell.
-
-   For more context on these caveats see the related issues: `carat <https://github.com/spack/spack/issues/42833>`_ and `equals <https://github.com/spack/spack/issues/43348>`_
-
 Below are more details about the specifiers that you can add to specs.
 
 .. _version-specifier:
@@ -1359,10 +1269,6 @@ For example, for the ``stackstart`` variant:
    mpileaks stackstart==4 # variant will be propagated to dependencies
    mpileaks stackstart=4 # only mpileaks will have this variant value
 
-Spack also allows variants to be propagated from a package that does
-not have that variant.
-
 ^^^^^^^^^^^^^^
 Compiler Flags
 ^^^^^^^^^^^^^^
@@ -1448,12 +1354,22 @@ the reserved keywords ``platform``, ``os`` and ``target``:
    $ spack install libelf os=ubuntu18.04
    $ spack install libelf target=broadwell
 
+or together by using the reserved keyword ``arch``:
+
+.. code-block:: console
+
+   $ spack install libelf arch=cray-CNL10-haswell
+
 Normally users don't have to bother specifying the architecture if they
 are installing software for their current host, as in that case the
 values will be detected automatically. If you need fine-grained control
 over which packages use which targets (or over *all* packages' default
 target), see :ref:`package-preferences`.
 
+.. admonition:: Cray machines
+
+   The situation is a little bit different for Cray machines and a detailed
+   explanation on how the architecture can be set on them can be found at :ref:`cray-support`
+
 .. _support-for-microarchitectures:
@@ -1789,6 +1705,165 @@ check only local packages (as opposed to those used transparently from
 ``upstream`` spack instances) and the ``-j,--json`` option to output
 machine-readable json data for any errors.
 
+.. _extensions:
+
+---------------------------
+Extensions & Python support
+---------------------------
+
+Spack's installation model assumes that each package will live in its
+own install prefix. However, certain packages are typically installed
+*within* the directory hierarchy of other packages. For example,
+`Python <https://www.python.org>`_ packages are typically installed in the
+``$prefix/lib/python-2.7/site-packages`` directory.
+
+In Spack, installation prefixes are immutable, so this type of installation
+is not directly supported. However, it is possible to create views that
+allow you to merge install prefixes of multiple packages into a single new prefix.
+Views are a convenient way to get a more traditional filesystem structure.
+Using *extensions*, you can ensure that Python packages always share the
+same prefix in the view as Python itself. Suppose you have
+Python installed like so:
+
+.. code-block:: console
+
+   $ spack find python
+   ==> 1 installed packages.
+   -- linux-debian7-x86_64 / gcc@4.4.7 --------------------------------
+   python@2.7.8
+
+.. _cmd-spack-extensions:
+
+^^^^^^^^^^^^^^^^^^^^
+``spack extensions``
+^^^^^^^^^^^^^^^^^^^^
+
+You can find extensions for your Python installation like this:
+
+.. code-block:: console
+
+   $ spack extensions python
+   ==> python@2.7.8%gcc@4.4.7 arch=linux-debian7-x86_64-703c7a96
+   ==> 36 extensions:
+   geos          py-ipython     py-pexpect    py-pyside            py-sip
+   py-basemap    py-libxml2     py-pil        py-pytz              py-six
+   py-biopython  py-mako        py-pmw        py-rpy2              py-sympy
+   py-cython     py-matplotlib  py-pychecker  py-scientificpython  py-virtualenv
+   py-dateutil   py-mpi4py      py-pygments   py-scikit-learn
+   py-epydoc     py-mx          py-pylint     py-scipy
+   py-gnuplot    py-nose        py-pyparsing  py-setuptools
+   py-h5py       py-numpy       py-pyqt       py-shiboken
+
+   ==> 12 installed:
+   -- linux-debian7-x86_64 / gcc@4.4.7 --------------------------------
+   py-dateutil@2.4.0    py-nose@1.3.4       py-pyside@1.2.2
+   py-dateutil@2.4.0    py-numpy@1.9.1      py-pytz@2014.10
+   py-ipython@2.3.1     py-pygments@2.0.1   py-setuptools@11.3.1
+   py-matplotlib@1.4.2  py-pyparsing@2.0.3  py-six@1.9.0
+
+The extensions are a subset of what's returned by ``spack list``, and
+they are packages like any other. They are installed into their own
+prefixes, and you can see this with ``spack find --paths``:
+
+.. code-block:: console
+
+   $ spack find --paths py-numpy
+   ==> 1 installed packages.
+   -- linux-debian7-x86_64 / gcc@4.4.7 --------------------------------
+       py-numpy@1.9.1  ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/py-numpy@1.9.1-66733244
+
+However, even though this package is installed, you cannot use it
+directly when you run ``python``:
+
+.. code-block:: console
+
+   $ spack load python
+   $ python
+   Python 2.7.8 (default, Feb 17 2015, 01:35:25)
+   [GCC 4.4.7 20120313 (Red Hat 4.4.7-11)] on linux2
+   Type "help", "copyright", "credits" or "license" for more information.
+   >>> import numpy
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+   ImportError: No module named numpy
+   >>>
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Using Extensions in Environments
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The recommended way of working with extensions such as ``py-numpy``
+above is through :ref:`Environments <environments>`. For example,
+the following creates an environment in the current working directory
+with a filesystem view in the ``./view`` directory:
+
+.. code-block:: console
+
+   $ spack env create --with-view view --dir .
+   $ spack -e . add py-numpy
+   $ spack -e . concretize
+   $ spack -e . install
+
+We recommend environments for two reasons. Firstly, environments
+can be activated (requires :ref:`shell-support`):
+
+.. code-block:: console
+
+   $ spack env activate .
+
+which sets all the right environment variables such as ``PATH`` and
+``PYTHONPATH``. This ensures that
+
+.. code-block:: console
+
+   $ python
+   >>> import numpy
+
+works. Secondly, even without shell support, the view ensures
+that Python can locate its extensions:
+
+.. code-block:: console
+
+   $ ./view/bin/python
+   >>> import numpy
+
+See :ref:`environments` for a more in-depth description of Spack
+environments and customizations to views.
+
+^^^^^^^^^^^^^^^^^^^^
+Using ``spack load``
+^^^^^^^^^^^^^^^^^^^^
+
+A more traditional way of using Spack and extensions is ``spack load``
+(requires :ref:`shell-support`). This will add the extension to ``PYTHONPATH``
+in your current shell, and Python itself will be available in the ``PATH``:
+
+.. code-block:: console
+
+   $ spack load py-numpy
+   $ python
+   >>> import numpy
+
+The loaded packages can be checked using ``spack find --loaded``
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Loading Extensions via Modules
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Apart from ``spack env activate`` and ``spack load``, you can load numpy
+through your environment modules (using ``environment-modules`` or
+``lmod``). This will also add the extension to the ``PYTHONPATH`` in
+your current shell.
+
+.. code-block:: console
+
+   $ module load <name of numpy module>
+
+If you do not know the name of the specific numpy module you wish to
+load, you can use the ``spack module tcl|lmod loads`` command to get
+the name of the module from the Spack spec.
+
 -----------------------
 Filesystem requirements
 -----------------------
--- a/lib/spack/docs/binary_caches.rst
+++ b/lib/spack/docs/binary_caches.rst
@@ -220,40 +220,6 @@ section of the configuration:
 .. _binary_caches_oci:
 
----------------------------------
-Automatic push to a build cache
----------------------------------
-
-Sometimes it is convenient to push packages to a build cache as soon as they are installed. Spack can do this by setting autopush flag when adding a mirror:
-
-.. code-block:: console
-
-    $ spack mirror add --autopush <name> <url or path>
-
-Or the autopush flag can be set for an existing mirror:
-
-.. code-block:: console
-
-    $ spack mirror set --autopush <name>  # enable automatic push for an existing mirror
-    $ spack mirror set --no-autopush <name>  # disable automatic push for an existing mirror
-
-Then after installing a package it is automatically pushed to all mirrors with ``autopush: true``. The command
-
-.. code-block:: console
-
-    $ spack install <package>
-
-will have the same effect as
-
-.. code-block:: console
-
-    $ spack install <package>
-    $ spack buildcache push <cache> <package>  # for all caches with autopush: true
-
-.. note::
-
-    Packages are automatically pushed to a build cache only if they are built from source.
-
 -----------------------------------------
 OCI / Docker V2 registries as build cache
 -----------------------------------------
--- a/lib/spack/docs/build_settings.rst
+++ b/lib/spack/docs/build_settings.rst
@@ -21,86 +21,23 @@ is the following:
 Reuse already installed packages
 --------------------------------
 
-The ``reuse`` attribute controls how aggressively Spack reuses binary packages during concretization. The
-attribute can either be a single value, or an object for more complex configurations.
-
-In the former case ("single value") it allows Spack to:
-
-1. Reuse installed packages and buildcaches for all the specs to be concretized, when ``true``
-2. Reuse installed packages and buildcaches only for the dependencies of the root specs, when ``dependencies``
-3. Disregard reusing installed packages and buildcaches, when ``false``
-
-In case a finer control over which specs are reused is needed, then the value of this attribute can be
-an object, with the following keys:
-
-1. ``roots``: if ``true`` root specs are reused, if ``false`` only dependencies of root specs are reused
-2. ``from``: list of sources from which reused specs are taken
-
-Each source in ``from`` is itself an object:
-
-.. list-table:: Attributes for a source or reusable specs
-   :header-rows: 1
-
-   * - Attribute name
-     - Description
-   * - type (mandatory, string)
-     - Can be ``local``, ``buildcache``, or ``external``
-   * - include (optional, list of specs)
-     - If present, reusable specs must match at least one of the constraint in the list
-   * - exclude (optional, list of specs)
-     - If present, reusable specs must not match any of the constraint in the list.
-
-For instance, the following configuration:
-
-.. code-block:: yaml
-
-   concretizer:
-     reuse:
-       roots: true
-       from:
-       - type: local
-         include:
-         - "%gcc"
-         - "%clang"
-
-tells the concretizer to reuse all specs compiled with either ``gcc`` or ``clang``, that are installed
-in the local store. Any spec from remote buildcaches is disregarded.
-
-To reduce the boilerplate in configuration files, default values for the ``include`` and
-``exclude`` options can be pushed up one level:
-
-.. code-block:: yaml
-
-   concretizer:
-     reuse:
-       roots: true
-       include:
-       - "%gcc"
-       from:
-       - type: local
-       - type: buildcache
-       - type: local
-         include:
-         - "foo %oneapi"
-
-In the example above we reuse all specs compiled with ``gcc`` from the local store
-and remote buildcaches, and we also reuse ``foo %oneapi``. Note that the last source of
-specs override the default ``include`` attribute.
-
-For one-off concretizations, the are command line arguments for each of the simple "single value"
-configurations. This means a user can:
+The ``reuse`` attribute controls whether Spack will prefer to use installed packages (``true``), or
+whether it will do a "fresh" installation and prefer the latest settings from
+``package.py`` files and ``packages.yaml`` (``false``).
+
+You can use:
 
 .. code-block:: console
 
    % spack install --reuse <spec>
 
-to enable reuse for a single installation, or:
+to enable reuse for a single installation, and you can use:
 
 .. code-block:: console
 
   spack install --fresh <spec>
 
 to do a fresh install if ``reuse`` is enabled by default.
+
+``reuse: dependencies`` is the default.
 
 .. seealso::
@@ -166,106 +103,3 @@ while `py-numpy` still needs an older version:
 Up to Spack v0.20 ``duplicates:strategy:none`` was the default (and only) behavior. From Spack v0.21 the
 default behavior is ``duplicates:strategy:minimal``.
-
---------
-Splicing
---------
-
-The ``splice`` key covers config attributes for splicing specs in the solver.
-
-"Splicing" is a method for replacing a dependency with another spec
-that provides the same package or virtual. There are two types of
-splices, referring to different behaviors for shared dependencies
-between the root spec and the new spec replacing a dependency:
-"transitive" and "intransitive". A "transitive" splice is one that
-resolves all conflicts by taking the dependency from the new node. An
-"intransitive" splice is one that resolves all conflicts by taking the
-dependency from the original root. From a theory perspective, hybrid
-splices are possible but are not modeled by Spack.
-
-All spliced specs retain a ``build_spec`` attribute that points to the
-original Spec before any splice occurred. The ``build_spec`` for a
-non-spliced spec is itself.
-
-The figure below shows examples of transitive and intransitive splices:
-
-.. figure:: images/splices.png
-   :align: center
-
-The concretizer can be configured to explicitly splice particular
-replacements for a target spec. Splicing will allow the user to make
-use of generically built public binary caches, while swapping in
-highly optimized local builds for performance critical components
-and/or components that interact closely with the specific hardware
-details of the system. The most prominent candidate for splicing is
-MPI providers. MPI packages have relatively well-understood ABI
-characteristics, and most High Performance Computing facilities deploy
-highly optimized MPI packages tailored to their particular
-hardware. The following config block configures Spack to replace
-whatever MPI provider each spec was concretized to use with the
-particular package of ``mpich`` with the hash that begins ``abcdef``.
-
-.. code-block:: yaml
-
-   concretizer:
-     splice:
-       explicit:
-       - target: mpi
-         replacement: mpich/abcdef
-         transitive: false
-
-.. warning::
-
-   When configuring an explicit splice, you as the user take on the
-   responsibility for ensuring ABI compatibility between the specs
-   matched by the target and the replacement you provide. If they are
-   not compatible, Spack will not warn you and your application will
-   fail to run.
-
-The ``target`` field of an explicit splice can be any abstract
-spec. The ``replacement`` field must be a spec that includes the hash
-of a concrete spec, and the replacement must either be the same
-package as the target, provide the virtual that is the target, or
-provide a virtual that the target provides. The ``transitive`` field
-is optional -- by default, splices will be transitive.
-
-.. note::
-
-   With explicit splices configured, it is possible for Spack to
-   concretize to a spec that does not satisfy the input. For example,
-   with the config above ``hdf5 ^mvapich2`` will concretize to user
-   ``mpich/abcdef`` instead of ``mvapich2`` as the MPI provider. Spack
-   will warn the user in this case, but will not fail the
-   concretization.
-
-.. _automatic_splicing:
-
-^^^^^^^^^^^^^^^^^^
-Automatic Splicing
-^^^^^^^^^^^^^^^^^^
-
-The Spack solver can be configured to do automatic splicing for
-ABI-compatible packages. Automatic splices are enabled in the concretizer
-config section
-
-.. code-block:: yaml
-
-   concretizer:
-     splice:
-       automatic: True
-
-Packages can include ABI-compatibility information using the
-``can_splice`` directive. See :ref:`the packaging
-guide<abi_compatibility>` for instructions on specifying ABI
-compatibility using the ``can_splice`` directive.
-
-.. note::
-
-   The ``can_splice`` directive is experimental and may be changed in
-   future versions.
-
-When automatic splicing is enabled, the concretizer will combine any
-number of ABI-compatible specs if possible to reuse installed packages
-and packages available from binary caches. The end result of these
-specs is equivalent to a series of transitive/intransitive splices,
-but the series may be non-obvious.
--- a/lib/spack/docs/build_systems/autotoolspackage.rst
+++ b/lib/spack/docs/build_systems/autotoolspackage.rst
@@ -147,15 +147,6 @@ example, the ``bash`` shell is used to run the ``autogen.sh`` script.
    def autoreconf(self, spec, prefix):
        which("bash")("autogen.sh")
 
-If the ``package.py`` has build instructions in a separate
-:ref:`builder class <multiple_build_systems>`, the signature for a phase changes slightly:
-
-.. code-block:: python
-
-   class AutotoolsBuilder(AutotoolsBuilder):
-       def autoreconf(self, pkg, spec, prefix):
-           which("bash")("autogen.sh")
-
 """""""""""""""""""""""""""""""""""""""
 patching configure or Makefile.in files
 """""""""""""""""""""""""""""""""""""""
--- a/lib/spack/docs/build_systems/perlpackage.rst
+++ b/lib/spack/docs/build_systems/perlpackage.rst
@@ -130,19 +130,14 @@ before or after a particular phase. For example, in ``perl``, we see:
    @run_after("install")
    def install_cpanm(self):
        spec = self.spec
-       maker = make
-       cpan_dir = join_path("cpanm", "cpanm")
-       if sys.platform == "win32":
-           maker = nmake
-           cpan_dir = join_path(self.stage.source_path, cpan_dir)
-           cpan_dir = windows_sfn(cpan_dir)
-       if "+cpanm" in spec:
-           with working_dir(cpan_dir):
-               perl = spec["perl"].command
-               perl("Makefile.PL")
-               maker()
-               maker("install")
+       if spec.satisfies("+cpanm"):
+           with working_dir(join_path("cpanm", "cpanm")):
+               perl = spec["perl"].command
+               perl("Makefile.PL")
+               make()
+               make("install")
 
 This extra step automatically installs ``cpanm`` in addition to the
 base Perl installation.
@@ -181,14 +176,8 @@ In the ``perl`` package, we can see:
    @run_after("build")
    @on_package_attributes(run_tests=True)
-   def build_test(self):
-       if sys.platform == "win32":
-           win32_dir = os.path.join(self.stage.source_path, "win32")
-           win32_dir = windows_sfn(win32_dir)
-           with working_dir(win32_dir):
-               nmake("test", ignore_quotes=True)
-       else:
-           make("test")
+   def test(self):
+       make("test")
 
 As you can guess, this runs ``make test`` *after* building the package,
 if and only if testing is requested. Again, this is not specific to
--- a/lib/spack/docs/build_systems/inteloneapipackage.rst
+++ b/lib/spack/docs/build_systems/inteloneapipackage.rst
@@ -25,7 +25,7 @@ use Spack to build packages with the tools.
 The Spack Python class ``IntelOneapiPackage`` is a base class that is
 used by ``IntelOneapiCompilers``, ``IntelOneapiMkl``,
 ``IntelOneapiTbb`` and other classes to implement the oneAPI
-packages. Search for ``oneAPI`` at `packages.spack.io <https://packages.spack.io>`_ for the full
+packages. Search for ``oneAPI`` at `<packages.spack.io>`_ for the full
 list of available oneAPI packages, or use::
 
     spack list -d oneAPI
--- a/lib/spack/docs/build_systems/pythonpackage.rst
+++ b/lib/spack/docs/build_systems/pythonpackage.rst
@@ -718,45 +718,23 @@ command-line tool, or C/C++/Fortran program with optional Python
 modules? The former should be prepended with ``py-``, while the
 latter should not.
 
-""""""""""""""""""""""""""""""
-``extends`` vs. ``depends_on``
-""""""""""""""""""""""""""""""
+""""""""""""""""""""""
+extends vs. depends_on
+""""""""""""""""""""""
+
+This is very similar to the naming dilemma above, with a slight twist.
 
 As mentioned in the :ref:`Packaging Guide <packaging_extensions>`,
 ``extends`` and ``depends_on`` are very similar, but ``extends`` ensures
 that the extension and extendee share the same prefix in views.
 This allows the user to import a Python module without
 having to add that module to ``PYTHONPATH``.
 
-Additionally, ``extends("python")`` adds a dependency on the package
-``python-venv``. This improves isolation from the system, whether
-it's during the build or at runtime: user and system site packages
-cannot accidentally be used by any package that ``extends("python")``.
-
-As a rule of thumb: if a package does not install any Python modules
-of its own, and merely puts a Python script in the ``bin`` directory,
-then there is no need for ``extends``. If the package installs modules
-in the ``site-packages`` directory, it requires ``extends``.
-
-"""""""""""""""""""""""""""""""""""""
-Executing ``python`` during the build
-"""""""""""""""""""""""""""""""""""""
-
-Whenever you need to execute a Python command or pass the path of the
-Python interpreter to the build system, it is best to use the global
-variable ``python`` directly. For example:
-
-.. code-block:: python
-
-   @run_before("install")
-   def recythonize(self):
-       python("setup.py", "clean")  # use the `python` global
-
-As mentioned in the previous section, ``extends("python")`` adds an
-automatic dependency on ``python-venv``, which is a virtual environment
-that guarantees build isolation. The ``python`` global always refers to
-the correct Python interpreter, whether the package uses ``extends("python")``
-or ``depends_on("python")``.
+When deciding between ``extends`` and ``depends_on``, the best rule of
+thumb is to check the installation prefix. If Python libraries are
+installed to ``<prefix>/lib/pythonX.Y/site-packages``, then you
+should use ``extends``. If Python libraries are installed elsewhere
+or the only files that get installed reside in ``<prefix>/bin``, then
+don't use ``extends``.
 
 ^^^^^^^^^^^^^^^^^^^^^
 Alternatives to Spack
--- a/lib/spack/docs/build_systems/sconspackage.rst
+++ b/lib/spack/docs/build_systems/sconspackage.rst
@@ -49,14 +49,14 @@ following phases:
 #. ``install`` - install the package
 
 Package developers often add unit tests that can be invoked with
-``scons test`` or ``scons check``. Spack provides a ``build_test`` method
+``scons test`` or ``scons check``. Spack provides a ``test`` method
 to handle this. Since we don't know which one the package developer
-chose, the ``build_test`` method does nothing by default, but can be easily
+chose, the ``test`` method does nothing by default, but can be easily
 overridden like so:
 
 .. code-block:: python
 
-   def build_test(self):
+   def test(self):
        scons("check")
--- a/lib/spack/docs/chain.rst
+++ b/lib/spack/docs/chain.rst
@@ -5,14 +5,13 @@
 .. chain:
 
-=============================================
-Chaining Spack Installations (upstreams.yaml)
-=============================================
+============================
+Chaining Spack Installations
+============================
 
 You can point your Spack installation to another installation to use any
 packages that are installed there. To register the other Spack instance,
-you can add it as an entry to ``upstreams.yaml`` at any of the
-:ref:`configuration-scopes`:
+you can add it as an entry to ``upstreams.yaml``:
 
 .. code-block:: yaml
@@ -23,8 +22,7 @@ you can add it as an entry to ``upstreams.yaml`` at any of the
       install_tree: /path/to/another/spack/opt/spack
 
 ``install_tree`` must point to the ``opt/spack`` directory inside of the
-Spack base directory, or the location of the ``install_tree`` defined
-in :ref:`config.yaml <config-yaml>`.
+Spack base directory.
 
 Once the upstream Spack instance has been added, ``spack find`` will
 automatically check the upstream instance when querying installed packages,
@@ -206,24 +206,17 @@ def setup(sphinx):
     ("py:class", "six.moves.urllib.parse.ParseResult"),
     ("py:class", "TextIO"),
     ("py:class", "hashlib._Hash"),
-    ("py:class", "concurrent.futures._base.Executor"),
     # Spack classes that are private and we don't want to expose
     ("py:class", "spack.provider_index._IndexBase"),
     ("py:class", "spack.repo._PrependFileLoader"),
-    ("py:class", "spack.build_systems._checks.BuilderWithDefaults"),
+    ("py:class", "spack.build_systems._checks.BaseBuilder"),
     # Spack classes that intersphinx is unable to resolve
     ("py:class", "spack.version.StandardVersion"),
     ("py:class", "spack.spec.DependencySpec"),
-    ("py:class", "spack.spec.ArchSpec"),
     ("py:class", "spack.spec.InstallStatus"),
     ("py:class", "spack.spec.SpecfileReaderBase"),
     ("py:class", "spack.install_test.Pb"),
     ("py:class", "spack.filesystem_view.SimpleFilesystemView"),
-    ("py:class", "spack.traverse.EdgeAndDepth"),
-    ("py:class", "archspec.cpu.microarchitecture.Microarchitecture"),
-    ("py:class", "spack.compiler.CompilerCache"),
-    # TypeVar that is not handled correctly
-    ("py:class", "llnl.util.lang.T"),
 ]

 # The reST default role (used for this markup: `text`) to use for all documents.
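For orientation, the entries above live in Sphinx's ``nitpick_ignore`` list in the project's ``conf.py``; a minimal sketch of the mechanism (the single entry shown is just an example taken from the hunk):

.. code-block:: python

   # conf.py: with nitpicky mode on, Sphinx warns about any reference it
   # cannot resolve unless the target is listed in nitpick_ignore.
   nitpicky = True
   nitpick_ignore = [
       ("py:class", "hashlib._Hash"),
   ]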
@@ -150,7 +150,7 @@ this can expose you to attacks. Use at your own risk.
 --------------------

 Path to custom certificates for SSL verification. The value can be a
-filesystem path, or an environment variable that expands to an absolute file path.
+filesystem path, or an environment variable that expands to a file path.
 The default value is set to the environment variable ``SSL_CERT_FILE``
 to use the same syntax used by many other applications that automatically
 detect custom certificates.
@@ -160,9 +160,6 @@ in the subprocess calling ``curl``.
 If ``url_fetch_method:urllib`` then files and directories are supported i.e.
 ``config:ssl_certs:$SSL_CERT_FILE`` or ``config:ssl_certs:$SSL_CERT_DIR``
 will work.
-In all cases the expanded path must be absolute for Spack to use the certificates.
-Certificates relative to an environment can be created by prepending the path variable
-with the Spack configuration variable ``$env``.

 --------------------
 ``checksum``
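For context, the ``ssl_certs`` setting discussed in the hunk above takes this shape in ``config.yaml`` (a minimal sketch; the value shown is the documented default):

.. code-block:: yaml

   config:
     ssl_certs: $SSL_CERT_FILE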
@@ -281,7 +281,7 @@ When spack queries for configuration parameters, it searches in
 higher-precedence scopes first. So, settings in a higher-precedence file
 can override those with the same key in a lower-precedence one. For
 list-valued settings, Spack *prepends* higher-precedence settings to
-lower-precedence settings. Completely ignoring lower-precedence configuration
+lower-precedence settings. Completely ignoring higher-level configuration
 options is supported with the ``::`` notation for keys (see
 :ref:`config-overrides` below).
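To make the ``::`` notation concrete, a hedged sketch of an override (the ``install_tree`` value is illustrative): a double colon on a section key replaces the entire lower-precedence section instead of merging with it:

.. code-block:: yaml

   config::
     install_tree:
       root: $spack/opt/spack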
@@ -511,7 +511,6 @@ Spack understands over a dozen special variables. These are:
 * ``$target_family``. The target family for the current host, as
   detected by ArchSpec. E.g. ``x86_64`` or ``aarch64``.
 * ``$date``: the current date in the format YYYY-MM-DD
-* ``$spack_short_version``: the Spack version truncated to the first components.

 Note that, as with shell variables, you can write these as ``$varname``
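As a sketch of how such variables might be used in a configuration path (the layout is illustrative, not a recommendation):

.. code-block:: yaml

   config:
     install_tree:
       root: /sw/spack/$target_family/$date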
@@ -194,18 +194,21 @@ The OS that are currently supported are summarized in the table below:
    * - Operating System
      - Base Image
      - Spack Image
+   * - Ubuntu 18.04
+     - ``ubuntu:18.04``
+     - ``spack/ubuntu-bionic``
    * - Ubuntu 20.04
      - ``ubuntu:20.04``
      - ``spack/ubuntu-focal``
    * - Ubuntu 22.04
      - ``ubuntu:22.04``
      - ``spack/ubuntu-jammy``
-   * - Ubuntu 24.04
-     - ``ubuntu:24.04``
-     - ``spack/ubuntu-noble``
-   * - CentOS Stream9
-     - ``quay.io/centos/centos:stream9``
-     - ``spack/centos-stream9``
+   * - CentOS 7
+     - ``centos:7``
+     - ``spack/centos7``
+   * - CentOS Stream
+     - ``quay.io/centos/centos:stream``
+     - ``spack/centos-stream``
    * - openSUSE Leap
      - ``opensuse/leap``
      - ``spack/leap15``
@@ -224,12 +227,12 @@ The OS that are currently supported are summarized in the table below:
    * - Rocky Linux 9
      - ``rockylinux:9``
      - ``spack/rockylinux9``
-   * - Fedora Linux 39
-     - ``fedora:39``
-     - ``spack/fedora39``
-   * - Fedora Linux 40
-     - ``fedora:40``
-     - ``spack/fedora40``
+   * - Fedora Linux 37
+     - ``fedora:37``
+     - ``spack/fedora37``
+   * - Fedora Linux 38
+     - ``fedora:38``
+     - ``spack/fedora38``
@@ -184,7 +184,7 @@ Style Tests
 Spack uses `Flake8 <http://flake8.pycqa.org/en/latest/>`_ to test for
 `PEP 8 <https://www.python.org/dev/peps/pep-0008/>`_ conformance and
-`mypy <https://mypy.readthedocs.io/en/stable/>`_ for type checking. PEP 8 is
+`mypy <https://mypy.readthedocs.io/en/stable/>` for type checking. PEP 8 is
 a series of style guides for Python that provide suggestions for everything
 from variable naming to indentation. In order to limit the number of PRs that
 were mostly style changes, we decided to enforce PEP 8 conformance. Your PR
@@ -316,215 +316,6 @@ documentation tests to make sure there are no errors. Documentation changes can
 in some obfuscated warning messages. If you don't understand what they mean, feel free
 to ask when you submit your PR.

-.. _spack-builders-and-pipelines:
-
-^^^^^^^^^
-GitLab CI
-^^^^^^^^^
-
-""""""""""""""""""
-Build Cache Stacks
-""""""""""""""""""
-
-Spack welcomes the contribution of software stacks of interest to the community. These
-stacks are used to test package recipes and generate publicly available build caches.
-Spack uses GitLab CI for managing the orchestration of build jobs.
-
-GitLab Entry Point
-~~~~~~~~~~~~~~~~~~
-
-Add the stack entry point to ``share/spack/gitlab/cloud_pipelines/.gitlab-ci.yml``. There
-are two stages required for each new stack: the generate stage and the build stage.
-
-The generate stage is defined using the job template ``.generate``, configured with
-environment variables defining the name of the stack in ``SPACK_CI_STACK_NAME``, the
-platform (``SPACK_TARGET_PLATFORM``) and architecture (``SPACK_TARGET_ARCH``) configuration,
-and the tags associated with the class of runners to build on.
-
-.. note::
-
-   The ``SPACK_CI_STACK_NAME`` must match the name of the directory containing the
-   stack's ``spack.yaml``.
-
-.. note::
-
-   The platform and architecture variables are specified in order to select the
-   correct configurations from the generic configurations used in Spack CI. The
-   configurations currently available are:
-
-   * ``.cray_rhel_zen4``
-   * ``.cray_sles_zen4``
-   * ``.darwin_aarch64``
-   * ``.darwin_x86_64``
-   * ``.linux_aarch64``
-   * ``.linux_icelake``
-   * ``.linux_neoverse_n1``
-   * ``.linux_neoverse_v1``
-   * ``.linux_neoverse_v2``
-   * ``.linux_power``
-   * ``.linux_skylake``
-   * ``.linux_x86_64``
-   * ``.linux_x86_64_v4``
-
-   New configurations can be added to accommodate new platforms and architectures.
-
-The build stage is defined as a trigger job that consumes the GitLab CI pipeline generated in
-the generate stage for this stack. Build stage jobs use the ``.build`` job template, which
-handles the basic configuration.
-
-An example entry point for a new stack called ``my-super-cool-stack``:
-
-.. code-block:: yaml
-
-   .my-super-cool-stack:
-     extends: [ ".linux_x86_64_v3" ]
-     variables:
-       SPACK_CI_STACK_NAME: my-super-cool-stack
-     tags: [ "all", "tags", "your", "job", "needs"]
-
-   my-super-cool-stack-generate:
-     extends: [ ".generate", ".my-super-cool-stack" ]
-     image: my-super-cool-stack-image:0.0.1
-
-   my-super-cool-stack-build:
-     extends: [ ".build", ".my-super-cool-stack" ]
-     trigger:
-       include:
-         - artifact: jobs_scratch_dir/cloud-ci-pipeline.yml
-           job: my-super-cool-stack-generate
-       strategy: depend
-     needs:
-       - artifacts: True
-         job: my-super-cool-stack-generate
-
-Stack Configuration
-~~~~~~~~~~~~~~~~~~~
-
-The stack configuration is a Spack environment file with two additional sections added.
-Stack configurations should be located in ``share/spack/gitlab/cloud_pipelines/stacks/<stack_name>/spack.yaml``.
-
-The ``ci`` section is generally used to define stack-specific mappings such as image or tags.
-For more information on what can go into the ``ci`` section refer to the docs on pipelines.
-
-The ``cdash`` section is used for defining where to upload the results of builds. Spack configures
-most of the details for posting pipeline results to
-`cdash.spack.io <https://cdash.spack.io/index.php?project=Spack+Testing>`_. The only
-requirement in the stack configuration is to define a ``build-group`` that is unique;
-this is usually the long name of the stack.
-
-An example stack that builds ``zlib``:
-
-.. code-block:: yaml
-
-   spack:
-     view: false
-     packages:
-       all:
-         require: ["%gcc", "target=x86_64_v3"]
-     specs:
-     - zlib
-
-     ci:
-       pipeline-gen:
-       - build-job:
-           image: my-super-cool-stack-image:0.0.1
-
-     cdash:
-       build-group: My Super Cool Stack
-
-.. note::
-
-   The ``image`` used in the ``*-generate`` job must match exactly the ``image`` used in the ``build-job``.
-   When the images do not match the build job may fail.
-
-"""""""""""""""""""
-Registering Runners
-"""""""""""""""""""
-
-Contributing computational resources to Spack's CI build farm is one way to help expand the
-capabilities and offerings of the public Spack build caches. Currently, Spack utilizes Linux runners
-from AWS, Google, and the University of Oregon (UO).
-
-Runners require four key pieces:
-
-* Runner registration token
-* Accurate tags
-* OIDC authentication script
-* GPG keys
-
-Minimum GitLab Runner version: ``16.1.0``
-(`installation instructions <https://docs.gitlab.com/runner/install/>`_)
-
-Registration Token
-~~~~~~~~~~~~~~~~~~
-
-The first step to contribute new runners is to open an issue in the `spack infrastructure <https://github.com/spack/spack-infrastructure/issues/new?assignees=&labels=runner-registration&projects=&template=runner_registration.yml>`_
-project. This will be reported to the Spack infrastructure team, who will guide users through the process
-of registering new runners for Spack CI.
-
-The information needed to register a runner is the motivation for the new resources, a semi-detailed description of
-the runner, and finally the point of contact for maintaining the software on the runner.
-
-The point of contact will then work with the infrastructure team to obtain runner registration token(s) for interacting
-with Spack's GitLab instance. Once the runner is active, this point of contact will also be responsible for updating the
-GitLab runner software to keep pace with Spack's GitLab.
-
-Tagging
-~~~~~~~
-
-In the initial stages of runner registration it is important to **exclude** the special tag ``spack``. This will prevent
-the new runner(s) from being picked up for production CI jobs while it is configured and evaluated. Once it is determined
-that the runner is ready for production use, the ``spack`` tag will be added.
-
-Because GitLab has no concept of tag exclusion, runners that provide specialized resources also require specialized tags.
-For example, a basic CPU-only x86_64 runner may have the tag ``x86_64`` associated with it. However, a runner containing a
-CUDA-capable GPU may have the tag ``x86_64-cuda`` to denote that it should only be used for packages that will benefit from
-a CUDA-capable resource.
-
-OIDC
-~~~~
-
-Spack runners use OIDC authentication for connecting to the appropriate AWS bucket,
-which is used for coordinating the communication of binaries between build jobs. In
-order to configure OIDC authentication, Spack CI runners use a Python script with minimal
-dependencies. This script can be configured for runners as seen here using the ``pre_build_script``.
-
-.. code-block:: toml
-
-   [[runners]]
-     pre_build_script = """
-     echo 'Executing Spack pre-build setup script'
-
-     for cmd in "${PY3:-}" python3 python; do
-       if command -v > /dev/null "$cmd"; then
-         export PY3="$(command -v "$cmd")"
-         break
-       fi
-     done
-
-     if [ -z "${PY3:-}" ]; then
-       echo "Unable to find python3 executable"
-       exit 1
-     fi
-
-     $PY3 -c "import urllib.request;urllib.request.urlretrieve('https://raw.githubusercontent.com/spack/spack-infrastructure/main/scripts/gitlab_runner_pre_build/pre_build.py', 'pre_build.py')"
-     $PY3 pre_build.py > envvars
-
-     . ./envvars
-     rm -f envvars
-     unset GITLAB_OIDC_TOKEN
-     """
-
-GPG Keys
-~~~~~~~~
-
-Runners that may be utilized for ``protected`` CI require the registration of an intermediate signing key that
-can be used to sign packages. For more information on package signing read :ref:`key_architecture`.

 --------
 Coverage
 --------
@@ -181,6 +181,10 @@ Spec-related modules
 :mod:`spack.parser`
   Contains :class:`~spack.parser.SpecParser` and functions related to parsing specs.

+:mod:`spack.concretize`
+  Contains :class:`~spack.concretize.Concretizer` implementation,
+  which allows site administrators to change Spack's :ref:`concretization-policies`.
+
 :mod:`spack.version`
   Implements a simple :class:`~spack.version.Version` class with simple
   comparison semantics. Also implements :class:`~spack.version.VersionRange`
@@ -333,9 +337,13 @@ inserting them at different places in the spack code base. Whenever a hook
 type triggers by way of a function call, we find all the hooks of that type,
 and run them.

-Spack defines hooks by way of a module in the ``lib/spack/spack/hooks`` directory.
-This module has to be registered in ``__init__.py`` so that Spack is aware of it.
-This section will cover the basic kind of hooks, and how to write them.
+Spack defines hooks by way of a module at ``lib/spack/spack/hooks`` where we can define
+types of hooks in the ``__init__.py``, and then python files in that folder
+can use hook functions. The files are automatically parsed, so if you write
+a new file for some integration (e.g., ``lib/spack/spack/hooks/myintegration.py``)
+you can then write hook functions in that file that will be automatically detected,
+and run whenever your hook is called. This section will cover the basic kind
+of hooks, and how to write them.

 ^^^^^^^^^^^^^^
 Types of Hooks
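To make the hook mechanism described above concrete, a minimal hypothetical hook module (the file name and body are illustrative, and the assumption that ``post_install(spec, explicit)`` is the hook signature in use is ours):

.. code-block:: python

   # lib/spack/spack/hooks/myintegration.py (hypothetical sketch)
   def post_install(spec, explicit):
       # Runs after a package is installed; ``spec`` is the concrete spec.
       print(f"myintegration: installed {spec.name}")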
@@ -544,11 +552,11 @@ With either interpreter you can run a single command:

 .. code-block:: console

-   $ spack python -c 'from spack.spec import Spec; Spec("python").concretized()'
-   ...
+   $ spack python -c 'import distro; distro.linux_distribution()'
+   ('Ubuntu', '18.04', 'Bionic Beaver')

-   $ spack python -i ipython -c 'from spack.spec import Spec; Spec("python").concretized()'
-   Out[1]: ...
+   $ spack python -i ipython -c 'import distro; distro.linux_distribution()'
+   Out[1]: ('Ubuntu', '18.04', 'Bionic Beaver')

 or a file:
@@ -708,27 +716,27 @@ Release branches
 ^^^^^^^^^^^^^^^^

 There are currently two types of Spack releases: :ref:`major releases
-<major-releases>` (``0.21.0``, ``0.22.0``, etc.) and :ref:`patch releases
-<patch-releases>` (``0.22.1``, ``0.22.2``, ``0.22.3``, etc.). Here is a
+<major-releases>` (``0.17.0``, ``0.18.0``, etc.) and :ref:`point releases
+<point-releases>` (``0.17.1``, ``0.17.2``, ``0.17.3``, etc.). Here is a
 diagram of how Spack release branches work::

-    o    branch: develop  (latest version, v0.23.0.dev0)
+    o    branch: develop  (latest version, v0.19.0.dev0)
     |
     o
-    | o  branch: releases/v0.22, tag: v0.22.1
+    | o  branch: releases/v0.18, tag: v0.18.1
     o |
-    | o  tag: v0.22.0
+    | o  tag: v0.18.0
     o |
     | o
     |/
     o
     |
     o
-    | o  branch: releases/v0.21, tag: v0.21.2
+    | o  branch: releases/v0.17, tag: v0.17.2
     o |
-    | o  tag: v0.21.1
+    | o  tag: v0.17.1
     o |
-    | o  tag: v0.21.0
+    | o  tag: v0.17.0
     o |
     | o
     |/
@@ -739,8 +747,8 @@ requests target ``develop``. The ``develop`` branch will report that its
 version is that of the next **major** release with a ``.dev0`` suffix.

 Each Spack release series also has a corresponding branch, e.g.
-``releases/v0.22`` has ``v0.22.x`` versions of Spack, and
-``releases/v0.21`` has ``v0.21.x`` versions. A major release is the first
+``releases/v0.18`` has ``0.18.x`` versions of Spack, and
+``releases/v0.17`` has ``0.17.x`` versions. A major release is the first
 tagged version on a release branch. Minor releases are back-ported from
 develop onto release branches. This is typically done by cherry-picking
 bugfix commits off of ``develop``.
@@ -770,40 +778,27 @@ for more details.
 Scheduling work for releases
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-We schedule work for **major releases** through `milestones
-<https://github.com/spack/spack/milestones>`_ and `GitHub Projects
-<https://github.com/spack/spack/projects>`_, while **patch releases** use `labels
-<https://github.com/spack/spack/labels>`_.
-
-There is only one milestone open at a time. Its name corresponds to the next major version, for
-example ``v0.23``. Important issues and pull requests should be assigned to this milestone by
-core developers, so that they are not forgotten at the time of release. The milestone is closed
-when the release is made, and a new milestone is created for the next major release.
-
-Bug reports in GitHub issues are automatically labelled ``bug`` and ``triage``. Spack developers
-assign one of the labels ``impact-low``, ``impact-medium`` or ``impact-high``. This will make the
-issue appear in the `Triaged bugs <https://github.com/orgs/spack/projects/6>`_ project board.
-
-Important issues should be assigned to the next milestone as well, so they appear at the top of
-the project board.
-
-Spack's milestones are not firm commitments so we move work between releases frequently. If we
-need to make a release and some tasks are not yet done, we will simply move them to the next major
-release milestone, rather than delaying the release to complete them.
-
-^^^^^^^^^^^^^^^^^^^^^
-Backporting bug fixes
-^^^^^^^^^^^^^^^^^^^^^
-
-When a bug is fixed in the ``develop`` branch, it is often necessary to backport the fix to one
-(or more) of the ``release/vX.Y`` branches. Only the release manager is responsible for doing
-backports, but Spack maintainers are responsible for labelling pull requests (and issues if no bug
-fix is available yet) with ``vX.Y.Z`` labels. The label should correspond to the next patch version
-that the bug fix should be backported to.
-
-Backports are done publicly by the release manager using a pull request named ``Backports vX.Y.Z``.
-This pull request is opened from the ``backports/vX.Y.Z`` branch, targets the ``releases/vX.Y``
-branch and contains a (growing) list of cherry-picked commits from the ``develop`` branch.
-Typically there are one or two backport pull requests open at any given time.
+We schedule work for releases by creating `GitHub projects
+<https://github.com/spack/spack/projects>`_. At any time, there may be
+several open release projects. For example, below are two releases (from
+some past version of the page linked above):
+
+.. image:: images/projects.png
+
+This image shows one release in progress for ``0.15.1`` and another for
+``0.16.0``. Each of these releases has a project board containing issues
+and pull requests. GitHub shows a status bar with completed work in
+green, work in progress in purple, and work not started yet in gray, so
+it's fairly easy to see progress.
+
+Spack's project boards are not firm commitments so we move work between
+releases frequently. If we need to make a release and some tasks are not
+yet done, we will simply move them to the next minor or major release, rather
+than delaying the release to complete them.
+
+For more on using GitHub project boards, see `GitHub's documentation
+<https://docs.github.com/en/github/managing-your-work-on-github/about-project-boards>`_.

 .. _major-releases:
@@ -811,21 +806,25 @@ Typically there are one or two backport pull requests open at any given time.
 Making major releases
 ^^^^^^^^^^^^^^^^^^^^^

-Assuming all required work from the milestone is completed, the steps to make the major release
-are:
+Assuming a project board has already been created and all required work
+completed, the steps to make the major release are:

-#. `Create a new milestone <https://github.com/spack/spack/milestones>`_ for the next major
-   release.
+#. Create two new project boards:

-#. `Create a new label <https://github.com/spack/spack/labels>`_ for the next patch release.
+   * One for the next major release
+   * One for the next point release

-#. Move any optional tasks that are not done to the next milestone.
+#. Move any optional tasks that are not done to one of the new project boards.
+
+   In general, small bugfixes should go to the next point release. Major
+   features, refactors, and changes that could affect concretization should
+   go in the next major release.

 #. Create a branch for the release, based on ``develop``:

    .. code-block:: console

-      $ git checkout -b releases/v0.23 develop
+      $ git checkout -b releases/v0.15 develop

    For a version ``vX.Y.Z``, the branch's name should be
    ``releases/vX.Y``. That is, you should create a ``releases/vX.Y``
@@ -861,8 +860,8 @@ are:
    Create a pull request targeting the ``develop`` branch, bumping the major
    version in ``lib/spack/spack/__init__.py`` with a ``dev0`` release segment.
-   For instance when you have just released ``v0.23.0``, set the version
-   to ``(0, 24, 0, 'dev0')`` on ``develop``.
+   For instance when you have just released ``v0.15.0``, set the version
+   to ``(0, 16, 0, 'dev0')`` on ``develop``.

 #. Follow the steps in :ref:`publishing-releases`.
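For concreteness, the version bump mentioned above edits a tuple of this shape in ``lib/spack/spack/__init__.py`` (a sketch; the exact numbers depend on the release):

.. code-block:: python

   spack_version_info = (0, 16, 0, "dev0")
   spack_version = ".".join(str(v) for v in spack_version_info)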
@@ -871,52 +870,82 @@ are:
 #. Follow the steps in :ref:`announcing-releases`.

-.. _patch-releases:
+.. _point-releases:

 ^^^^^^^^^^^^^^^^^^^^^
-Making patch releases
+Making point releases
 ^^^^^^^^^^^^^^^^^^^^^

-To make the patch release process both efficient and transparent, we use a *backports pull request*
-which contains cherry-picked commits from the ``develop`` branch. The majority of the work is to
-cherry-pick the bug fixes, which ideally should be done as soon as they land on ``develop``:
-this ensures cherry-picking happens in order, and makes conflicts easier to resolve since the
-changes are fresh in the mind of the developer.
-
-The backports pull request is always titled ``Backports vX.Y.Z`` and is labelled ``backports``. It
-is opened from a branch named ``backports/vX.Y.Z`` and targets the ``releases/vX.Y`` branch.
-
-Whenever a pull request labelled ``vX.Y.Z`` is merged, cherry-pick the associated squashed commit
-on ``develop`` to the ``backports/vX.Y.Z`` branch. For pull requests that were rebased (or not
-squashed), cherry-pick each associated commit individually. Never force push to the
-``backports/vX.Y.Z`` branch.
-
-.. warning::
-
-   Sometimes you may **still** get merge conflicts even if you have
-   cherry-picked all the commits in order. This generally means there
-   is some other intervening pull request that the one you're trying
-   to pick depends on. In these cases, you'll need to make a judgment
-   call regarding those pull requests. Consider the number of affected
-   files and/or the resulting differences.
-
-   1. If the changes are small, you might just cherry-pick it.
-
-   2. If the changes are large, then you may decide that this fix is not
-      worth including in a patch release, in which case you should remove
-      the label from the pull request. Remember that large, manual backports
-      are seldom the right choice for a patch release.
-
-When all commits are cherry-picked in the ``backports/vX.Y.Z`` branch, make the patch
-release as follows:
-
-#. `Create a new label <https://github.com/spack/spack/labels>`_ ``vX.Y.{Z+1}`` for the next patch
-   release.
-
-#. Replace the label ``vX.Y.Z`` with ``vX.Y.{Z+1}`` for all PRs and issues that are not done.
-
-#. Manually push a single commit with commit message ``Set version to vX.Y.Z`` to the
-   ``backports/vX.Y.Z`` branch, that both bumps the Spack version number and updates the changelog:
+Assuming a project board has already been created and all required work
+completed, the steps to make the point release are:
+
+#. Create a new project board for the next point release.
+
+#. Move any optional tasks that are not done to the next project board.
+
+#. Check out the release branch (it should already exist).
+
+   For the ``X.Y.Z`` release, the release branch is called ``releases/vX.Y``.
+   For ``v0.15.1``, you would check out ``releases/v0.15``:
+
+   .. code-block:: console
+
+      $ git checkout releases/v0.15
+
+#. If a pull request to the release branch named ``Backports vX.Y.Z`` is not already
+   in the project, create it. This pull request ought to be created as early as
+   possible when working on a release project, so that we can build the release
+   commits incrementally, and identify potential conflicts at an early stage.
+
+#. Cherry-pick each pull request in the ``Done`` column of the release
+   project board onto the ``Backports vX.Y.Z`` pull request.
+
+   This is **usually** fairly simple since we squash the commits from the
+   vast majority of pull requests. That means there is only one commit
+   per pull request to cherry-pick. For example, `this pull request
+   <https://github.com/spack/spack/pull/15777>`_ has three commits, but
+   they were squashed into a single commit on merge. You can see the
+   commit that was created here:
+
+   .. image:: images/pr-commit.png
+
+   You can easily cherry pick it like this (assuming you already have the
+   release branch checked out):
+
+   .. code-block:: console
+
+      $ git cherry-pick 7e46da7
+
+   For pull requests that were rebased (or not squashed), you'll need to
+   cherry-pick each associated commit individually.
+
+   .. warning::
+
+      It is important to cherry-pick commits in the order they happened,
+      otherwise you can get conflicts while cherry-picking. When
+      cherry-picking look at the merge date,
+      **not** the number of the pull request or the date it was opened.
+
+      Sometimes you may **still** get merge conflicts even if you have
+      cherry-picked all the commits in order. This generally means there
+      is some other intervening pull request that the one you're trying
+      to pick depends on. In these cases, you'll need to make a judgment
+      call regarding those pull requests. Consider the number of affected
+      files and/or the resulting differences.
+
+      1. If the dependency changes are small, you might just cherry-pick it,
+         too. If you do this, add the task to the release board.
+
+      2. If the changes are large, then you may decide that this fix is not
+         worth including in a point release, in which case you should remove
+         the task from the release project.
+
+      3. You can always decide to manually back-port the fix to the release
+         branch if neither of the above options makes sense, but this can
+         require a lot of work. It's seldom the right choice.
+
+#. When all the commits from the project board are cherry-picked into
+   the ``Backports vX.Y.Z`` pull request, you can push a commit to:

    1. Bump the version in ``lib/spack/spack/__init__.py``.
    2. Update ``CHANGELOG.md`` with a list of the changes.
@@ -925,22 +954,20 @@ release as follows:
 release branch. See `the changelog from 0.14.1
 <https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.

-#. Make sure CI passes on the **backports pull request**, including:
+#. Merge the ``Backports vX.Y.Z`` PR with the **Rebase and merge** strategy. This
+   is needed to keep track in the release branch of all the commits that were
+   cherry-picked.
+
+#. Make sure CI passes on the release branch, including:

    * Regular unit tests
    * Build tests
    * The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_

-#. Merge the ``Backports vX.Y.Z`` PR with the **Rebase and merge** strategy. This
-   is needed to keep track in the release branch of all the commits that were
-   cherry-picked.
-
-#. Make sure CI passes on the last commit of the **release branch**.
-
-#. In the rare case you need to include additional commits in the patch release after the backports
-   PR is merged, it is best to delete the last commit ``Set version to vX.Y.Z`` from the release
-   branch with a single force push, open a new backports PR named ``Backports vX.Y.Z (2)``, and
-   repeat the process. Avoid repeated force pushes to the release branch.
+   If CI does not pass, you'll need to figure out why, and make changes
+   to the release branch until it does. You can make more commits, modify
+   or remove cherry-picked commits, or cherry-pick **more** from
+   ``develop`` to make this happen.

 #. Follow the steps in :ref:`publishing-releases`.
@@ -1015,31 +1042,25 @@ Updating `releases/latest`
 If the new release is the **highest** Spack release yet, you should
 also tag it as ``releases/latest``. For example, suppose the highest
-release is currently ``0.22.3``:
+release is currently ``0.15.3``:

-* If you are releasing ``0.22.4`` or ``0.23.0``, then you should tag
-  it with ``releases/latest``, as these are higher than ``0.22.3``.
+* If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
+  it with ``releases/latest``, as these are higher than ``0.15.3``.

 * If you are making a new release of an **older** major version of
-  Spack, e.g. ``0.21.4``, then you should not tag it as
+  Spack, e.g. ``0.14.4``, then you should not tag it as
   ``releases/latest`` (as there are newer major versions).

-To do so, first fetch the latest tag created on GitHub, since you may not have it locally:
+To tag ``releases/latest``, do this:

 .. code-block:: console

-   $ git fetch --force git@github.com:spack/spack vX.Y.Z
+   $ git checkout releases/vX.Y    # vX.Y is the new release's branch
+   $ git tag --force releases/latest
+   $ git push --force --tags

-Then tag ``vX.Y.Z`` as ``releases/latest`` and push the individual tag to GitHub.
-
-.. code-block:: console
-
-   $ git tag --force releases/latest vX.Y.Z
-   $ git push --force git@github.com:spack/spack releases/latest
-
-The ``--force`` argument to ``git tag`` makes ``git`` overwrite the existing ``releases/latest``
-tag with the new one. Do **not** use the ``--tags`` flag when pushing, since this will push *all*
-local tags.
+The ``--force`` argument to ``git tag`` makes ``git`` overwrite the existing
+``releases/latest`` tag with the new one.

 .. _announcing-releases:
@@ -1050,9 +1071,9 @@ Announcing a release
 We announce releases in all of the major Spack communication channels.
 Publishing the release takes care of GitHub. The remaining channels are
-X, Slack, and the mailing list. Here are the steps:
+Twitter, Slack, and the mailing list. Here are the steps:

-#. Announce the release on X.
+#. Announce the release on Twitter.

    * Compose the tweet on the ``@spackpm`` account per the
      ``spack-twitter`` slack channel.
@@ -5,56 +5,49 @@
 .. _environments:

-=====================================
-Environments (spack.yaml, spack.lock)
-=====================================
+=========================
+Environments (spack.yaml)
+=========================

-An environment is used to group a set of specs intended for some purpose
-to be built, rebuilt, and deployed in a coherent fashion. Environments
-define aspects of the installation of the software, such as:
-
-#. *which* specs to install;
-#. *how* those specs are configured; and
-#. *where* the concretized software will be installed.
-
-Aggregating this information into an environment for processing has advantages
-over the *à la carte* approach of building and loading individual Spack modules.
-
-With environments, you concretize, install, or load (activate) all of the
-specs with a single command. Concretization fully configures the specs
-and dependencies of the environment in preparation for installing the
-software. This is a more robust solution than ad-hoc installation scripts.
-And you can share an environment or even re-use it on a different computer.
-
-Environment definitions, especially *how* specs are configured, allow the
-software to remain stable and repeatable even when Spack packages are
-upgraded. Changes are only picked up when the environment is explicitly
-re-concretized.
-
-Defining *where* specs are installed supports a filesystem view of the
-environment. Yet Spack maintains a single installation of the software that
-can be re-used across multiple environments.
-
-Activating an environment determines *when* all of the associated (and
-installed) specs are loaded, so it limits the software loaded to those specs
-actually needed by the environment. Spack can even generate a script to
-load all modules related to an environment.
+An environment is used to group together a set of specs for the
+purpose of building, rebuilding and deploying in a coherent fashion.
+Environments provide a number of advantages over the *à la carte*
+approach of building and loading individual Spack modules:
+
+#. Environments separate the steps of (a) choosing what to
+   install, (b) concretizing, and (c) installing. This allows
+   Environments to remain stable and repeatable, even if Spack packages
+   are upgraded: specs are only re-concretized when the user
+   explicitly asks for it. It is even possible to reliably
+   transport environments between different computers running
+   different versions of Spack!
+#. Environments allow several specs to be built at once; a more robust
+   solution than ad-hoc scripts making multiple calls to ``spack
+   install``.
+#. An Environment that is built as a whole can be loaded as a whole
+   into the user environment. An Environment can be built to maintain
+   a filesystem view of its packages, and the environment can load
+   that view into the user environment at activation time. Spack can
+   also generate a script to load all modules related to an
+   environment.

 Other packaging systems also provide environments that are similar in
 some ways to Spack environments; for example, `Conda environments
 <https://conda.io/docs/user-guide/tasks/manage-environments.html>`_ or
 `Python Virtual Environments
 <https://docs.python.org/3/tutorial/venv.html>`_. Spack environments
-provide some distinctive features though:
+provide some distinctive features:

 #. A spec installed "in" an environment is no different from the same
-   spec installed anywhere else in Spack.
-#. Spack environments may contain more than one spec of the same
+   spec installed anywhere else in Spack. Environments are assembled
+   simply by collecting together a set of specs.
+#. Spack Environments may contain more than one spec of the same
    package.

 Spack uses a "manifest and lock" model similar to `Bundler gemfiles
-<https://bundler.io/man/gemfile.5.html>`_ and other package managers.
-The environment's user input file (or manifest) is named ``spack.yaml``.
-The lock file, which contains the fully configured and concretized specs,
-is named ``spack.lock``.
+<https://bundler.io/man/gemfile.5.html>`_ and other package
+managers. The user input file is named ``spack.yaml`` and the lock
+file is named ``spack.lock``.

 .. _environments-using:
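To ground the manifest-and-lock terminology, a minimal ``spack.yaml`` sketch (the specs are arbitrary examples):

.. code-block:: yaml

   spack:
     specs:
     - zlib
     - hdf5 +mpi
     view: true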
@@ -75,60 +68,55 @@ An environment is created by:

    $ spack env create myenv

-The directory ``$SPACK_ROOT/var/spack/environments/myenv`` is created
-to manage the environment.
+Spack then creates the directory ``var/spack/environments/myenv``.

 .. note::

-   All managed environments by default are stored in the
-   ``$SPACK_ROOT/var/spack/environments`` folder. This location can be changed
-   by setting the ``environments_root`` variable in ``config.yaml``.
+   All managed environments by default are stored in the ``var/spack/environments`` folder.
+   This location can be changed by setting the ``environments_root`` variable in ``config.yaml``.

-Spack creates the file ``spack.yaml``, hidden directory ``.spack-env``, and
-``spack.lock`` file under ``$SPACK_ROOT/var/spack/environments/myenv``. User
-interaction occurs through the ``spack.yaml`` file and the Spack commands
-that affect it. Metadata and, by default, the view are stored in the
-``.spack-env`` directory. When the environment is concretized, Spack creates
-the ``spack.lock`` file with the fully configured specs and dependencies for
+In the ``var/spack/environments/myenv`` directory, Spack creates the
+file ``spack.yaml`` and the hidden directory ``.spack-env``.
+
+Spack stores metadata in the ``.spack-env`` directory. User
+interaction will occur through the ``spack.yaml`` file and the Spack
+commands that affect it. When the environment is concretized, Spack
+will create a file ``spack.lock`` with the concrete information for
 the environment.

-The ``.spack-env`` subdirectory also contains:
+In addition to being the default location for the view associated with
+an Environment, the ``.spack-env`` directory also contains:

-* ``repo/``: A subdirectory acting as the repo consisting of the Spack
-  packages used in the environment. It allows the environment to build
-  the same, in theory, even on different versions of Spack with different
+* ``repo/``: A repo consisting of the Spack packages used in this
+  environment. This allows the environment to build the same, in
+  theory, even on different versions of Spack with different
   packages!
-* ``logs/``: A subdirectory containing the build logs for the packages
-  in this environment.
+* ``logs/``: A directory containing the build logs for the packages
+  in this Environment.

-Spack Environments can also be created from either the user input (or
-manifest) file or the lockfile. Create an environment from a manifest using:
+Spack Environments can also be created from either a manifest file
+(usually but not necessarily named ``spack.yaml``) or a lockfile.
+To create an Environment from a manifest:

 .. code-block:: console

    $ spack env create myenv spack.yaml

-The resulting environment is guaranteed to have the same root specs as
-the original but may concretize differently in the presence of different
-explicit or default configuration settings (e.g., a different version of
-Spack or for a different user account).
-
-Create an environment from a ``spack.lock`` file using:
+To create an Environment from a ``spack.lock`` lockfile:

 .. code-block:: console

    $ spack env create myenv spack.lock

-The resulting environment, when on the same or a compatible machine, is
-guaranteed to initially have the same concrete specs as the original.
-
-.. note::
-
-   Environment creation also accepts a full path to the file.
-
-   If the path is not under the ``$SPACK_ROOT/var/spack/environments``
-   directory then the source is referred to as an
-   :ref:`independent environment <independent_environments>`.
+Either of these commands can also take a full path to the
+initialization file.
+
+A Spack Environment created from a ``spack.yaml`` manifest is
+guaranteed to have the same root specs as the original Environment,
+but may concretize differently. A Spack Environment created from a
+``spack.lock`` lockfile is guaranteed to have the same concrete specs
+as the original Environment. Either may obviously then differ as the
+user modifies it.

 ^^^^^^^^^^^^^^^^^^^^^^^^^
 Activating an Environment
@@ -141,7 +129,7 @@ To activate an environment, use the following command:

    $ spack env activate myenv

 By default, the ``spack env activate`` will load the view associated
-with the environment into the user environment. The ``-v,
+with the Environment into the user environment. The ``-v,
 --with-view`` argument ensures this behavior, and the ``-V,
 --without-view`` argument activates the environment without changing
 the user environment variables.
@@ -155,10 +143,11 @@ user's prompt to begin with the environment name in brackets.

    [myenv] $ ...

 The ``activate`` command can also be used to create a new environment, if it is
-not already defined, by adding the ``--create`` flag. Managed and independent
-environments can both be created using the same flags that `spack env create`
-accepts. If an environment already exists then spack will simply activate it
-and ignore the create-specific flags.
+not already defined, by adding the ``--create`` flag. Managed and anonymous
+environments (anonymous environments are explained in the next section)
+can both be created using the same flags that `spack env create` accepts.
+If an environment already exists then spack will simply activate it and
+ignore the create-specific flags.

 .. code-block:: console
@@ -183,50 +172,34 @@ or the shortcut alias

 If the environment was activated with its view, deactivating the
 environment will remove the view from the user environment.

-.. _independent_environments:
-
-^^^^^^^^^^^^^^^^^^^^^^^^
-Independent Environments
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-Independent environments can be located in any directory outside of Spack.
-
-.. note::
-
-   When uninstalling packages, Spack asks the user to confirm the removal of packages
-   that are still used in a managed environment. This is not the case for independent
-   environments.
-
-To create an independent environment, use one of the following commands:
+^^^^^^^^^^^^^^^^^^^^^^
+Anonymous Environments
+^^^^^^^^^^^^^^^^^^^^^^
+
+Any directory can be treated as an environment if it contains a file
+``spack.yaml``. To load an anonymous environment, use:

 .. code-block:: console

-   $ spack env create --dir my_env
-   $ spack env create ./my_env
+   $ spack env activate -d /path/to/directory

-As a shorthand, you can also create an independent environment upon activation if it does not
-already exist:
+Anonymous specs can be created in place using the command:

 .. code-block:: console

-   $ spack env activate --create ./my_env
+   $ spack env create -d .

-For convenience, Spack can also place an independent environment in a temporary directory for you:
+In this case Spack simply creates a ``spack.yaml`` file in the requested
+directory.

-.. code-block:: console
-
-   $ spack env activate --temp
-
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-Environment-Aware Commands
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Spack commands are environment-aware. For example, the ``find``
-command shows only the specs in the active environment if an
-environment has been activated. Otherwise it shows all specs in
-the Spack instance. The same rule applies to the ``install`` and
-``uninstall`` commands.
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Environment Sensitive Commands
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Spack commands are environment sensitive. For example, the ``find``
+command shows only the specs in the active Environment if an
+Environment has been activated. Similarly, the ``install`` and
+``uninstall`` commands act on the active environment.

 .. code-block:: console
@@ -271,33 +244,32 @@ the Spack instance. The same rule applies to the ``install`` and

 Note that when we installed the abstract spec ``zlib@1.2.8``, it was
-presented as a root of the environment. All explicitly installed
-packages will be listed as roots of the environment.
+presented as a root of the Environment. All explicitly installed
+packages will be listed as roots of the Environment.

 All of the Spack commands that act on the list of installed specs are
-environment-aware in this way, including ``install``,
-``uninstall``, ``find``, ``extensions``, etcetera. In the
+Environment-sensitive in this way, including ``install``,
+``uninstall``, ``find``, ``extensions``, and more. In the
 :ref:`environment-configuration` section we will discuss
-environment-aware commands further.
+Environment-sensitive commands further.

 ^^^^^^^^^^^^^^^^^^^^^
 Adding Abstract Specs
 ^^^^^^^^^^^^^^^^^^^^^

-An abstract spec is the user-specified spec before Spack applies
-defaults or dependency information.
+An abstract spec is the user-specified spec before Spack has applied
+any defaults or dependency information.

-Users can add abstract specs to an environment using the ``spack add``
-command. The most important component of an environment is a list of
+Users can add abstract specs to an Environment using the ``spack add``
+command. The most important component of an Environment is a list of
 abstract specs.

-Adding a spec adds it as a root spec of the environment in the user
-input file (``spack.yaml``). It does not affect the concrete specs
-in the lock file (``spack.lock``) and it does not install the spec.
+Adding a spec adds to the manifest (the ``spack.yaml`` file), which is
+used to define the roots of the Environment, but does not affect the
+concrete specs in the lockfile, nor does it install the spec.

-The ``spack add`` command is environment-aware. It adds the spec to the
-currently active environment. An error is generated if there isn't an
-active environment. All environment-aware commands can also
+The ``spack add`` command is environment aware. It adds to the
+currently active environment. All environment aware commands can also
 be called using the ``spack -e`` flag to specify the environment.

 .. code-block:: console
@@ -317,11 +289,11 @@ or

 Concretizing
 ^^^^^^^^^^^^

-Once user specs have been added to an environment, they can be concretized.
-There are three different modes of operation to concretize an environment,
-explained in detail in :ref:`environments_concretization_config`.
-Regardless of which mode of operation is chosen, the following
-command will ensure all of the root specs are concretized according to the
+Once some user specs have been added to an environment, they can be concretized.
+There are at the moment three different modes of operation to concretize an environment,
+which are explained in detail in :ref:`environments_concretization_config`.
+Regardless of which mode of operation has been chosen, the following
+command will ensure all the root specs are concretized according to the
 constraints that are prescribed in the configuration:

 .. code-block:: console
@@ -330,15 +302,16 @@ constraints that are prescribed in the configuration:

 In the case of specs that are not concretized together, the command
 above will concretize only the specs that were added and not yet
-concretized. Forcing a re-concretization of all of the specs can be done
-by adding the ``-f`` option:
+concretized. Forcing a re-concretization of all the specs can be done
+instead with this command:

 .. code-block:: console

    [myenv]$ spack concretize -f

-Without the option, Spack guarantees that already concretized specs are
-unchanged in the environment.
+When the ``-f`` flag is not used to reconcretize all specs, Spack
+guarantees that already concretized specs are unchanged in the
+environment.

 The ``concretize`` command does not install any packages. For packages
 that have already been installed outside of the environment, the
@@ -371,16 +344,16 @@ installed specs using the ``-c`` (``--concretized``) flag.

 Installing an Environment
 ^^^^^^^^^^^^^^^^^^^^^^^^^

-In addition to adding individual specs to an environment, one
-can install the entire environment at once using the command
+In addition to installing individual specs into an Environment, one
+can install the entire Environment at once using the command

 .. code-block:: console

    [myenv]$ spack install

-If the environment has been concretized, Spack will install the
-concretized specs. Otherwise, ``spack install`` will concretize
-the environment before installing the concretized specs.
+If the Environment has been concretized, Spack will install the
+concretized specs. Otherwise, ``spack install`` will first concretize
+the Environment and then install the concretized specs.

 .. note::
@@ -401,17 +374,17 @@ the environment before installing the concretized specs.

 As it installs, ``spack install`` creates symbolic links in the
-``logs/`` directory in the environment, allowing for easy inspection
+``logs/`` directory in the Environment, allowing for easy inspection
 of build logs related to that environment. The ``spack install``
 command also stores a Spack repo containing the ``package.py`` file
 used at install time for each package in the ``repos/`` directory in
-the environment.
+the Environment.

 The ``--no-add`` option can be used in a concrete environment to tell
 spack to install specs already present in the environment but not to
 add any new root specs to the environment. For root specs provided
 to ``spack install`` on the command line, ``--no-add`` is the default,
-while for dependency specs, it is optional. In other
+while for dependency specs on the other hand, it is optional. In other
 words, if there is an unambiguous match in the active concrete environment
 for a root spec provided to ``spack install`` on the command line, spack
 does not require you to specify the ``--no-add`` option to prevent the spec
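For example, a hedged sketch of the ``--no-add`` behavior described above, in an already-concretized environment (``mpich`` is an arbitrary example spec):

.. code-block:: console

   [myenv]$ spack install --no-add mpich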
@@ -425,22 +398,12 @@ Developing Packages in a Spack Environment

 The ``spack develop`` command allows one to develop Spack packages in
 an environment. It requires a spec containing a concrete version, and
-will configure Spack to install the package from local source.
-
-If a version is not provided from the command line interface then spack
-will automatically pick the highest version the package has defined.
-This means any infinity versions (``develop``, ``main``, ``stable``) will be
-preferred in this selection process.
-
-By default, ``spack develop`` will also clone the package to a subdirectory in the
-environment for the local source. This package will have a special variant ``dev_path``
+will configure Spack to install the package from local source. By
+default, it will also clone the package to a subdirectory in the
+environment. This package will have a special variant ``dev_path``
 set, and Spack will ensure the package and its dependents are rebuilt
 any time the environment is installed if the package's local source
-code has been modified. Spack's native implementation to check for modifications
-is to check if ``mtime`` is newer than the installation.
-A custom check can be created by overriding the ``detect_dev_src_change`` method
-in your package class. This is particularly useful for projects that use custom
-Spack repos to drive development and want to optimize performance.
-
-Spack ensures that all instances of a
+code has been modified. Spack ensures that all instances of a
 developed package in the environment are concretized to match the
 version (and other constraints) passed as the spec argument to the
 ``spack develop`` command.
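A short hypothetical session illustrating the workflow (the package name and version are illustrative):

.. code-block:: console

   [myenv]$ spack develop zlib@1.3
   [myenv]$ spack install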
@@ -450,7 +413,7 @@ also be used as valid concrete versions (see :ref:`version-specifier`).
 This means that for a package ``foo``, ``spack develop foo@git.main`` will clone
 the ``main`` branch of the package, and ``spack install`` will install from
 that git clone if ``foo`` is in the environment.
-Further development on ``foo`` can be tested by re-installing the environment,
+Further development on ``foo`` can be tested by reinstalling the environment,
 and eventually committed and pushed to the upstream git repo.

 If the package being developed supports out-of-source builds then users can use the
@@ -486,125 +449,6 @@ Sourcing that file in Bash will make the environment available to the
user; and can be included in ``.bashrc`` files, etc. The ``loads`` user; and can be included in ``.bashrc`` files, etc. The ``loads``
file may also be copied out of the environment, renamed, etc. file may also be copied out of the environment, renamed, etc.
.. _environment_include_concrete:
------------------------------
Included Concrete Environments
------------------------------
Spack environments can create an environment based on information in already
established environments. You can think of it as a combination of existing
environments. It will gather information from the existing environment's
``spack.lock`` and use that during the creation of this included concrete
environment. When an included concrete environment is created, it will generate
a ``spack.lock`` file for the newly created environment.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Creating included environments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To create a combined concrete environment, you must have at least one existing
concrete environment. You will use the command ``spack env create`` with the
argument ``--include-concrete`` followed by the name or path of the environment
you'd like to include. Here is an example of how to create a combined environment
from the command line.
.. code-block:: console
$ spack env create myenv
$ spack -e myenv add python
$ spack -e myenv concretize
$ spack env create --include-concrete myenv included_env
You can also include an environment directly in the ``spack.yaml`` file. It
involves adding the ``include_concrete`` heading in the yaml followed by the
absolute path to the independent environments.
.. code-block:: yaml
spack:
specs: []
concretizer:
unify: true
include_concrete:
- /absolute/path/to/environment1
- /absolute/path/to/environment2
Once the ``spack.yaml`` has been updated, you must concretize the environment to
get the concrete specs from the included environments.
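For example, from the directory containing the updated ``spack.yaml``:

.. code-block:: console

   $ spack -e . concretize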
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Updating an included environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If changes were made to the base environment and you want them reflected in the
included environment, you will need to reconcretize both the base environment and the
included environment for the change to take effect. For example:
.. code-block:: console
$ spack env create myenv
$ spack -e myenv add python
$ spack -e myenv concretize
$ spack env create --include-concrete myenv included_env
$ spack -e myenv find
==> In environment myenv
==> Root specs
python
==> 0 installed packages
$ spack -e included_env find
==> In environment included_env
==> No root specs
==> Included specs
python
==> 0 installed packages
Here we see that ``included_env`` has access to the python package through
the ``myenv`` environment. But if we were to add another spec to ``myenv``,
``included_env`` would not be able to access the new information.
.. code-block:: console
$ spack -e myenv add perl
$ spack -e myenv concretize
$ spack -e myenv find
==> In environment myenv
==> Root specs
perl python
==> 0 installed packages
$ spack -e included_env find
==> In environment included_env
==> No root specs
==> Included specs
python
==> 0 installed packages
It isn't until you run the ``spack concretize`` command that the combined
environment will get the updated information from the reconcretized base environment.
.. code-block:: console
$ spack -e included_env concretize
$ spack -e included_env find
==> In environment included_env
==> No root specs
==> Included specs
perl python
==> 0 installed packages
.. _environment-configuration: .. _environment-configuration:
------------------------ ------------------------
@@ -635,7 +479,7 @@ manipulate configuration inline in the ``spack.yaml`` file.
Inline configurations Inline configurations
^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^
Inline environment-scope configuration is done using the same yaml Inline Environment-scope configuration is done using the same yaml
format as standard Spack configuration scopes, covered in the format as standard Spack configuration scopes, covered in the
:ref:`configuration` section. Each section is contained under a :ref:`configuration` section. Each section is contained under a
top-level yaml object with its name. For example, a ``spack.yaml`` top-level yaml object with its name. For example, a ``spack.yaml``
@@ -660,7 +504,7 @@ Included configurations
Spack environments allow an ``include`` heading in their yaml Spack environments allow an ``include`` heading in their yaml
schema. This heading pulls in external configuration files and applies schema. This heading pulls in external configuration files and applies
them to the environment. them to the Environment.
.. code-block:: yaml .. code-block:: yaml
@@ -673,9 +517,6 @@ them to the environment.
Environments can include files or URLs. File paths can be relative or Environments can include files or URLs. File paths can be relative or
absolute. URLs include the path to the text for individual files or absolute. URLs include the path to the text for individual files or
can be the path to a directory containing configuration files. can be the path to a directory containing configuration files.
Spack supports ``file``, ``http``, ``https`` and ``ftp`` protocols (or
schemes). Spack-specific, environment and user path variables may be
used in these paths. See :ref:`config-file-variables` for more information.
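For illustration, an ``include`` section might look like the following sketch, where the file paths and URL are placeholders:

.. code-block:: yaml

   spack:
     include:
     - relative/path/to/config.yaml
     - https://example.com/path/to/raw/config/compilers.yaml
     - /absolute/path/to/packages.yaml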
^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
Configuration precedence Configuration precedence
@@ -690,7 +531,7 @@ have higher precedence, as the included configs are applied in reverse order.
Manually Editing the Specs List Manually Editing the Specs List
------------------------------- -------------------------------
The list of abstract/root specs in the environment is maintained in The list of abstract/root specs in the Environment is maintained in
the ``spack.yaml`` manifest under the heading ``specs``. the ``spack.yaml`` manifest under the heading ``specs``.
.. code-block:: yaml .. code-block:: yaml
@@ -798,7 +639,7 @@ evaluates to the cross-product of those specs. Spec matrices also
contain an ``excludes`` directive, which eliminates certain contain an ``excludes`` directive, which eliminates certain
combinations from the evaluated result. combinations from the evaluated result.
The following two environment manifests are identical: The following two Environment manifests are identical:
.. code-block:: yaml .. code-block:: yaml
@@ -873,7 +714,7 @@ files are identical.
In short files like the example, it may be easier to simply list the In short files like the example, it may be easier to simply list the
included specs. However, for more complicated examples involving many included specs. However, for more complicated examples involving many
packages across many toolchains, separately factored lists make packages across many toolchains, separately factored lists make
environments substantially more manageable. Environments substantially more manageable.
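For instance, a factored manifest built from definitions and a matrix might look like the following sketch (the package and compiler choices are illustrative):

.. code-block:: yaml

   spack:
     definitions:
     - pkgs: [hdf5, zlib]
     - compilers: ['%gcc', '%clang']
     specs:
     - matrix:
       - [$pkgs]
       - [$compilers]

This evaluates to the cross-product ``hdf5%gcc``, ``hdf5%clang``, ``zlib%gcc``, and ``zlib%clang``.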
Additionally, the ``-l`` option to the ``spack add`` command allows Additionally, the ``-l`` option to the ``spack add`` command allows
one to add to named lists in the definitions section of the manifest one to add to named lists in the definitions section of the manifest
@@ -892,7 +733,7 @@ named list ``compilers`` is ``['%gcc', '%clang', '%intel']`` on
spack: spack:
definitions: definitions:
- compilers: ['%gcc', '%clang'] - compilers: ['%gcc', '%clang']
- when: arch.satisfies('target=x86_64:') - when: arch.satisfies('x86_64:')
compilers: ['%intel'] compilers: ['%intel']
.. note:: .. note::
@@ -959,90 +800,37 @@ For example, the following environment has three root packages:
This allows for a much-needed reduction in redundancy between packages This allows for a much-needed reduction in redundancy between packages
and constraints. and constraints.
----------------
Filesystem Views
----------------
----------------- Spack Environments can define filesystem views, which provide a direct access point
Environment Views for software similar to the directory hierarchy that might exist under ``/usr/local``.
----------------- Filesystem views are updated every time the environment is written out to the lock
file ``spack.lock``, so the concrete environment and the view are always compatible.
Spack Environments can have an associated filesystem view, which is a directory The files of the view's installed packages are brought into the view by symbolic or
with a more traditional structure ``<view>/bin``, ``<view>/lib``, ``<view>/include`` hard links, referencing the original Spack installation, or by copy.
in which all files of the installed packages are linked.
By default a view is created for each environment, thanks to the ``view: true``
option in the ``spack.yaml`` manifest file:
.. code-block:: yaml
spack:
specs: [perl, python]
view: true
The view is created in a hidden directory ``.spack-env/view`` relative to the environment.
If you've used ``spack env activate``, you may have already interacted with this view. Spack
prepends its ``<view>/bin`` dir to ``PATH`` when the environment is activated, so that
you can directly run executables from all installed packages in the environment.
Views are highly customizable: you can control where they are put, modify their structure,
include and exclude specs, change how files are linked, and you can even generate multiple
views for a single environment.
.. _configuring_environment_views: .. _configuring_environment_views:
^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Minimal view configuration Configuration in ``spack.yaml``
^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The minimal configuration The Spack Environment manifest file has a top-level keyword
``view``. Each entry under that heading is a **view descriptor**, headed
.. code-block:: yaml by a name. Any number of views may be defined under the ``view`` heading.
The view descriptor contains the root of the view, and
spack: optionally the projections for the view, ``select`` and
# ... ``exclude`` lists for the view and link information via ``link`` and
view: true
lets Spack generate a single view with default settings under the
``.spack-env/view`` directory of the environment.
Another short way to configure a view is to specify just where to put it:
.. code-block:: yaml
spack:
# ...
view: /path/to/view
Views can also be disabled by setting ``view: false``.
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Advanced view configuration
^^^^^^^^^^^^^^^^^^^^^^^^^^^
One or more **view descriptors** can be defined under ``view``, keyed by a name.
The example from the previous section with ``view: /path/to/view`` is equivalent
to defining a view descriptor named ``default`` with a ``root`` attribute:
.. code-block:: yaml
spack:
# ...
view:
default: # name of the view
root: /path/to/view # view descriptor attribute
The ``default`` view descriptor name is special: when you ``spack env activate`` your
environment, this view will be used to update (among other things) your ``PATH``
variable.
View descriptors must contain the root of the view, and optionally projections,
``select`` and ``exclude`` lists and link information via ``link`` and
``link_type``. ``link_type``.
As a more advanced example, in the following manifest For example, in the following manifest
file snippet we define a view named ``mpis``, rooted at file snippet we define a view named ``mpis``, rooted at
``/path/to/view`` in which all projections use the package name, ``/path/to/view`` in which all projections use the package name,
version, and compiler name to determine the path for a given version, and compiler name to determine the path for a given
package. This view selects all packages that depend on MPI, and package. This view selects all packages that depend on MPI, and
excludes those built with the GCC compiler at version 18.5. excludes those built with the PGI compiler at version 18.5.
The root specs with their (transitive) link and run type dependencies The root specs with their (transitive) link and run type dependencies
will be put in the view due to the ``link: all`` option, will be put in the view due to the ``link: all`` option,
and the files in the view will be symlinks to the spack install and the files in the view will be symlinks to the spack install
@@ -1056,7 +844,7 @@ directories.
mpis: mpis:
root: /path/to/view root: /path/to/view
select: [^mpi] select: [^mpi]
exclude: ['%gcc@18.5'] exclude: ['%pgi@18.5']
projections: projections:
all: '{name}/{version}-{compiler.name}' all: '{name}/{version}-{compiler.name}'
link: all link: all
@@ -1082,14 +870,63 @@ of ``hardlink`` or ``copy``.
when the environment is not activated, and linked libraries will be located when the environment is not activated, and linked libraries will be located
*outside* of the view thanks to rpaths. *outside* of the view thanks to rpaths.
There are two shorthands for environments with a single view. If the
environment at ``/path/to/env`` has a single view, with a root at
``/path/to/env/.spack-env/view``, with default selection and exclusion
and the default projection, we can put ``view: True`` in the
environment manifest. Similarly, if the environment has a view with a
different root, but default selection, exclusion, and projections, the
manifest can say ``view: /path/to/view``. These views are
automatically named ``default``, so that
.. code-block:: yaml
spack:
# ...
view: True
is equivalent to
.. code-block:: yaml
spack:
# ...
view:
default:
root: .spack-env/view
and
.. code-block:: yaml
spack:
# ...
view: /path/to/view
is equivalent to
.. code-block:: yaml
spack:
# ...
view:
default:
root: /path/to/view
By default, Spack environments are configured with ``view: True`` in
the manifest. Environments can be configured without views using
``view: False``. For backwards compatibility reasons, environments
with no ``view`` key are treated the same as ``view: True``.
From the command line, the ``spack env create`` command takes an From the command line, the ``spack env create`` command takes an
argument ``--with-view [PATH]`` that sets the path for a single, default argument ``--with-view [PATH]`` that sets the path for a single, default
view. If no path is specified, the default path is used (``view: view. If no path is specified, the default path is used (``view:
true``). The argument ``--without-view`` can be used to create an True``). The argument ``--without-view`` can be used to create an
environment without any view configured. environment without any view configured.
The ``spack env view`` command can be used to manage the views The ``spack env view`` command can be used to manage the views
of an environment. The subcommand ``spack env view enable`` will add a of an Environment. The subcommand ``spack env view enable`` will add a
view named ``default`` to an environment. It takes an optional view named ``default`` to an environment. It takes an optional
argument to specify the path for the new default view. The subcommand argument to specify the path for the new default view. The subcommand
``spack env view disable`` will remove the view named ``default`` from ``spack env view disable`` will remove the view named ``default`` from
@@ -1151,18 +988,11 @@ the projection under ``all`` before reaching those entries.
Activating environment views Activating environment views
^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``spack env activate <env>`` command has two effects: The ``spack env activate`` command will put the default view for the
environment into the user's path, in addition to activating the
1. It activates the environment so that further Spack commands such environment for Spack commands. The arguments ``-v,--with-view`` and
as ``spack install`` will run in the context of the environment. ``-V,--without-view`` can be used to tune this behavior. The default
2. It activates the view so that environment variables such as behavior is to activate with the environment view if there is one.
``PATH`` are updated to include the view.
Without further arguments, the ``default`` view of the environment is
activated. If a view with a different name has to be activated,
``spack env activate --with-view <name> <env>`` can be
used instead. You can also activate the environment without modifying
further environment variables using ``--without-view``.
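For example (the environment name ``myenv`` and view name ``mpis`` are hypothetical):

.. code-block:: console

   $ spack env activate --with-view mpis myenv
   $ spack env activate --without-view myenv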
The environment variables affected by the ``spack env activate`` The environment variables affected by the ``spack env activate``
command and the paths that are used to update them are determined by command and the paths that are used to update them are determined by
@@ -1185,8 +1015,8 @@ relevant variable if the path exists. For this reason, it is not
recommended to use non-default projections with the default view of an recommended to use non-default projections with the default view of an
environment. environment.
The ``spack env deactivate`` command will remove the active view of The ``spack env deactivate`` command will remove the default view of
the Spack environment from the user's environment variables. the environment from the user's path.
.. _env-generate-depfile: .. _env-generate-depfile:
@@ -1203,7 +1033,7 @@ other targets to depend on the environment installation.
A typical workflow is as follows: A typical workflow is as follows:
.. code-block:: console .. code:: console
spack env create -d . spack env create -d .
spack -e . add perl spack -e . add perl
@@ -1257,7 +1087,7 @@ gets installed and is available for use in the ``env`` target.
$(SPACK) -e . env depfile -o $@ --make-prefix spack $(SPACK) -e . env depfile -o $@ --make-prefix spack
env: spack/env env: spack/env
$(info environment installed!) $(info Environment installed!)
clean: clean:
rm -rf spack.lock env.mk spack/ rm -rf spack.lock env.mk spack/
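As a usage sketch, once the environment in the current directory is concretized, building everything is a plain make invocation:

.. code-block:: console

   $ spack -e . concretize
   $ make -j8 env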
@@ -1296,7 +1126,7 @@ its dependencies. This can be useful when certain flags should only apply to
dependencies. Below we show a use case where a spec is installed with verbose dependencies. Below we show a use case where a spec is installed with verbose
output (``spack install --verbose``) while its dependencies are installed silently: output (``spack install --verbose``) while its dependencies are installed silently:
.. code-block:: console .. code:: console
$ spack env depfile -o Makefile $ spack env depfile -o Makefile
@@ -1318,7 +1148,7 @@ This can be accomplished through the generated ``[<prefix>/]SPACK_PACKAGE_IDS``
variable. Assuming we have an active and concrete environment, we generate the variable. Assuming we have an active and concrete environment, we generate the
associated ``Makefile`` with a prefix ``example``: associated ``Makefile`` with a prefix ``example``:
.. code-block:: console .. code:: console
$ spack env depfile -o env.mk --make-prefix example $ spack env depfile -o env.mk --make-prefix example
@@ -1345,7 +1175,7 @@ index once every package is pushed. Note how this target uses the generated
example/push/%: example/install/% example/push/%: example/install/%
@mkdir -p $(dir $@) @mkdir -p $(dir $@)
$(info About to push $(SPEC) to a buildcache) $(info About to push $(SPEC) to a buildcache)
$(SPACK) -e . buildcache push --only=package $(BUILDCACHE_DIR) /$(HASH) $(SPACK) -e . buildcache push --allow-root --only=package $(BUILDCACHE_DIR) /$(HASH)
@touch $@ @touch $@
push: $(addprefix example/push/,$(example/SPACK_PACKAGE_IDS)) push: $(addprefix example/push/,$(example/SPACK_PACKAGE_IDS))

View File

@@ -35,7 +35,7 @@ A build matrix showing which packages are working on which systems is shown belo
.. code-block:: console .. code-block:: console
apt update apt update
apt install bzip2 ca-certificates file g++ gcc gfortran git gzip lsb-release patch python3 tar unzip xz-utils zstd apt install build-essential ca-certificates coreutils curl environment-modules gfortran git gpg lsb-release python3 python3-distutils python3-venv unzip zip
.. tab-item:: RHEL .. tab-item:: RHEL
@@ -43,14 +43,14 @@ A build matrix showing which packages are working on which systems is shown belo
dnf install epel-release dnf install epel-release
dnf group install "Development Tools" dnf group install "Development Tools"
dnf install gcc-gfortran redhat-lsb-core python3 unzip dnf install curl findutils gcc-gfortran gnupg2 hostname iproute redhat-lsb-core python3 python3-pip python3-setuptools unzip python3-boto3
.. tab-item:: macOS Brew .. tab-item:: macOS Brew
.. code-block:: console .. code-block:: console
brew update brew update
brew install gcc git zip brew install curl gcc git gnupg zip
------------ ------------
Installation Installation
@@ -61,15 +61,10 @@ Getting Spack is easy. You can clone it from the `github repository
.. code-block:: console .. code-block:: console
$ git clone -c feature.manyFiles=true --depth=2 https://github.com/spack/spack.git $ git clone -c feature.manyFiles=true https://github.com/spack/spack.git
This will create a directory called ``spack``. This will create a directory called ``spack``.
.. note::
``-c feature.manyFiles=true`` improves git's performance on repositories with 1,000+ files.
``--depth=2`` prunes the git history to reduce the size of the Spack installation.
.. _shell-support: .. _shell-support:
^^^^^^^^^^^^^ ^^^^^^^^^^^^^
@@ -283,6 +278,10 @@ compilers`` or ``spack compiler list``:
intel@14.0.1 intel@13.0.1 intel@12.1.2 intel@10.1 intel@14.0.1 intel@13.0.1 intel@12.1.2 intel@10.1
-- clang ------------------------------------------------------- -- clang -------------------------------------------------------
clang@3.4 clang@3.3 clang@3.2 clang@3.1 clang@3.4 clang@3.3 clang@3.2 clang@3.1
-- pgi ---------------------------------------------------------
pgi@14.3-0 pgi@13.2-0 pgi@12.1-0 pgi@10.9-0 pgi@8.0-1
pgi@13.10-0 pgi@13.1-1 pgi@11.10-0 pgi@10.2-0 pgi@7.1-3
pgi@13.6-0 pgi@12.8-0 pgi@11.1-0 pgi@9.0-4 pgi@7.0-6
Any of these compilers can be used to build Spack packages. More on Any of these compilers can be used to build Spack packages. More on
how this is done is in :ref:`sec-specs`. how this is done is in :ref:`sec-specs`.
@@ -479,13 +478,6 @@ prefix, you can add them to the ``extra_attributes`` field. Similarly,
all other fields from the compilers config can be added to the all other fields from the compilers config can be added to the
``extra_attributes`` field for an external representing a compiler. ``extra_attributes`` field for an external representing a compiler.
Note that the format for the ``paths`` field in the
``extra_attributes`` section is different from that in the ``compilers``
config. For compilers configured as external packages, the section is
named ``compilers`` and the dictionary maps language names (``c``,
``cxx``, ``fortran``) to paths, rather than using the names ``cc``,
``fc``, and ``f77``.
.. code-block:: yaml .. code-block:: yaml
packages: packages:
@@ -501,10 +493,11 @@ named ``compilers`` and the dictionary maps language names (``c``,
- spec: llvm+clang@15.0.0 arch=linux-rhel8-skylake - spec: llvm+clang@15.0.0 arch=linux-rhel8-skylake
prefix: /usr prefix: /usr
extra_attributes: extra_attributes:
compilers: paths:
c: /usr/bin/clang-with-suffix cc: /usr/bin/clang-with-suffix
cxx: /usr/bin/clang++-with-extra-info cxx: /usr/bin/clang++-with-extra-info
fortran: /usr/bin/gfortran fc: /usr/bin/gfortran
f77: /usr/bin/gfortran
extra_rpaths: extra_rpaths:
- /usr/lib/llvm/ - /usr/lib/llvm/
@@ -802,6 +795,65 @@ flags to the ``icc`` command:
spec: intel@15.0.24.4.9.3 spec: intel@15.0.24.4.9.3
^^^
PGI
^^^
PGI comes with two sets of compilers for C++ and Fortran,
distinguishable by their names. "Old" compilers:
.. code-block:: yaml
cc: /soft/pgi/15.10/linux86-64/15.10/bin/pgcc
cxx: /soft/pgi/15.10/linux86-64/15.10/bin/pgCC
f77: /soft/pgi/15.10/linux86-64/15.10/bin/pgf77
fc: /soft/pgi/15.10/linux86-64/15.10/bin/pgf90
"New" compilers:
.. code-block:: yaml
cc: /soft/pgi/15.10/linux86-64/15.10/bin/pgcc
cxx: /soft/pgi/15.10/linux86-64/15.10/bin/pgc++
f77: /soft/pgi/15.10/linux86-64/15.10/bin/pgfortran
fc: /soft/pgi/15.10/linux86-64/15.10/bin/pgfortran
Older installations of PGI contain just the old compilers, whereas
newer installations contain the old and the new. The new compiler is
considered preferable, as some packages
(``hdf``) will not build with the old compiler.
When auto-detecting a PGI compiler, there are cases where Spack will
find the old compilers, when you really want it to find the new
compilers. It is best to check ``compilers.yaml``, and if the old
compilers are being used, change ``pgf77`` and ``pgf90`` to
``pgfortran``.
Other issues:
* There are reports that some packages will not build with PGI,
including ``libpciaccess`` and ``openssl``. A workaround is to
build these packages with another compiler and then use them as
dependencies for PGI-build packages. For example:
.. code-block:: console
$ spack install openmpi%pgi ^libpciaccess%gcc
* PGI requires a license to use; see :ref:`licensed-compilers` for more
information on installation.
.. note::
It is believed the problem with HDF 4 is that everything is
compiled with the ``F77`` compiler, but at some point some Fortran
90 code slipped in there. So compilers that can handle both FORTRAN
77 and Fortran 90 (``gfortran``, ``pgfortran``, etc) are fine. But
compilers specific to one or the other (``pgf77``, ``pgf90``) won't
work.
^^^ ^^^
NAG NAG
^^^ ^^^
@@ -1306,6 +1358,187 @@ This will write the private key to the file `dinosaur.priv`.
or for help on an issue or the Spack slack. or for help on an issue or the Spack slack.
.. _cray-support:
-------------
Spack on Cray
-------------
Spack differs slightly when used on a Cray system. The architecture spec
can differentiate between the front-end and back-end processor and operating system.
For example, on Edison at NERSC, the back-end target processor
is "Ivy Bridge", so you can specify to use the back-end this way:
.. code-block:: console
$ spack install zlib target=ivybridge
You can also use the operating system to build against the back-end:
.. code-block:: console
$ spack install zlib os=CNL10
Notice that the name includes both the operating system name and the major
version number concatenated together.
Alternatively, if you want to build something for the front-end,
you can specify the front-end target processor. The processor for a login node
on Edison is "Sandy bridge" so we specify on the command line like so:
.. code-block:: console
$ spack install zlib target=sandybridge
And the front-end operating system is:
.. code-block:: console
$ spack install zlib os=SuSE11
^^^^^^^^^^^^^^^^^^^^^^^
Cray compiler detection
^^^^^^^^^^^^^^^^^^^^^^^
Spack can detect compilers using two methods. For the front-end, we treat
everything the same. The difference lies in back-end compiler detection.
Back-end compiler detection is done via the Tcl ``module avail`` command.
Once it detects the compiler it writes the appropriate PrgEnv and compiler
module name to compilers.yaml and sets the paths to each compiler with Cray\'s
compiler wrapper names (i.e. cc, CC, ftn). During build time, Spack will load
the correct PrgEnv and compiler module and will call the appropriate wrapper.
The compilers.yaml config file will also differ. There is a
modules section that is filled with the compiler's Programming Environment
and module name. On other systems, this field is empty []:
.. code-block:: yaml
- compiler:
modules:
- PrgEnv-intel
- intel/15.0.109
As mentioned earlier, the compiler paths will look different on a Cray system.
Since most compilers are invoked using cc, CC and ftn, the paths for each
compiler are replaced with their respective Cray compiler wrapper names:
.. code-block:: yaml
paths:
cc: cc
cxx: CC
f77: ftn
fc: ftn
These wrapper names are used in place of explicit paths to the compiler
executables, which allows Spack to call the Cray compiler wrappers during build time.
For more on compiler configuration, check out :ref:`compiler-config`.
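Putting the ``modules`` and ``paths`` pieces together, a complete compiler entry on a Cray system might look like the following sketch (the compiler version, operating system, and module names are illustrative):

.. code-block:: yaml

   compilers:
   - compiler:
       spec: intel@15.0.109
       operating_system: CNL10
       modules:
       - PrgEnv-intel
       - intel/15.0.109
       paths:
         cc: cc
         cxx: CC
         f77: ftn
         fc: ftn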
Spack sets the default Cray link type to dynamic, to better match
other platforms. Individual packages can enable static linking (which is the
default outside of Spack on cray systems) using the ``-static`` flag.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting defaults and using Cray modules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you want to use default compilers for each PrgEnv and also be able
to load cray external modules, you will need to set up a ``packages.yaml``.
Here's an example of an external configuration for cray modules:
.. code-block:: yaml
packages:
mpich:
externals:
- spec: "mpich@7.3.1%gcc@5.2.0 arch=cray_xc-haswell-CNL10"
modules:
- cray-mpich
- spec: "mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-haswell-CNL10"
modules:
- cray-mpich
all:
providers:
mpi: [mpich]
This tells Spack to load the cray-mpich module into the environment for any
package that depends on mpi. You can then use whatever environment variables,
libraries, etc., are brought into the environment via module load.
.. note::
For Cray-provided packages, it is best to use ``modules:`` instead of ``prefix:``
in ``packages.yaml``, because the Cray Programming Environment heavily relies on
modules (e.g., loading the ``cray-mpich`` module adds MPI libraries to the
compiler wrapper link line).
You can set the default compiler that Spack can use for each compiler type.
If you want to use the Cray defaults, then set them under ``all:`` in packages.yaml.
In the compiler field, set the compiler specs in your order of preference.
Whenever you build with that compiler type, Spack will concretize to that version.
Here is an example of a full packages.yaml used at NERSC:
.. code-block:: yaml
packages:
mpich:
externals:
- spec: "mpich@7.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-mpich
- spec: "mpich@7.3.1%intel@16.0.0.109 arch=cray_xc-SuSE11-ivybridge"
modules:
- cray-mpich
buildable: False
netcdf:
externals:
- spec: "netcdf@4.3.3.1%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-netcdf
- spec: "netcdf@4.3.3.1%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-netcdf
buildable: False
hdf5:
externals:
- spec: "hdf5@1.8.14%gcc@5.2.0 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-hdf5
- spec: "hdf5@1.8.14%intel@16.0.0.109 arch=cray_xc-CNL10-ivybridge"
modules:
- cray-hdf5
buildable: False
all:
compiler: [gcc@5.2.0, intel@16.0.0.109]
providers:
mpi: [mpich]
Here we tell Spack to use gcc version 5.2.0 whenever we build with gcc, and
intel version 16.0.0.109 whenever we build with the Intel compilers. We add a spec
for each compiler type for each Cray module. This ensures that for each
compiler on our system we can use that external module.
For more on external packages check out the section :ref:`sec-external-packages`.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using Linux containers on Cray machines
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Spack uses environment variables particular to the Cray programming
environment to determine which systems are Cray platforms. These
environment variables may be propagated into containers that are not
using the Cray programming environment.
To ensure that Spack does not autodetect the Cray programming
environment, unset the environment variable ``MODULEPATH``. This
will cause Spack to treat a linux container on a Cray system as a base
linux distro.
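For example, before invoking Spack inside such a container (``spack arch`` merely confirms which platform Spack detected):

.. code-block:: console

   $ unset MODULEPATH
   $ spack arch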
.. _windows_support: .. _windows_support:
---------------- ----------------
@@ -1339,8 +1572,6 @@ Microsoft Visual Studio
""""""""""""""""""""""" """""""""""""""""""""""
Microsoft Visual Studio provides the only Windows C/C++ compiler that is currently supported by Spack. Microsoft Visual Studio provides the only Windows C/C++ compiler that is currently supported by Spack.
Spack additionally requires the Windows SDK (including WGL) to be installed as part of your
Visual Studio installation, as it is required to build many packages from source.
We require several specific components to be included in the Visual Studio installation. We require several specific components to be included in the Visual Studio installation.
One is the C/C++ toolset, which can be selected as "Desktop development with C++" or "C++ build tools," One is the C/C++ toolset, which can be selected as "Desktop development with C++" or "C++ build tools,"
@@ -1348,7 +1579,6 @@ depending on installation type (Professional, Build Tools, etc.) The other requ
"C++ CMake tools for Windows," which can be selected from among the optional packages. "C++ CMake tools for Windows," which can be selected from among the optional packages.
This provides CMake and Ninja for use during Spack configuration. This provides CMake and Ninja for use during Spack configuration.
If you already have Visual Studio installed, you can make sure these components are installed by If you already have Visual Studio installed, you can make sure these components are installed by
rerunning the installer. Next to your installation, select "Modify" and look at the rerunning the installer. Next to your installation, select "Modify" and look at the
"Installation details" pane on the right. "Installation details" pane on the right.
@@ -1417,14 +1647,16 @@ in a Windows CMD prompt.
Step 3: Run and configure Spack Step 3: Run and configure Spack
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
On Windows, Spack supports both primary native shells, Powershell and the traditional command prompt. To use Spack, run ``bin\spack_cmd.bat`` (you may need to Run as Administrator) from the top-level spack
To use Spack, pick your favorite shell, and run ``bin\spack_cmd.bat`` or ``share/spack/setup-env.ps1`` directory. This will provide a Windows command prompt with an environment properly set up with Spack
(you may need to Run as Administrator) from the top-level spack and its prerequisites. If you receive a warning message that Python is not in your ``PATH``
directory. This will provide a Spack-enabled shell. If you receive a warning message that Python is not in your ``PATH``
(which may happen if you installed Python from the website and not the Windows Store) add the location (which may happen if you installed Python from the website and not the Windows Store) add the location
of the Python executable to your ``PATH`` now. You can permanently add Python to your ``PATH`` variable of the Python executable to your ``PATH`` now. You can permanently add Python to your ``PATH`` variable
by using the ``Edit the system environment variables`` utility in Windows Control Panel. by using the ``Edit the system environment variables`` utility in Windows Control Panel.
.. note::
Alternatively, Powershell can be used in place of CMD
To configure Spack, first run the following command inside the Spack console: To configure Spack, first run the following command inside the Spack console:
.. code-block:: console .. code-block:: console
@@ -1489,7 +1721,7 @@ and not tabs, so ensure that this is the case when editing one directly.
.. note:: Cygwin .. note:: Cygwin
The use of Cygwin is not officially supported by Spack and is not tested. The use of Cygwin is not officially supported by Spack and is not tested.
However, Spack will not prevent this, so if you choose to use Spack However, Spack will not throw an error, so if you choose to use Spack
with Cygwin, know that no functionality is guaranteed.
^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^
@@ -1503,12 +1735,21 @@ Spack console via:
spack install cpuinfo spack install cpuinfo
If in the previous step, you did not have CMake or Ninja installed, running the command above should install both packages. If in the previous step, you did not have CMake or Ninja installed, running the command above should bootstrap both packages.
.. note:: Spec Syntax Caveats """""""""""""""""""""""""""
Windows has a few idiosyncrasies when it comes to the Spack spec syntax and the use of certain shells Windows Compatible Packages
See the Spack spec syntax doc for more information """""""""""""""""""""""""""
Not all Spack packages currently have Windows support. Some are inherently incompatible with the
platform, and others simply have yet to be ported. To view the current set of packages with Windows
support, use ``spack list -t windows``. If there's a package you'd like
to install on Windows but it is not in that list, feel free to reach out to request the port or contribute
the port yourself.
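For example, from the Spack console:

.. code-block:: console

   $ spack list -t windows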
.. note::
This is by no means a comprehensive list; some packages may have ports that were not tagged,
while others may just work out of the box on Windows and have not been tagged as such.
^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^
For developers For developers
@@ -1518,3 +1759,6 @@ The intent is to provide a Windows installer that will automatically set up
Python, Git, and Spack, instead of requiring the user to do so manually. Python, Git, and Spack, instead of requiring the user to do so manually.
Instructions for creating the installer are at Instructions for creating the installer are at
https://github.com/spack/spack/blob/develop/lib/spack/spack/cmd/installer/README.md https://github.com/spack/spack/blob/develop/lib/spack/spack/cmd/installer/README.md
Alternatively, a pre-built copy of the Windows installer is available as an artifact of Spack's Windows CI
at each run of the CI on develop or any PR.

Binary image files changed: two added (44 KiB and 68 KiB), one removed (358 KiB); contents not shown.

View File

@@ -12,6 +12,10 @@
Spack Spack
=================== ===================
.. epigraph::
`These are docs for the Spack package manager. For sphere packing, see` `pyspack <https://pyspack.readthedocs.io>`_.
Spack is a package management tool designed to support multiple Spack is a package management tool designed to support multiple
versions and configurations of software on a wide variety of platforms versions and configurations of software on a wide variety of platforms
and environments. It was designed for large supercomputing centers, and environments. It was designed for large supercomputing centers,
@@ -35,15 +39,10 @@ package:
.. code-block:: console .. code-block:: console
$ git clone -c feature.manyFiles=true --depth=2 https://github.com/spack/spack.git $ git clone -c feature.manyFiles=true https://github.com/spack/spack.git
$ cd spack/bin $ cd spack/bin
$ ./spack install libelf $ ./spack install libelf
.. note::
``-c feature.manyFiles=true`` improves git's performance on repositories with 1,000+ files.
``--depth=2`` prunes the git history to reduce the size of the Spack installation.
If you're new to spack and want to start using it, see :doc:`getting_started`, If you're new to spack and want to start using it, see :doc:`getting_started`,
or refer to the full manual below. or refer to the full manual below.

View File

@@ -457,11 +457,11 @@ For instance, the following config options,
tcl: tcl:
all: all:
suffixes: suffixes:
^python@3: 'python{^python.version}' ^python@3.12: 'python-3.12'
^openblas: 'openblas' ^openblas: 'openblas'
will add a ``python-3.12.1`` version string to any packages compiled with will add a ``python-3.12`` version string to any packages compiled with
Python matching the spec, ``python@3``. This is useful to know which Python matching the spec, ``python@3.12``. This is useful to know which
version of Python a set of Python extensions is associated with. Likewise, the version of Python a set of Python extensions is associated with. Likewise, the
``openblas`` string is attached to any program that has openblas in the spec, ``openblas`` string is attached to any program that has openblas in the spec,
most likely via the ``+blas`` variant specification. most likely via the ``+blas`` variant specification.

File diff suppressed because it is too large.

View File

@@ -59,7 +59,7 @@ Functional Example
------------------ ------------------
The simplest fully functional standalone example of a working pipeline can be The simplest fully functional standalone example of a working pipeline can be
examined live at this example `project <https://gitlab.com/spack/pipeline-quickstart>`_ examined live at this example `project <https://gitlab.com/scott.wittenburg/spack-pipeline-demo>`_
on gitlab.com. on gitlab.com.
Here's the ``.gitlab-ci.yml`` file from that example that builds and runs the Here's the ``.gitlab-ci.yml`` file from that example that builds and runs the
@@ -67,46 +67,39 @@ pipeline:
.. code-block:: yaml .. code-block:: yaml
stages: [ "generate", "build" ] stages: [generate, build]
variables: variables:
SPACK_REPOSITORY: "https://github.com/spack/spack.git" SPACK_REPO: https://github.com/scottwittenburg/spack.git
SPACK_REF: "develop-2024-10-06" SPACK_REF: pipelines-reproducible-builds
SPACK_USER_CONFIG_PATH: ${CI_PROJECT_DIR}
SPACK_BACKTRACE: 1
generate-pipeline: generate-pipeline:
tags:
- saas-linux-small-amd64
stage: generate stage: generate
tags:
- docker
image: image:
name: ghcr.io/spack/ubuntu20.04-runner-x86_64:2023-01-01 name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
script: entrypoint: [""]
- git clone ${SPACK_REPOSITORY} before_script:
- cd spack && git checkout ${SPACK_REF} && cd ../ - git clone ${SPACK_REPO}
- pushd spack && git checkout ${SPACK_REF} && popd
- . "./spack/share/spack/setup-env.sh" - . "./spack/share/spack/setup-env.sh"
- spack --version script:
- spack env activate --without-view . - spack env activate --without-view .
- spack -d -v --color=always - spack -d ci generate
ci generate
--check-index-only
--artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir" --artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir"
--output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/cloud-ci-pipeline.yml" --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"
artifacts: artifacts:
paths: paths:
- "${CI_PROJECT_DIR}/jobs_scratch_dir" - "${CI_PROJECT_DIR}/jobs_scratch_dir"
build-pipeline: build-jobs:
stage: build stage: build
trigger: trigger:
include: include:
- artifact: jobs_scratch_dir/cloud-ci-pipeline.yml - artifact: "jobs_scratch_dir/pipeline.yml"
job: generate-pipeline job: generate-pipeline
strategy: depend strategy: depend
needs:
- artifacts: True
job: generate-pipeline
The key thing to note above is that there are two jobs: The first job to run, The key thing to note above is that there are two jobs: The first job to run,
``generate-pipeline``, runs the ``spack ci generate`` command to generate a ``generate-pipeline``, runs the ``spack ci generate`` command to generate a
@@ -121,93 +114,82 @@ And here's the spack environment built by the pipeline represented as a
spack: spack:
view: false view: false
concretizer: concretizer:
unify: true unify: false
reuse: false
definitions: definitions:
- pkgs: - pkgs:
- zlib - zlib
- bzip2 ~debug - bzip2
- compiler: - arch:
- '%gcc' - '%gcc@7.5.0 arch=linux-ubuntu18.04-x86_64'
specs: specs:
- matrix: - matrix:
- - $pkgs - - $pkgs
- - $compiler - - $arch
mirrors: { "mirror": "s3://spack-public/mirror" }
ci: ci:
target: gitlab enable-artifacts-buildcache: True
rebuild-index: False
pipeline-gen: pipeline-gen:
- any-job: - any-job:
tags:
- saas-linux-small-amd64
image:
name: ghcr.io/spack/ubuntu20.04-runner-x86_64:2023-01-01
before_script: before_script:
- git clone ${SPACK_REPOSITORY} - git clone ${SPACK_REPO}
- cd spack && git checkout ${SPACK_REF} && cd ../ - pushd spack && git checkout ${SPACK_CHECKOUT_VERSION} && popd
- . "./spack/share/spack/setup-env.sh" - . "./spack/share/spack/setup-env.sh"
- spack --version - build-job:
- export SPACK_USER_CONFIG_PATH=${CI_PROJECT_DIR} tags: [docker]
- spack config blame mirrors image:
name: ghcr.io/scottwittenburg/ecpe4s-ubuntu18.04-runner-x86_64:2020-09-01
entrypoint: [""]
The elements of this file important to spack ci pipelines are described in more
detail below, but there are a couple of things to note about the above working
example:
.. note:: .. note::
The use of ``reuse: false`` in spack environments used for pipelines is There is no ``script`` attribute specified for here. The reason for this is
almost always what you want, as without it your pipelines will not rebuild Spack CI will automatically generate reasonable default scripts. More
packages even if package hashes have changed. This is due to the concretizer detail on what is in these scripts can be found below.
strongly preferring known hashes when ``reuse: true``.
The ``ci`` section in the above environment file contains the bare minimum Also notice the ``before_script`` section. It is required when using any of the
configuration required for ``spack ci generate`` to create a working pipeline. default scripts to source the ``setup-env.sh`` script in order to inform
The ``target: gitlab`` tells spack that the desired pipeline output is for the default scripts where to find the ``spack`` executable.
gitlab. However, this isn't strictly required, as currently gitlab is the
only possible output format for pipelines. The ``pipeline-gen`` section
contains the key information needed to specify attributes for the generated
jobs. Notice that it contains a list which has only a single element in
this case. In real pipelines it will almost certainly have more elements,
and in those cases, order is important: spack starts at the bottom of the
list and works upwards when applying attributes.
But in this simple case, we use only the special key ``any-job`` to Normally ``enable-artifacts-buildcache`` is not recommended in production as it
indicate that spack should apply the specified attributes (``tags``, ``image``, results in large binary artifacts getting transferred back and forth between
and ``before_script``) to any job it generates. This includes jobs for gitlab and the runners. But in this example on gitlab.com where there is no
building/pushing all packages, a ``rebuild-index`` job at the end of the shared, persistent file system, and where no secrets are stored for giving
pipeline, as well as any ``noop`` jobs that might be needed by gitlab when permission to write to an S3 bucket, ``enabled-buildcache-artifacts`` is the only
no rebuilds are required. way to propagate binaries from jobs to their dependents.
Something to note is that in this simple case, we rely on spack to Also, it is usually a good idea to let the pipeline generate a final "rebuild the
generate a reasonable script for the package build jobs (it just creates buildcache index" job, so that subsequent pipeline generation can quickly determine
a script that invokes ``spack ci rebuild``). which specs are up to date and which need to be rebuilt (it's a good idea for other
reasons as well, but those are out of scope for this discussion). In this case we
have disabled it (using ``rebuild-index: False``) because the index would only be
generated in the artifacts mirror anyway, and consequently would not be available
during subsequent pipeline runs.
Another thing to note is the use of the ``SPACK_USER_CONFIG_PATH`` environment .. note::
variable in any generated jobs. The purpose of this is to make spack With the addition of reproducible builds (#22887) a previously working
aware of one final file in the example, the one that contains the mirror pipeline will require some changes:
configuration. This file, ``mirrors.yaml`` looks like this:
.. code-block:: yaml * In the build-jobs, the environment location changed.
This will typically show as a ``KeyError`` in the failing job. Be sure to
point to ``${SPACK_CONCRETE_ENV_DIR}``.
mirrors: * When using ``include`` in your environment, be sure to make the included
buildcache-destination: files available in the build jobs. This means adding those files to the
url: oci://registry.gitlab.com/spack/pipeline-quickstart artifact directory. Those files will also be missing in the reproducibility
binary: true artifact.
access_pair:
id_variable: CI_REGISTRY_USER
secret_variable: CI_REGISTRY_PASSWORD
* Because the location of the environment changed, including files with
Note the name of the mirror is ``buildcache-destination``, which is required relative path may have to be adapted to work both in the project context
as of Spack 0.23 (see below for more information). The mirror url simply (generation job) and in the concrete env dir context (build job).
points to the container registry associated with the project, while
``id_variable`` and ``secret_variable`` refer to environment variables
containing the access credentials for the mirror.
When spack builds packages for this example project, they will be pushed to
the project container registry, where they will be available for subsequent
jobs to install as dependencies, or for other pipelines to use to build runnable
container images.
----------------------------------- -----------------------------------
Spack commands supporting pipelines Spack commands supporting pipelines
@@ -271,6 +253,17 @@ can easily happen if it is not updated frequently, this behavior ensures that
spack has a way to know for certain about the status of any concrete spec on spack has a way to know for certain about the status of any concrete spec on
the remote mirror, but can slow down pipeline generation significantly. the remote mirror, but can slow down pipeline generation significantly.
The ``--optimize`` argument is experimental and runs the generated pipeline
document through a series of optimization passes designed to reduce the size
of the generated file.
The ``--dependencies`` is also experimental and disables what in Gitlab is
referred to as DAG scheduling, internally using the ``dependencies`` keyword
rather than ``needs`` to list dependency jobs. The drawback of using this option
is that before any job can begin, all jobs in previous stages must first
complete. The benefit is that Gitlab allows more dependencies to be listed
when using ``dependencies`` instead of ``needs``.
The optional ``--output-file`` argument should be an absolute path (including The optional ``--output-file`` argument should be an absolute path (including
file name) to the generated pipeline, and if not given, the default is file name) to the generated pipeline, and if not given, the default is
``./.gitlab-ci.yml``. ``./.gitlab-ci.yml``.
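For example, a minimal invocation mirroring the pipeline above (the artifact paths are illustrative):

.. code-block:: console

   $ spack -e . ci generate \
       --artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir" \
       --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml"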
@@ -435,6 +428,15 @@ configuration with a ``script`` attribute. Specifying a signing job without a sc
does not create a signing job and the job configuration attributes will be ignored. does not create a signing job and the job configuration attributes will be ignored.
Signing jobs are always assigned the runner tags ``aws``, ``protected``, and ``notary``. Signing jobs are always assigned the runner tags ``aws``, ``protected``, and ``notary``.
^^^^^^^^^^^^^^^^^
Cleanup (cleanup)
^^^^^^^^^^^^^^^^^
When using ``temporary-storage-url-prefix`` the cleanup job will destroy the mirror
created for the associated Gitlab pipeline. Cleanup jobs do not allow modifying the
script, but do expect that the spack command is in the path and require a
``before_script`` to be specified that sources the ``setup-env.sh`` script.
.. _noop_jobs: .. _noop_jobs:
^^^^^^^^^^^^ ^^^^^^^^^^^^
@@ -601,77 +603,6 @@ the attributes will be merged starting from the bottom match going up to the top
In the case that no match is found in a submapping section, no additional attributes will be applied. In the case that no match is found in a submapping section, no additional attributes will be applied.
^^^^^^^^^^^^^^^^^^^^^^^^
Dynamic Mapping Sections
^^^^^^^^^^^^^^^^^^^^^^^^
For large scale CI where cost optimization is required, dynamic mapping allows for the use of real-time
mapping schemes served by a web service. This type of mapping does not support the ``-remove`` type
behavior, but it does follow the rest of the merge rules for configurations.
The dynamic mapping service needs to implement a single REST API endpoint that handles
requests of the form ``GET <URL>[:PORT][/PATH]?spec=<pkg_name@pkg_version +variant1+variant2%compiler@compiler_version>``.
An example request:
.. code-block::
https://my-dyn-mapping.spack.io/allocation?spec=zlib-ng@2.1.6 +compat+opt+shared+pic+new_strategies arch=linux-ubuntu20.04-x86_64_v3%gcc@12.0.0
Here is an example response that updates the Kubernetes request variables, overrides the max retries for GitLab,
and prepends a note about the modifications made by the my-dyn-mapping.spack.io service:
.. code-block::
200 OK
{
"variables":
{
"KUBERNETES_CPU_REQUEST": "500m",
"KUBERNETES_MEMORY_REQUEST": "2G",
},
"retry": { "max:": "1"}
"script+:":
[
"echo \"Job modified by my-dyn-mapping.spack.io\""
]
}
The ci.yaml configuration section takes the URL endpoint as well as a number of options to configure how responses are handled.
It is possible to specify a list of allowed and ignored configuration attributes under ``allow`` and ``ignore``
respectively. It is also possible to configure required attributes under the ``required`` section.
The client timeout and SSL verification can be configured using the ``timeout`` and ``verify_ssl`` options.
By default, ``timeout`` is set to the option in ``config:timeout`` and ``verify_ssl`` is set to the option in ``config:verify_ssl``.
Passing header parameters to the request can be achieved through the ``header`` section. The values of the variables passed to the
header may be environment variables that are expanded at runtime, such as a private token configured on the runner.
Here is an example configuration pointing to ``my-dyn-mapping.spack.io/allocation``.
.. code-block:: yaml
ci:
- dynamic-mapping:
endpoint: my-dyn-mapping.spack.io/allocation
timeout: 10
verify_ssl: True
header:
PRIVATE_TOKEN: ${MY_PRIVATE_TOKEN}
MY_CONFIG: "fuzz_allocation:false"
allow:
- variables
ignore:
- script
require: []
^^^^^^^^^^^^^ ^^^^^^^^^^^^^
Bootstrapping Bootstrapping
^^^^^^^^^^^^^ ^^^^^^^^^^^^^
@@ -743,13 +674,26 @@ build the package.
When including a bootstrapping phase as in the example above, the result is that When including a bootstrapping phase as in the example above, the result is that
the bootstrapped compiler packages will be pushed to the binary mirror (and the the bootstrapped compiler packages will be pushed to the binary mirror (and the
local artifacts mirror) before the actual release specs are built. local artifacts mirror) before the actual release specs are built. In this case,
the jobs corresponding to subsequent release specs are configured to
``install_missing_compilers``, so that if spack is asked to install a package
with a compiler it doesn't know about, it can be quickly installed from the
binary mirror first.
Since bootstrapping compilers is optional, those items can be left out of the Since bootstrapping compilers is optional, those items can be left out of the
environment/stack file, and in that case no bootstrapping will be done (only the environment/stack file, and in that case no bootstrapping will be done (only the
specs will be staged for building) and the runners will be expected to already specs will be staged for building) and the runners will be expected to already
have all needed compilers installed and configured for spack to use. have all needed compilers installed and configured for spack to use.
^^^^^^^^^^^^^^^^^^^
Pipeline Buildcache
^^^^^^^^^^^^^^^^^^^
The ``enable-artifacts-buildcache`` key
takes a boolean and determines whether the pipeline uses artifacts to store and
pass along the buildcaches from one stage to the next (the default if you don't
provide this option is ``False``).
^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^
Broken Specs URL Broken Specs URL
^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^

View File

@@ -476,3 +476,9 @@ implemented using Python's built-in `sys.path
:py:mod:`spack.repo` module implements a custom `Python importer :py:mod:`spack.repo` module implements a custom `Python importer
<https://docs.python.org/2/library/imp.html>`_. <https://docs.python.org/2/library/imp.html>`_.
.. warning::
The mechanism for extending packages is not yet extensively tested,
and extending packages across repositories imposes inter-repo
dependencies, which may be hard to manage. Use this feature at your
own risk, but let us know if you have a use case for it.

View File

@@ -1,13 +1,13 @@
sphinx==8.1.3 sphinx==7.2.6
sphinxcontrib-programoutput==0.17 sphinxcontrib-programoutput==0.17
sphinx_design==0.6.1 sphinx_design==0.5.0
sphinx-rtd-theme==3.0.2 sphinx-rtd-theme==2.0.0
python-levenshtein==0.26.1 python-levenshtein==0.25.0
docutils==0.21.2 docutils==0.20.1
pygments==2.18.0 pygments==2.17.2
urllib3==2.2.3 urllib3==2.2.1
pytest==8.3.3 pytest==8.1.1
isort==5.13.2 isort==5.13.2
black==24.10.0 black==24.3.0
flake8==7.1.1 flake8==7.0.0
mypy==1.11.1 mypy==1.9.0

377
lib/spack/env/cc vendored
View File

@@ -47,8 +47,7 @@ SPACK_F77_RPATH_ARG
SPACK_FC_RPATH_ARG SPACK_FC_RPATH_ARG
SPACK_LINKER_ARG SPACK_LINKER_ARG
SPACK_SHORT_SPEC SPACK_SHORT_SPEC
SPACK_SYSTEM_DIRS SPACK_SYSTEM_DIRS"
SPACK_MANAGED_DIRS"
# Optional parameters that aren't required to be set # Optional parameters that aren't required to be set
@@ -101,9 +100,10 @@ setsep() {
esac esac
} }
# prepend LISTNAME ELEMENT # prepend LISTNAME ELEMENT [SEP]
# #
# Prepend ELEMENT to the list stored in the variable LISTNAME. # Prepend ELEMENT to the list stored in the variable LISTNAME,
# assuming the list is separated by SEP.
# Handles empty lists and single-element lists. # Handles empty lists and single-element lists.
prepend() { prepend() {
varname="$1" varname="$1"
@@ -173,44 +173,20 @@ preextend() {
unset IFS unset IFS
} }
execute() { # system_dir PATH
# dump the full command if the caller supplies SPACK_TEST_COMMAND=dump-args # test whether a path is a system directory
if [ -n "${SPACK_TEST_COMMAND=}" ]; then system_dir() {
case "$SPACK_TEST_COMMAND" in IFS=':' # SPACK_SYSTEM_DIRS is colon-separated
dump-args) path="$1"
IFS="$lsep" for sd in $SPACK_SYSTEM_DIRS; do
for arg in $full_command_list; do if [ "${path}" = "${sd}" ] || [ "${path}" = "${sd}/" ]; then
echo "$arg" # success if path starts with a system prefix
done unset IFS
unset IFS return 0
exit fi
;; done
dump-env-*) unset IFS
var=${SPACK_TEST_COMMAND#dump-env-} return 1 # fail if path starts with no system prefix
eval "printf '%s\n' \"\$0: \$var: \$$var\""
;;
*)
die "Unknown test command: '$SPACK_TEST_COMMAND'"
;;
esac
fi
#
# Write the input and output commands to debug logs if it's asked for.
#
if [ "$SPACK_DEBUG" = TRUE ]; then
input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.in.log"
output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.out.log"
echo "[$mode] $command $input_command" >> "$input_log"
IFS="$lsep"
echo "[$mode] "$full_command_list >> "$output_log"
unset IFS
fi
# Execute the full command, preserving spaces with IFS set
# to the alarm bell separator.
IFS="$lsep"; exec $full_command_list
exit
} }
# Fail with a clear message if the input contains any bell characters. # Fail with a clear message if the input contains any bell characters.
@@ -225,48 +201,6 @@ for param in $params; do
fi fi
done done
# eval this because SPACK_MANAGED_DIRS and SPACK_SYSTEM_DIRS are inputs we don't wanna loop over.
# moving the eval inside the function would eval it every call.
eval "\
path_order() {
case \"\$1\" in
$SPACK_MANAGED_DIRS) return 0 ;;
$SPACK_SYSTEM_DIRS) return 2 ;;
/*) return 1 ;;
esac
}
"
# path_list functions. Path_lists have 3 parts: spack_store_<list>, <list> and system_<list>,
# which are used to prioritize paths when assembling the final command line.
# init_path_lists LISTNAME
# Set <LISTNAME>, spack_store_<LISTNAME>, and system_<LISTNAME> to "".
init_path_lists() {
eval "spack_store_$1=\"\""
eval "$1=\"\""
eval "system_$1=\"\""
}
# assign_path_lists LISTNAME1 LISTNAME2
# Copy contents of LISTNAME2 into LISTNAME1, for each path_list prefix.
assign_path_lists() {
eval "spack_store_$1=\"\${spack_store_$2}\""
eval "$1=\"\${$2}\""
eval "system_$1=\"\${system_$2}\""
}
# append_path_lists LISTNAME ELT
# Append the provided ELT to the appropriate list, based on the result of path_order().
append_path_lists() {
path_order "$2"
case $? in
0) eval "append spack_store_$1 \"\$2\"" ;;
1) eval "append $1 \"\$2\"" ;;
2) eval "append system_$1 \"\$2\"" ;;
esac
}
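For readers following the shell above: path_order buckets every directory as Spack store (0), other non-system (1), or system (2), and append_path_lists routes it into the matching list. A minimal Python sketch of the same three-way partitioning; the patterns and names here are hypothetical, not Spack's actual configuration:

import fnmatch

# Hypothetical stand-ins for the SPACK_MANAGED_DIRS / SPACK_SYSTEM_DIRS patterns.
MANAGED_PATTERNS = ["/opt/spack/store/*"]
SYSTEM_PATTERNS = ["/usr/*", "/lib/*"]

def path_order(path):
    """0 = Spack store, 1 = other path, 2 = system directory."""
    if any(fnmatch.fnmatch(path, p) for p in MANAGED_PATTERNS):
        return 0
    if any(fnmatch.fnmatch(path, p) for p in SYSTEM_PATTERNS):
        return 2
    return 1

def append_path_lists(buckets, path):
    """Route path into the spack_store/default/system bucket, like the wrapper."""
    buckets[("spack_store", "default", "system")[path_order(path)]].append(path)

buckets = {"spack_store": [], "default": [], "system": []}
for d in ("/opt/spack/store/gcc/lib", "/home/me/project/lib", "/usr/lib"):
    append_path_lists(buckets, d)
print(buckets)  # each directory lands in exactly one bucket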
# Check if optional parameters are defined # Check if optional parameters are defined
# If we aren't asking for debug flags, don't add them # If we aren't asking for debug flags, don't add them
if [ -z "${SPACK_ADD_DEBUG_FLAGS:-}" ]; then if [ -z "${SPACK_ADD_DEBUG_FLAGS:-}" ]; then
@@ -300,17 +234,12 @@ fi
# ld link # ld link
# ccld compile & link # ccld compile & link
# Note. SPACK_ALWAYS_XFLAGS are applied for all compiler invocations,
# including version checks (SPACK_XFLAGS variants are not applied
# for version checks).
command="${0##*/}" command="${0##*/}"
comp="CC" comp="CC"
vcheck_flags=""
case "$command" in case "$command" in
cpp) cpp)
mode=cpp mode=cpp
debug_flags="-g" debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_CPPFLAGS}"
;; ;;
cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc|amdclang|cl.exe|craycc) cc|c89|c99|gcc|clang|armclang|icc|icx|pgcc|nvc|xlc|xlc_r|fcc|amdclang|cl.exe|craycc)
command="$SPACK_CC" command="$SPACK_CC"
@@ -318,15 +247,13 @@ case "$command" in
comp="CC" comp="CC"
lang_flags=C lang_flags=C
debug_flags="-g" debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_CFLAGS}"
;; ;;
c++|CC|g++|clang++|armclang++|icpc|icpx|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC) c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC)
command="$SPACK_CXX" command="$SPACK_CXX"
language="C++" language="C++"
comp="CXX" comp="CXX"
lang_flags=CXX lang_flags=CXX
debug_flags="-g" debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_CXXFLAGS}"
;; ;;
ftn|f90|fc|f95|gfortran|flang|armflang|ifort|ifx|pgfortran|nvfortran|xlf90|xlf90_r|nagfor|frt|amdflang|crayftn) ftn|f90|fc|f95|gfortran|flang|armflang|ifort|ifx|pgfortran|nvfortran|xlf90|xlf90_r|nagfor|frt|amdflang|crayftn)
command="$SPACK_FC" command="$SPACK_FC"
@@ -334,7 +261,6 @@ case "$command" in
comp="FC" comp="FC"
lang_flags=F lang_flags=F
debug_flags="-g" debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_FFLAGS}"
;; ;;
f77|xlf|xlf_r|pgf77) f77|xlf|xlf_r|pgf77)
command="$SPACK_F77" command="$SPACK_F77"
@@ -342,7 +268,6 @@ case "$command" in
comp="F77" comp="F77"
lang_flags=F lang_flags=F
debug_flags="-g" debug_flags="-g"
vcheck_flags="${SPACK_ALWAYS_FFLAGS}"
;; ;;
ld|ld.gold|ld.lld) ld|ld.gold|ld.lld)
mode=ld mode=ld
@@ -443,11 +368,7 @@ unset IFS
export PATH="$new_dirs" export PATH="$new_dirs"
if [ "$mode" = vcheck ]; then if [ "$mode" = vcheck ]; then
full_command_list="$command" exec "${command}" "$@"
args="$@"
extend full_command_list vcheck_flags
extend full_command_list args
execute
fi fi
# Darwin's linker has a -r argument that merges object files together. # Darwin's linker has a -r argument that merges object files together.
@@ -499,7 +420,11 @@ input_command="$*"
parse_Wl() { parse_Wl() {
while [ $# -ne 0 ]; do while [ $# -ne 0 ]; do
if [ "$wl_expect_rpath" = yes ]; then if [ "$wl_expect_rpath" = yes ]; then
append_path_lists return_rpath_dirs_list "$1" if system_dir "$1"; then
append return_system_rpath_dirs_list "$1"
else
append return_rpath_dirs_list "$1"
fi
wl_expect_rpath=no wl_expect_rpath=no
else else
case "$1" in case "$1" in
@@ -507,15 +432,21 @@ parse_Wl() {
arg="${1#-rpath=}" arg="${1#-rpath=}"
if [ -z "$arg" ]; then if [ -z "$arg" ]; then
shift; continue shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi fi
append_path_lists return_rpath_dirs_list "$arg"
;; ;;
--rpath=*) --rpath=*)
arg="${1#--rpath=}" arg="${1#--rpath=}"
if [ -z "$arg" ]; then if [ -z "$arg" ]; then
shift; continue shift; continue
elif system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi fi
append_path_lists return_rpath_dirs_list "$arg"
;; ;;
-rpath|--rpath) -rpath|--rpath)
wl_expect_rpath=yes wl_expect_rpath=yes
@@ -523,7 +454,8 @@ parse_Wl() {
"$dtags_to_strip") "$dtags_to_strip")
;; ;;
-Wl) -Wl)
# Nested -Wl,-Wl means we're in NAG compiler territory. We don't support it. # Nested -Wl,-Wl means we're in NAG compiler territory, we don't support
# it.
return 1 return 1
;; ;;
*) *)
@@ -541,11 +473,14 @@ categorize_arguments() {
return_other_args_list="" return_other_args_list=""
return_isystem_was_used="" return_isystem_was_used=""
return_isystem_system_include_dirs_list=""
init_path_lists return_isystem_include_dirs_list return_isystem_include_dirs_list=""
init_path_lists return_include_dirs_list return_system_include_dirs_list=""
init_path_lists return_lib_dirs_list return_include_dirs_list=""
init_path_lists return_rpath_dirs_list return_system_lib_dirs_list=""
return_lib_dirs_list=""
return_system_rpath_dirs_list=""
return_rpath_dirs_list=""
# Global state for keeping track of -Wl,-rpath -Wl,/path # Global state for keeping track of -Wl,-rpath -Wl,/path
wl_expect_rpath=no wl_expect_rpath=no
@@ -591,7 +526,7 @@ categorize_arguments() {
continue continue
fi fi
replaced="$after$stripped" replaced="$after$stripped"
# it matched, remove it # it matched, remove it
shift shift
@@ -611,17 +546,29 @@ categorize_arguments() {
arg="${1#-isystem}" arg="${1#-isystem}"
return_isystem_was_used=true return_isystem_was_used=true
if [ -z "$arg" ]; then shift; arg="$1"; fi if [ -z "$arg" ]; then shift; arg="$1"; fi
append_path_lists return_isystem_include_dirs_list "$arg" if system_dir "$arg"; then
append return_isystem_system_include_dirs_list "$arg"
else
append return_isystem_include_dirs_list "$arg"
fi
;; ;;
-I*) -I*)
arg="${1#-I}" arg="${1#-I}"
if [ -z "$arg" ]; then shift; arg="$1"; fi if [ -z "$arg" ]; then shift; arg="$1"; fi
append_path_lists return_include_dirs_list "$arg" if system_dir "$arg"; then
append return_system_include_dirs_list "$arg"
else
append return_include_dirs_list "$arg"
fi
;; ;;
-L*) -L*)
arg="${1#-L}" arg="${1#-L}"
if [ -z "$arg" ]; then shift; arg="$1"; fi if [ -z "$arg" ]; then shift; arg="$1"; fi
append_path_lists return_lib_dirs_list "$arg" if system_dir "$arg"; then
append return_system_lib_dirs_list "$arg"
else
append return_lib_dirs_list "$arg"
fi
;; ;;
-l*) -l*)
# -loopopt=0 is generated erroneously in autoconf <= 2.69, # -loopopt=0 is generated erroneously in autoconf <= 2.69,
@@ -654,17 +601,29 @@ categorize_arguments() {
break break
elif [ "$xlinker_expect_rpath" = yes ]; then elif [ "$xlinker_expect_rpath" = yes ]; then
# Register the path of -Xlinker -rpath <other args> -Xlinker <path> # Register the path of -Xlinker -rpath <other args> -Xlinker <path>
append_path_lists return_rpath_dirs_list "$1" if system_dir "$1"; then
append return_system_rpath_dirs_list "$1"
else
append return_rpath_dirs_list "$1"
fi
xlinker_expect_rpath=no xlinker_expect_rpath=no
else else
case "$1" in case "$1" in
-rpath=*) -rpath=*)
arg="${1#-rpath=}" arg="${1#-rpath=}"
append_path_lists return_rpath_dirs_list "$arg" if system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
;; ;;
--rpath=*) --rpath=*)
arg="${1#--rpath=}" arg="${1#--rpath=}"
append_path_lists return_rpath_dirs_list "$arg" if system_dir "$arg"; then
append return_system_rpath_dirs_list "$arg"
else
append return_rpath_dirs_list "$arg"
fi
;; ;;
-rpath|--rpath) -rpath|--rpath)
xlinker_expect_rpath=yes xlinker_expect_rpath=yes
@@ -681,36 +640,7 @@ categorize_arguments() {
"$dtags_to_strip") "$dtags_to_strip")
;; ;;
*) *)
# if mode is not ld, we can just add to other args append return_other_args_list "$1"
if [ "$mode" != "ld" ]; then
append return_other_args_list "$1"
shift
continue
fi
# if we're in linker mode, we need to parse raw RPATH args
case "$1" in
-rpath=*)
arg="${1#-rpath=}"
append_path_lists return_rpath_dirs_list "$arg"
;;
--rpath=*)
arg="${1#--rpath=}"
append_path_lists return_rpath_dirs_list "$arg"
;;
-rpath|--rpath)
if [ $# -eq 1 ]; then
# -rpath without value: let the linker raise an error.
append return_other_args_list "$1"
break
fi
shift
append_path_lists return_rpath_dirs_list "$1"
;;
*)
append return_other_args_list "$1"
;;
esac
;; ;;
esac esac
shift shift
@@ -731,14 +661,16 @@ categorize_arguments() {
} }
categorize_arguments "$@" categorize_arguments "$@"
include_dirs_list="$return_include_dirs_list"
assign_path_lists isystem_include_dirs_list return_isystem_include_dirs_list lib_dirs_list="$return_lib_dirs_list"
assign_path_lists include_dirs_list return_include_dirs_list rpath_dirs_list="$return_rpath_dirs_list"
assign_path_lists lib_dirs_list return_lib_dirs_list system_include_dirs_list="$return_system_include_dirs_list"
assign_path_lists rpath_dirs_list return_rpath_dirs_list system_lib_dirs_list="$return_system_lib_dirs_list"
system_rpath_dirs_list="$return_system_rpath_dirs_list"
isystem_was_used="$return_isystem_was_used" isystem_was_used="$return_isystem_was_used"
other_args_list="$return_other_args_list" isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
isystem_include_dirs_list="$return_isystem_include_dirs_list"
other_args_list="$return_other_args_list"
# #
# Add flags from Spack's cppflags, cflags, cxxflags, fcflags, fflags, and # Add flags from Spack's cppflags, cflags, cxxflags, fcflags, fflags, and
@@ -765,7 +697,6 @@ case "$mode" in
cc|ccld) cc|ccld)
case $lang_flags in case $lang_flags in
F) F)
extend spack_flags_list SPACK_ALWAYS_FFLAGS
extend spack_flags_list SPACK_FFLAGS extend spack_flags_list SPACK_FFLAGS
;; ;;
esac esac
@@ -775,7 +706,6 @@ esac
# C preprocessor flags come before any C/CXX flags # C preprocessor flags come before any C/CXX flags
case "$mode" in case "$mode" in
cpp|as|cc|ccld) cpp|as|cc|ccld)
extend spack_flags_list SPACK_ALWAYS_CPPFLAGS
extend spack_flags_list SPACK_CPPFLAGS extend spack_flags_list SPACK_CPPFLAGS
;; ;;
esac esac
@@ -786,11 +716,9 @@ case "$mode" in
cc|ccld) cc|ccld)
case $lang_flags in case $lang_flags in
C) C)
extend spack_flags_list SPACK_ALWAYS_CFLAGS
extend spack_flags_list SPACK_CFLAGS extend spack_flags_list SPACK_CFLAGS
;; ;;
CXX) CXX)
extend spack_flags_list SPACK_ALWAYS_CXXFLAGS
extend spack_flags_list SPACK_CXXFLAGS extend spack_flags_list SPACK_CXXFLAGS
;; ;;
esac esac
@@ -802,7 +730,7 @@ esac
# Linker flags # Linker flags
case "$mode" in case "$mode" in
ccld) ld|ccld)
extend spack_flags_list SPACK_LDFLAGS extend spack_flags_list SPACK_LDFLAGS
;; ;;
esac esac
@@ -810,14 +738,16 @@ esac
IFS="$lsep" IFS="$lsep"
categorize_arguments $spack_flags_list categorize_arguments $spack_flags_list
unset IFS unset IFS
spack_flags_include_dirs_list="$return_include_dirs_list"
assign_path_lists spack_flags_isystem_include_dirs_list return_isystem_include_dirs_list spack_flags_lib_dirs_list="$return_lib_dirs_list"
assign_path_lists spack_flags_include_dirs_list return_include_dirs_list spack_flags_rpath_dirs_list="$return_rpath_dirs_list"
assign_path_lists spack_flags_lib_dirs_list return_lib_dirs_list spack_flags_system_include_dirs_list="$return_system_include_dirs_list"
assign_path_lists spack_flags_rpath_dirs_list return_rpath_dirs_list spack_flags_system_lib_dirs_list="$return_system_lib_dirs_list"
spack_flags_system_rpath_dirs_list="$return_system_rpath_dirs_list"
spack_flags_isystem_was_used="$return_isystem_was_used" spack_flags_isystem_was_used="$return_isystem_was_used"
spack_flags_other_args_list="$return_other_args_list" spack_flags_isystem_system_include_dirs_list="$return_isystem_system_include_dirs_list"
spack_flags_isystem_include_dirs_list="$return_isystem_include_dirs_list"
spack_flags_other_args_list="$return_other_args_list"
# On macOS insert headerpad_max_install_names linker flag # On macOS insert headerpad_max_install_names linker flag
@@ -837,13 +767,11 @@ if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
# Append RPATH directories. Note that in the case of the # Append RPATH directories. Note that in the case of the
# top-level package these directories may not exist yet. For dependencies # top-level package these directories may not exist yet. For dependencies
# it is assumed that paths have already been confirmed. # it is assumed that paths have already been confirmed.
extend spack_store_rpath_dirs_list SPACK_STORE_RPATH_DIRS
extend rpath_dirs_list SPACK_RPATH_DIRS extend rpath_dirs_list SPACK_RPATH_DIRS
fi fi
fi fi
if [ "$mode" = ccld ] || [ "$mode" = ld ]; then if [ "$mode" = ccld ] || [ "$mode" = ld ]; then
extend spack_store_lib_dirs_list SPACK_STORE_LINK_DIRS
extend lib_dirs_list SPACK_LINK_DIRS extend lib_dirs_list SPACK_LINK_DIRS
fi fi
@@ -870,82 +798,63 @@ case "$mode" in
;; ;;
esac esac
case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ] || [ "$isystem_was_used" = "true" ]; then
extend spack_store_isystem_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend isystem_include_dirs_list SPACK_INCLUDE_DIRS
else
extend spack_store_include_dirs_list SPACK_STORE_INCLUDE_DIRS
extend include_dirs_list SPACK_INCLUDE_DIRS
fi
;;
esac
# #
# Finally, reassemble the command line. # Finally, reassemble the command line.
# #
args_list="$flags_list" args_list="$flags_list"
# Include search paths partitioned by (in store, non-system, system) # Insert include directories just prior to any system include directories
# NOTE: adding ${lsep} to the prefix here turns every added element into two # NOTE: adding ${lsep} to the prefix here turns every added element into two
extend args_list spack_store_spack_flags_include_dirs_list -I extend args_list spack_flags_include_dirs_list "-I"
extend args_list spack_store_include_dirs_list -I extend args_list include_dirs_list "-I"
extend args_list spack_flags_include_dirs_list -I
extend args_list include_dirs_list -I
extend args_list spack_store_spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list spack_store_isystem_include_dirs_list "-isystem${lsep}"
extend args_list spack_flags_isystem_include_dirs_list "-isystem${lsep}" extend args_list spack_flags_isystem_include_dirs_list "-isystem${lsep}"
extend args_list isystem_include_dirs_list "-isystem${lsep}" extend args_list isystem_include_dirs_list "-isystem${lsep}"
extend args_list system_spack_flags_include_dirs_list -I case "$mode" in
cpp|cc|as|ccld)
if [ "$spack_flags_isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
elif [ "$isystem_was_used" = "true" ]; then
extend args_list SPACK_INCLUDE_DIRS "-isystem${lsep}"
else
extend args_list SPACK_INCLUDE_DIRS "-I"
fi
;;
esac
extend args_list spack_flags_system_include_dirs_list -I
extend args_list system_include_dirs_list -I extend args_list system_include_dirs_list -I
extend args_list spack_flags_isystem_system_include_dirs_list "-isystem${lsep}"
extend args_list isystem_system_include_dirs_list "-isystem${lsep}"
extend args_list system_spack_flags_isystem_include_dirs_list "-isystem${lsep}" # Library search paths
extend args_list system_isystem_include_dirs_list "-isystem${lsep}"
# Library search paths partitioned by (in store, non-system, system)
extend args_list spack_store_spack_flags_lib_dirs_list "-L"
extend args_list spack_store_lib_dirs_list "-L"
extend args_list spack_flags_lib_dirs_list "-L" extend args_list spack_flags_lib_dirs_list "-L"
extend args_list lib_dirs_list "-L" extend args_list lib_dirs_list "-L"
extend args_list spack_flags_system_lib_dirs_list "-L"
extend args_list system_spack_flags_lib_dirs_list "-L"
extend args_list system_lib_dirs_list "-L" extend args_list system_lib_dirs_list "-L"
# RPATHs arguments # RPATHs arguments
rpath_prefix=""
case "$mode" in case "$mode" in
ccld) ccld)
if [ -n "$dtags_to_add" ] ; then if [ -n "$dtags_to_add" ] ; then
append args_list "$linker_arg$dtags_to_add" append args_list "$linker_arg$dtags_to_add"
fi fi
rpath_prefix="$rpath" extend args_list spack_flags_rpath_dirs_list "$rpath"
extend args_list rpath_dirs_list "$rpath"
extend args_list spack_flags_system_rpath_dirs_list "$rpath"
extend args_list system_rpath_dirs_list "$rpath"
;; ;;
ld) ld)
if [ -n "$dtags_to_add" ] ; then if [ -n "$dtags_to_add" ] ; then
append args_list "$dtags_to_add" append args_list "$dtags_to_add"
fi fi
rpath_prefix="-rpath${lsep}" extend args_list spack_flags_rpath_dirs_list "-rpath${lsep}"
extend args_list rpath_dirs_list "-rpath${lsep}"
extend args_list spack_flags_system_rpath_dirs_list "-rpath${lsep}"
extend args_list system_rpath_dirs_list "-rpath${lsep}"
;; ;;
esac esac
# if mode is ccld or ld, extend RPATH lists with the prefix determined above
if [ -n "$rpath_prefix" ]; then
extend args_list spack_store_spack_flags_rpath_dirs_list "$rpath_prefix"
extend args_list spack_store_rpath_dirs_list "$rpath_prefix"
extend args_list spack_flags_rpath_dirs_list "$rpath_prefix"
extend args_list rpath_dirs_list "$rpath_prefix"
extend args_list system_spack_flags_rpath_dirs_list "$rpath_prefix"
extend args_list system_rpath_dirs_list "$rpath_prefix"
fi
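The net effect of the reassembly above: for each of the include, library, and rpath groups, Spack-store paths are emitted first, then the remaining non-system paths, then system paths, with the rpath prefix chosen per mode (linker pass-through for ccld, bare -rpath for ld). A toy Python rendering of that ordering, with invented paths:

def emit(prefix, spack_store, other, system):
    """Emit prefix+dir for each bucket: store paths first, system paths last."""
    return [f"{prefix}{d}" for bucket in (spack_store, other, system) for d in bucket]

args = []
args += emit("-I", ["/store/zlib/include"], ["/src/include"], ["/usr/include"])
args += emit("-L", ["/store/zlib/lib"], [], ["/usr/lib"])
args += emit("-Wl,-rpath,", ["/store/zlib/lib"], [], [])   # ccld-style prefix
print(args)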
# Other arguments from the input command # Other arguments from the input command
extend args_list other_args_list extend args_list other_args_list
extend args_list spack_flags_other_args_list extend args_list spack_flags_other_args_list
@@ -968,4 +877,40 @@ if [ -n "$SPACK_CCACHE_BINARY" ]; then
esac esac
fi fi
execute # dump the full command if the caller supplies SPACK_TEST_COMMAND=dump-args
if [ -n "${SPACK_TEST_COMMAND=}" ]; then
case "$SPACK_TEST_COMMAND" in
dump-args)
IFS="$lsep"
for arg in $full_command_list; do
echo "$arg"
done
unset IFS
exit
;;
dump-env-*)
var=${SPACK_TEST_COMMAND#dump-env-}
eval "printf '%s\n' \"\$0: \$var: \$$var\""
;;
*)
die "Unknown test command: '$SPACK_TEST_COMMAND'"
;;
esac
fi
#
# Write the input and output commands to debug logs if it's asked for.
#
if [ "$SPACK_DEBUG" = TRUE ]; then
input_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.in.log"
output_log="$SPACK_DEBUG_LOG_DIR/spack-cc-$SPACK_DEBUG_LOG_ID.out.log"
echo "[$mode] $command $input_command" >> "$input_log"
IFS="$lsep"
echo "[$mode] "$full_command_list >> "$output_log"
unset IFS
fi
# Execute the full command, preserving spaces with IFS set
# to the alarm bell separator.
IFS="$lsep"; exec $full_command_list

View File

@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec * Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures * Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.5 (commit 38ce485258ffc4fc6dd6688f8dc90cb269478c47) * Version: 0.2.3 (commit 7b8fe60b69e2861e7dac104bc1c183decfcd3daf)
astunparse astunparse
---------------- ----------------

View File

@@ -1265,29 +1265,27 @@ def _distro_release_info(self) -> Dict[str, str]:
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
else: else:
try: try:
with os.scandir(self.etc_dir) as it: basenames = [
etc_files = [ basename
p.path for p in it for basename in os.listdir(self.etc_dir)
if p.is_file() and p.name not in _DISTRO_RELEASE_IGNORE_BASENAMES if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES
] and os.path.isfile(os.path.join(self.etc_dir, basename))
]
# We sort for repeatability in cases where there are multiple # We sort for repeatability in cases where there are multiple
# distro specific files; e.g. CentOS, Oracle, Enterprise all # distro specific files; e.g. CentOS, Oracle, Enterprise all
# containing `redhat-release` on top of their own. # containing `redhat-release` on top of their own.
etc_files.sort() basenames.sort()
except OSError: except OSError:
# This may occur when /etc is not readable but we can't be # This may occur when /etc is not readable but we can't be
# sure about the *-release files. Check common entries of # sure about the *-release files. Check common entries of
# /etc for information. If they turn out to not be there the # /etc for information. If they turn out to not be there the
# error is handled in `_parse_distro_release_file()`. # error is handled in `_parse_distro_release_file()`.
etc_files = [ basenames = _DISTRO_RELEASE_BASENAMES
os.path.join(self.etc_dir, basename) for basename in basenames:
for basename in _DISTRO_RELEASE_BASENAMES match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
]
for filepath in etc_files:
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(os.path.basename(filepath))
if match is None: if match is None:
continue continue
filepath = os.path.join(self.etc_dir, basename)
distro_info = self._parse_distro_release_file(filepath) distro_info = self._parse_distro_release_file(filepath)
# The name is always present if the pattern matches. # The name is always present if the pattern matches.
if "name" not in distro_info: if "name" not in distro_info:

View File

@@ -231,6 +231,96 @@ def is_host_name(instance):
return True return True
try:
# The built-in `idna` codec only implements RFC 3890, so we go elsewhere.
import idna
except ImportError:
pass
else:
@_checks_drafts(draft7="idn-hostname", raises=idna.IDNAError)
def is_idn_host_name(instance):
if not isinstance(instance, str_types):
return True
idna.encode(instance)
return True
try:
import rfc3987
except ImportError:
try:
from rfc3986_validator import validate_rfc3986
except ImportError:
pass
else:
@_checks_drafts(name="uri")
def is_uri(instance):
if not isinstance(instance, str_types):
return True
return validate_rfc3986(instance, rule="URI")
@_checks_drafts(
draft6="uri-reference",
draft7="uri-reference",
raises=ValueError,
)
def is_uri_reference(instance):
if not isinstance(instance, str_types):
return True
return validate_rfc3986(instance, rule="URI_reference")
else:
@_checks_drafts(draft7="iri", raises=ValueError)
def is_iri(instance):
if not isinstance(instance, str_types):
return True
return rfc3987.parse(instance, rule="IRI")
@_checks_drafts(draft7="iri-reference", raises=ValueError)
def is_iri_reference(instance):
if not isinstance(instance, str_types):
return True
return rfc3987.parse(instance, rule="IRI_reference")
@_checks_drafts(name="uri", raises=ValueError)
def is_uri(instance):
if not isinstance(instance, str_types):
return True
return rfc3987.parse(instance, rule="URI")
@_checks_drafts(
draft6="uri-reference",
draft7="uri-reference",
raises=ValueError,
)
def is_uri_reference(instance):
if not isinstance(instance, str_types):
return True
return rfc3987.parse(instance, rule="URI_reference")
try:
from strict_rfc3339 import validate_rfc3339
except ImportError:
try:
from rfc3339_validator import validate_rfc3339
except ImportError:
validate_rfc3339 = None
if validate_rfc3339:
@_checks_drafts(name="date-time")
def is_datetime(instance):
if not isinstance(instance, str_types):
return True
return validate_rfc3339(instance)
@_checks_drafts(draft7="time")
def is_time(instance):
if not isinstance(instance, str_types):
return True
return is_datetime("1970-01-01T" + instance)
@_checks_drafts(name="regex", raises=re.error) @_checks_drafts(name="regex", raises=re.error)
def is_regex(instance): def is_regex(instance):
if not isinstance(instance, str_types): if not isinstance(instance, str_types):
@@ -250,3 +340,86 @@ def is_draft3_time(instance):
if not isinstance(instance, str_types): if not isinstance(instance, str_types):
return True return True
return datetime.datetime.strptime(instance, "%H:%M:%S") return datetime.datetime.strptime(instance, "%H:%M:%S")
try:
import webcolors
except ImportError:
pass
else:
def is_css_color_code(instance):
return webcolors.normalize_hex(instance)
@_checks_drafts(draft3="color", raises=(ValueError, TypeError))
def is_css21_color(instance):
if (
not isinstance(instance, str_types) or
instance.lower() in webcolors.css21_names_to_hex
):
return True
return is_css_color_code(instance)
def is_css3_color(instance):
if instance.lower() in webcolors.css3_names_to_hex:
return True
return is_css_color_code(instance)
try:
import jsonpointer
except ImportError:
pass
else:
@_checks_drafts(
draft6="json-pointer",
draft7="json-pointer",
raises=jsonpointer.JsonPointerException,
)
def is_json_pointer(instance):
if not isinstance(instance, str_types):
return True
return jsonpointer.JsonPointer(instance)
# TODO: I don't want to maintain this, so it
# needs to go either into jsonpointer (pending
# https://github.com/stefankoegl/python-json-pointer/issues/34) or
# into a new external library.
@_checks_drafts(
draft7="relative-json-pointer",
raises=jsonpointer.JsonPointerException,
)
def is_relative_json_pointer(instance):
# Definition taken from:
# https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
if not isinstance(instance, str_types):
return True
non_negative_integer, rest = [], ""
for i, character in enumerate(instance):
if character.isdigit():
non_negative_integer.append(character)
continue
if not non_negative_integer:
return False
rest = instance[i:]
break
return (rest == "#") or jsonpointer.JsonPointer(rest)
try:
import uritemplate.exceptions
except ImportError:
pass
else:
@_checks_drafts(
draft6="uri-template",
draft7="uri-template",
raises=uritemplate.exceptions.InvalidTemplate,
)
def is_uri_template(
instance,
template_validator=uritemplate.Validator().force_balanced_braces(),
):
template = uritemplate.URITemplate(instance)
return template_validator.validate(template)
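Every checker in this file follows one idiom: attempt to import an optional dependency, and register the format validator only if the import succeeds, so a missing extra degrades to "format not checked" rather than an error. A stripped-down sketch of that idiom (the registry and decorator are invented here, not jsonschema internals):

FORMAT_CHECKERS = {}

def register(name):
    """Toy stand-in for _checks_drafts: record a checker under `name`."""
    def decorator(func):
        FORMAT_CHECKERS[name] = func
        return func
    return decorator

try:
    import ipaddress  # stands in for an optional dependency such as rfc3987
except ImportError:
    pass              # dependency missing: the format simply goes unchecked
else:
    @register("ipv4")
    def is_ipv4(instance):
        if not isinstance(instance, str):
            return True  # non-strings are left to ordinary type validation
        ipaddress.IPv4Address(instance)  # raises ValueError if malformed
        return True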

View File

@@ -497,7 +497,7 @@ def copy_attributes(self, t, memo=None):
Tag.attrib, merge_attrib]: Tag.attrib, merge_attrib]:
if hasattr(self, a): if hasattr(self, a):
if memo is not None: if memo is not None:
setattr(t, a, copy.deepcopy(getattr(self, a), memo)) setattr(t, a, copy.deepcopy(getattr(self, a, memo)))
else: else:
setattr(t, a, getattr(self, a)) setattr(t, a, getattr(self, a))
# fmt: on # fmt: on
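The one-character move above is the whole bug: getattr(self, a, memo) passes the memo dict as getattr's default value, whereas copy.deepcopy(getattr(self, a), memo) hands it to deepcopy, which uses it to copy shared objects exactly once. A small demonstration of what the memo argument preserves:

import copy

shared = [1, 2, 3]
obj = {"a": shared, "b": shared}   # two references to one list

memo = {}
dup = copy.deepcopy(obj, memo)     # memo maps id(original) -> its copy
assert dup["a"] is dup["b"]        # aliasing survives the deep copy
assert dup["a"] is not shared      # but the data itself was duplicated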

View File

@@ -1,3 +1,3 @@
"""Init file to avoid namespace packages""" """Init file to avoid namespace packages"""
__version__ = "0.2.4" __version__ = "0.2.3"

View File

@@ -5,10 +5,9 @@
"""The "cpu" package permits to query and compare different """The "cpu" package permits to query and compare different
CPU microarchitectures. CPU microarchitectures.
""" """
from .detect import brand_string, host from .detect import host
from .microarchitecture import ( from .microarchitecture import (
TARGETS, TARGETS,
InvalidCompilerVersion,
Microarchitecture, Microarchitecture,
UnsupportedMicroarchitecture, UnsupportedMicroarchitecture,
generic_microarchitecture, generic_microarchitecture,
@@ -16,12 +15,10 @@
) )
__all__ = [ __all__ = [
"brand_string",
"host",
"TARGETS",
"InvalidCompilerVersion",
"Microarchitecture", "Microarchitecture",
"UnsupportedMicroarchitecture", "UnsupportedMicroarchitecture",
"TARGETS",
"generic_microarchitecture", "generic_microarchitecture",
"host",
"version_components", "version_components",
] ]

View File

@@ -47,11 +47,7 @@ def decorator(factory):
def partial_uarch( def partial_uarch(
name: str = "", name: str = "", vendor: str = "", features: Optional[Set[str]] = None, generation: int = 0
vendor: str = "",
features: Optional[Set[str]] = None,
generation: int = 0,
cpu_part: str = "",
) -> Microarchitecture: ) -> Microarchitecture:
"""Construct a partial microarchitecture, from information gathered during system scan.""" """Construct a partial microarchitecture, from information gathered during system scan."""
return Microarchitecture( return Microarchitecture(
@@ -61,7 +57,6 @@ def partial_uarch(
features=features or set(), features=features or set(),
compilers={}, compilers={},
generation=generation, generation=generation,
cpu_part=cpu_part,
) )
@@ -95,7 +90,6 @@ def proc_cpuinfo() -> Microarchitecture:
return partial_uarch( return partial_uarch(
vendor=_canonicalize_aarch64_vendor(data), vendor=_canonicalize_aarch64_vendor(data),
features=_feature_set(data, key="Features"), features=_feature_set(data, key="Features"),
cpu_part=data.get("CPU part", ""),
) )
if architecture in (PPC64LE, PPC64): if architecture in (PPC64LE, PPC64):
@@ -161,31 +155,6 @@ def _is_bit_set(self, register: int, bit: int) -> bool:
mask = 1 << bit mask = 1 << bit
return register & mask > 0 return register & mask > 0
def brand_string(self) -> Optional[str]:
"""Returns the brand string, if available."""
if self.highest_extension_support < 0x80000004:
return None
r1 = self.cpuid.registers_for(eax=0x80000002, ecx=0)
r2 = self.cpuid.registers_for(eax=0x80000003, ecx=0)
r3 = self.cpuid.registers_for(eax=0x80000004, ecx=0)
result = struct.pack(
"IIIIIIIIIIII",
r1.eax,
r1.ebx,
r1.ecx,
r1.edx,
r2.eax,
r2.ebx,
r2.ecx,
r2.edx,
r3.eax,
r3.ebx,
r3.ecx,
r3.edx,
).decode("utf-8")
return result.strip("\x00")
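The removed brand_string method reassembles the 48 ASCII bytes that CPUID leaves 0x80000002-0x80000004 spread across twelve 32-bit registers. A self-contained sketch of just the decoding step, using fabricated register values:

import struct

# Twelve 32-bit values as CPUID would hand them back (fabricated example).
regs = struct.unpack("<12I", b"Imaginary CPU @ 3.00GHz".ljust(48, b"\x00"))

brand = struct.pack("<12I", *regs).decode("utf-8").strip("\x00")
print(brand)  # Imaginary CPU @ 3.00GHz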
@detection(operating_system="Windows") @detection(operating_system="Windows")
def cpuid_info(): def cpuid_info():
@@ -205,8 +174,8 @@ def _check_output(args, env):
WINDOWS_MAPPING = { WINDOWS_MAPPING = {
"AMD64": X86_64, "AMD64": "x86_64",
"ARM64": AARCH64, "ARM64": "aarch64",
} }
@@ -351,10 +320,6 @@ def sorting_fn(item):
generic_candidates = [c for c in candidates if c.vendor == "generic"] generic_candidates = [c for c in candidates if c.vendor == "generic"]
best_generic = max(generic_candidates, key=sorting_fn) best_generic = max(generic_candidates, key=sorting_fn)
# Relevant for AArch64. Filter on "cpu_part" if we have any match
if info.cpu_part != "" and any(c for c in candidates if info.cpu_part == c.cpu_part):
candidates = [c for c in candidates if info.cpu_part == c.cpu_part]
# Filter the candidates to be descendant of the best generic candidate. # Filter the candidates to be descendant of the best generic candidate.
# This is to avoid that the lack of a niche feature that can be disabled # This is to avoid that the lack of a niche feature that can be disabled
# from e.g. BIOS prevents detection of a reasonably performant architecture # from e.g. BIOS prevents detection of a reasonably performant architecture
@@ -444,16 +409,3 @@ def compatibility_check_for_riscv64(info, target):
return (target == arch_root or arch_root in target.ancestors) and ( return (target == arch_root or arch_root in target.ancestors) and (
target.name == info.name or target.vendor == "generic" target.name == info.name or target.vendor == "generic"
) )
def brand_string() -> Optional[str]:
"""Returns the brand string of the host, if detected, or None."""
if platform.system() == "Darwin":
return _check_output(
["sysctl", "-n", "machdep.cpu.brand_string"], env=_ensure_bin_usrbin_in_path()
).strip()
if host().family == X86_64:
return CpuidInfoCollector().brand_string()
return None

View File

@@ -2,7 +2,9 @@
# Archspec Project Developers. See the top-level COPYRIGHT file for details. # Archspec Project Developers. See the top-level COPYRIGHT file for details.
# #
# SPDX-License-Identifier: (Apache-2.0 OR MIT) # SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Types and functions to manage information on CPU microarchitectures.""" """Types and functions to manage information
on CPU microarchitectures.
"""
import functools import functools
import platform import platform
import re import re
@@ -63,31 +65,23 @@ class Microarchitecture:
passed in as argument above. passed in as argument above.
* versions: versions that support this micro-architecture. * versions: versions that support this micro-architecture.
generation (int): generation of the micro-architecture, if relevant. generation (int): generation of the micro-architecture, if
cpu_part (str): cpu part of the architecture, if relevant. relevant.
""" """
# pylint: disable=too-many-arguments,too-many-instance-attributes # pylint: disable=too-many-arguments
#: Aliases for micro-architecture's features #: Aliases for micro-architecture's features
feature_aliases = FEATURE_ALIASES feature_aliases = FEATURE_ALIASES
def __init__(self, name, parents, vendor, features, compilers, generation=0, cpu_part=""): def __init__(self, name, parents, vendor, features, compilers, generation=0):
self.name = name self.name = name
self.parents = parents self.parents = parents
self.vendor = vendor self.vendor = vendor
self.features = features self.features = features
self.compilers = compilers self.compilers = compilers
# Only relevant for PowerPC
self.generation = generation self.generation = generation
# Only relevant for AArch64 # Cache the ancestor computation
self.cpu_part = cpu_part
# Cache the "ancestor" computation
self._ancestors = None self._ancestors = None
# Cache the "generic" computation
self._generic = None
# Cache the "family" computation
self._family = None
@property @property
def ancestors(self): def ancestors(self):
@@ -117,12 +111,8 @@ def __eq__(self, other):
and self.parents == other.parents # avoid ancestors here and self.parents == other.parents # avoid ancestors here
and self.compilers == other.compilers and self.compilers == other.compilers
and self.generation == other.generation and self.generation == other.generation
and self.cpu_part == other.cpu_part
) )
def __hash__(self):
return hash(self.name)
@coerce_target_names @coerce_target_names
def __ne__(self, other): def __ne__(self, other):
return not self == other return not self == other
@@ -153,8 +143,7 @@ def __repr__(self):
cls_name = self.__class__.__name__ cls_name = self.__class__.__name__
fmt = ( fmt = (
cls_name + "({0.name!r}, {0.parents!r}, {0.vendor!r}, " cls_name + "({0.name!r}, {0.parents!r}, {0.vendor!r}, "
"{0.features!r}, {0.compilers!r}, generation={0.generation!r}, " "{0.features!r}, {0.compilers!r}, {0.generation!r})"
"cpu_part={0.cpu_part!r})"
) )
return fmt.format(self) return fmt.format(self)
@@ -179,22 +168,18 @@ def __contains__(self, feature):
@property @property
def family(self): def family(self):
"""Returns the architecture family a given target belongs to""" """Returns the architecture family a given target belongs to"""
if self._family is None: roots = [x for x in [self] + self.ancestors if not x.ancestors]
roots = [x for x in [self] + self.ancestors if not x.ancestors] msg = "a target is expected to belong to just one architecture family"
msg = "a target is expected to belong to just one architecture family" msg += f"[found {', '.join(str(x) for x in roots)}]"
msg += f"[found {', '.join(str(x) for x in roots)}]" assert len(roots) == 1, msg
assert len(roots) == 1, msg
self._family = roots.pop()
return self._family return roots.pop()
@property @property
def generic(self): def generic(self):
"""Returns the best generic architecture that is compatible with self""" """Returns the best generic architecture that is compatible with self"""
if self._generic is None: generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
generics = [x for x in [self] + self.ancestors if x.vendor == "generic"] return max(generics, key=lambda x: len(x.ancestors))
self._generic = max(generics, key=lambda x: len(x.ancestors))
return self._generic
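The _ancestors/_generic/_family attributes added above are hand-rolled memoization of expensive property lookups. In modern Python the same effect can come from functools.cached_property; a sketch under that assumption, not what archspec actually ships:

import functools

class Node:
    def __init__(self, parents):
        self.parents = parents

    @functools.cached_property
    def ancestors(self):
        """Transitive parents, computed once and stored on the instance."""
        out = list(self.parents)
        for p in self.parents:
            out.extend(a for a in p.ancestors if a not in out)
        return out

    @functools.cached_property
    def family(self):
        roots = [x for x in [self] + self.ancestors if not x.ancestors]
        assert len(roots) == 1, "expected exactly one architecture family"
        return roots[0]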
def to_dict(self): def to_dict(self):
"""Returns a dictionary representation of this object.""" """Returns a dictionary representation of this object."""
@@ -205,7 +190,6 @@ def to_dict(self):
"generation": self.generation, "generation": self.generation,
"parents": [str(x) for x in self.parents], "parents": [str(x) for x in self.parents],
"compilers": self.compilers, "compilers": self.compilers,
"cpupart": self.cpu_part,
} }
@staticmethod @staticmethod
@@ -218,15 +202,12 @@ def from_dict(data) -> "Microarchitecture":
features=set(data["features"]), features=set(data["features"]),
compilers=data.get("compilers", {}), compilers=data.get("compilers", {}),
generation=data.get("generation", 0), generation=data.get("generation", 0),
cpu_part=data.get("cpupart", ""),
) )
def optimization_flags(self, compiler, version): def optimization_flags(self, compiler, version):
"""Returns a string containing the optimization flags that needs """Returns a string containing the optimization flags that needs
to be used to produce code optimized for this micro-architecture. to be used to produce code optimized for this micro-architecture.
The version is expected to be a string of dot separated digits.
If there is no information on the compiler passed as argument the If there is no information on the compiler passed as argument the
function returns an empty string. If it is known that the compiler function returns an empty string. If it is known that the compiler
version we want to use does not support this architecture the function version we want to use does not support this architecture the function
@@ -235,11 +216,6 @@ def optimization_flags(self, compiler, version):
Args: Args:
compiler (str): name of the compiler to be used compiler (str): name of the compiler to be used
version (str): version of the compiler to be used version (str): version of the compiler to be used
Raises:
UnsupportedMicroarchitecture: if the requested compiler does not support
this micro-architecture.
ValueError: if the version doesn't match the expected format
""" """
# If we don't have information on compiler at all return an empty string # If we don't have information on compiler at all return an empty string
if compiler not in self.family.compilers: if compiler not in self.family.compilers:
@@ -256,14 +232,6 @@ def optimization_flags(self, compiler, version):
msg = msg.format(compiler, best_target, best_target.family) msg = msg.format(compiler, best_target, best_target.family)
raise UnsupportedMicroarchitecture(msg) raise UnsupportedMicroarchitecture(msg)
# Check that the version matches the expected format
if not re.match(r"^(?:\d+\.)*\d+$", version):
msg = (
"invalid format for the compiler version argument. "
"Only dot separated digits are allowed."
)
raise InvalidCompilerVersion(msg)
# If we have information on this compiler we need to check the # If we have information on this compiler we need to check the
# version being used # version being used
compiler_info = self.compilers[compiler] compiler_info = self.compilers[compiler]
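The new validation accepts exactly dot-separated digit groups and rejects anything else before the per-compiler range checks run. The regex is easy to sanity-check in isolation:

import re

VERSION_RE = re.compile(r"^(?:\d+\.)*\d+$")  # one or more digit groups joined by dots

for v in ("10", "10.2.1", "10.2.", "v10", ""):
    print(repr(v), bool(VERSION_RE.match(v)))
# '10' True, '10.2.1' True, '10.2.' False, 'v10' False, '' False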
@@ -324,7 +292,7 @@ def generic_microarchitecture(name):
Args: Args:
name (str): name of the micro-architecture name (str): name of the micro-architecture
""" """
return Microarchitecture(name, parents=[], vendor="generic", features=set(), compilers={}) return Microarchitecture(name, parents=[], vendor="generic", features=[], compilers={})
def version_components(version): def version_components(version):
@@ -377,11 +345,8 @@ def fill_target_from_dict(name, data, targets):
features = set(values["features"]) features = set(values["features"])
compilers = values.get("compilers", {}) compilers = values.get("compilers", {})
generation = values.get("generation", 0) generation = values.get("generation", 0)
cpu_part = values.get("cpupart", "")
targets[name] = Microarchitecture( targets[name] = Microarchitecture(name, parents, vendor, features, compilers, generation)
name, parents, vendor, features, compilers, generation=generation, cpu_part=cpu_part
)
known_targets = {} known_targets = {}
data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"] data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"]
@@ -402,15 +367,7 @@ def fill_target_from_dict(name, data, targets):
TARGETS = LazyDictionary(_known_microarchitectures) TARGETS = LazyDictionary(_known_microarchitectures)
class ArchspecError(Exception): class UnsupportedMicroarchitecture(ValueError):
"""Base class for errors within archspec"""
class UnsupportedMicroarchitecture(ArchspecError, ValueError):
"""Raised if a compiler version does not support optimization for a given """Raised if a compiler version does not support optimization for a given
micro-architecture. micro-architecture.
""" """
class InvalidCompilerVersion(ArchspecError, ValueError):
"""Raised when an invalid format is used for compiler versions in archspec."""

View File

@@ -1482,6 +1482,7 @@
"cldemote", "cldemote",
"movdir64b", "movdir64b",
"movdiri", "movdiri",
"pdcm",
"serialize", "serialize",
"waitpkg" "waitpkg"
], ],
@@ -2224,96 +2225,14 @@
], ],
"nvhpc": [ "nvhpc": [
{ {
"versions": "21.11:23.8", "versions": "21.11:",
"name": "zen3", "name": "zen3",
"flags": "-tp {name}", "flags": "-tp {name}",
"warnings": "zen4 is not fully supported by nvhpc versions < 23.9, falling back to zen3" "warnings": "zen4 is not fully supported by nvhpc yet, falling back to zen3"
},
{
"versions": "23.9:",
"flags": "-tp {name}"
} }
] ]
} }
}, },
"zen5": {
"from": ["zen4"],
"vendor": "AuthenticAMD",
"features": [
"abm",
"aes",
"avx",
"avx2",
"avx512_bf16",
"avx512_bitalg",
"avx512bw",
"avx512cd",
"avx512dq",
"avx512f",
"avx512ifma",
"avx512vbmi",
"avx512_vbmi2",
"avx512vl",
"avx512_vnni",
"avx512_vp2intersect",
"avx512_vpopcntdq",
"avx_vnni",
"bmi1",
"bmi2",
"clflushopt",
"clwb",
"clzero",
"cppc",
"cx16",
"f16c",
"flush_l1d",
"fma",
"fsgsbase",
"gfni",
"ibrs_enhanced",
"mmx",
"movbe",
"movdir64b",
"movdiri",
"pclmulqdq",
"popcnt",
"rdseed",
"sse",
"sse2",
"sse4_1",
"sse4_2",
"sse4a",
"ssse3",
"tsc_adjust",
"vaes",
"vpclmulqdq",
"xsavec",
"xsaveopt"
],
"compilers": {
"gcc": [
{
"versions": "14.1:",
"name": "znver5",
"flags": "-march={name} -mtune={name}"
}
],
"aocc": [
{
"versions": "5.0:",
"name": "znver5",
"flags": "-march={name} -mtune={name}"
}
],
"clang": [
{
"versions": "19.1:",
"name": "znver5",
"flags": "-march={name} -mtune={name}"
}
]
}
},
"ppc64": { "ppc64": {
"from": [], "from": [],
"vendor": "generic", "vendor": "generic",
@@ -2792,8 +2711,7 @@
"flags": "-mcpu=thunderx2t99" "flags": "-mcpu=thunderx2t99"
} }
] ]
}, }
"cpupart": "0x0af"
}, },
"a64fx": { "a64fx": {
"from": ["armv8.2a"], "from": ["armv8.2a"],
@@ -2861,8 +2779,7 @@
"flags": "-march=armv8.2-a+crc+crypto+fp16+sve" "flags": "-march=armv8.2-a+crc+crypto+fp16+sve"
} }
] ]
}, }
"cpupart": "0x001"
}, },
"cortex_a72": { "cortex_a72": {
"from": ["aarch64"], "from": ["aarch64"],
@@ -2899,8 +2816,7 @@
"flags" : "-mcpu=cortex-a72" "flags" : "-mcpu=cortex-a72"
} }
] ]
}, }
"cpupart": "0xd08"
}, },
"neoverse_n1": { "neoverse_n1": {
"from": ["cortex_a72", "armv8.2a"], "from": ["cortex_a72", "armv8.2a"],
@@ -2921,7 +2837,8 @@
"asimdrdm", "asimdrdm",
"lrcpc", "lrcpc",
"dcpop", "dcpop",
"asimddp" "asimddp",
"ssbs"
], ],
"compilers" : { "compilers" : {
"gcc": [ "gcc": [
@@ -2985,8 +2902,7 @@
"flags": "-tp {name}" "flags": "-tp {name}"
} }
] ]
}, }
"cpupart": "0xd0c"
}, },
"neoverse_v1": { "neoverse_v1": {
"from": ["neoverse_n1", "armv8.4a"], "from": ["neoverse_n1", "armv8.4a"],
@@ -3010,6 +2926,8 @@
"lrcpc", "lrcpc",
"dcpop", "dcpop",
"sha3", "sha3",
"sm3",
"sm4",
"asimddp", "asimddp",
"sha512", "sha512",
"sve", "sve",
@@ -3018,6 +2936,9 @@
"uscat", "uscat",
"ilrcpc", "ilrcpc",
"flagm", "flagm",
"ssbs",
"paca",
"pacg",
"dcpodp", "dcpodp",
"svei8mm", "svei8mm",
"svebf16", "svebf16",
@@ -3085,7 +3006,7 @@
}, },
{ {
"versions": "11:", "versions": "11:",
"flags" : "-march=armv8.4-a+sve+fp16+bf16+crypto+i8mm+rng" "flags" : "-march=armv8.4-a+sve+ssbs+fp16+bf16+crypto+i8mm+rng"
}, },
{ {
"versions": "12:", "versions": "12:",
@@ -3109,8 +3030,7 @@
"flags": "-tp {name}" "flags": "-tp {name}"
} }
] ]
}, }
"cpupart": "0xd40"
}, },
"neoverse_v2": { "neoverse_v2": {
"from": ["neoverse_n1", "armv9.0a"], "from": ["neoverse_n1", "armv9.0a"],
@@ -3134,22 +3054,35 @@
"lrcpc", "lrcpc",
"dcpop", "dcpop",
"sha3", "sha3",
"sm3",
"sm4",
"asimddp", "asimddp",
"sha512", "sha512",
"sve", "sve",
"asimdfhm", "asimdfhm",
"dit",
"uscat", "uscat",
"ilrcpc", "ilrcpc",
"flagm", "flagm",
"ssbs",
"sb", "sb",
"paca",
"pacg",
"dcpodp", "dcpodp",
"sve2", "sve2",
"sveaes",
"svepmull",
"svebitperm",
"svesha3",
"svesm4",
"flagm2", "flagm2",
"frint", "frint",
"svei8mm", "svei8mm",
"svebf16", "svebf16",
"i8mm", "i8mm",
"bf16" "bf16",
"dgh",
"bti"
], ],
"compilers" : { "compilers" : {
"gcc": [ "gcc": [
@@ -3174,19 +3107,15 @@
"flags" : "-march=armv8.5-a+sve -mtune=cortex-a76" "flags" : "-march=armv8.5-a+sve -mtune=cortex-a76"
}, },
{ {
"versions": "10.0:11.3.99", "versions": "10.0:11.99",
"flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16 -mtune=cortex-a77" "flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16 -mtune=cortex-a77"
}, },
{
"versions": "11.4:11.99",
"flags" : "-mcpu=neoverse-v2"
},
{ {
"versions": "12.0:12.2.99", "versions": "12.0:12.99",
"flags" : "-march=armv9-a+i8mm+bf16 -mtune=cortex-a710" "flags" : "-march=armv9-a+i8mm+bf16 -mtune=cortex-a710"
}, },
{ {
"versions": "12.3:", "versions": "13.0:",
"flags" : "-mcpu=neoverse-v2" "flags" : "-mcpu=neoverse-v2"
} }
], ],
@@ -3221,112 +3150,7 @@
"flags": "-tp {name}" "flags": "-tp {name}"
} }
] ]
}, }
"cpupart": "0xd4f"
},
"neoverse_n2": {
"from": ["neoverse_n1", "armv9.0a"],
"vendor": "ARM",
"features": [
"fp",
"asimd",
"evtstrm",
"aes",
"pmull",
"sha1",
"sha2",
"crc32",
"atomics",
"fphp",
"asimdhp",
"cpuid",
"asimdrdm",
"jscvt",
"fcma",
"lrcpc",
"dcpop",
"sha3",
"asimddp",
"sha512",
"sve",
"asimdfhm",
"uscat",
"ilrcpc",
"flagm",
"sb",
"dcpodp",
"sve2",
"flagm2",
"frint",
"svei8mm",
"svebf16",
"i8mm",
"bf16"
],
"compilers" : {
"gcc": [
{
"versions": "4.8:5.99",
"flags": "-march=armv8-a"
},
{
"versions": "6:6.99",
"flags" : "-march=armv8.1-a"
},
{
"versions": "7.0:7.99",
"flags" : "-march=armv8.2-a -mtune=cortex-a72"
},
{
"versions": "8.0:8.99",
"flags" : "-march=armv8.4-a+sve -mtune=cortex-a72"
},
{
"versions": "9.0:9.99",
"flags" : "-march=armv8.5-a+sve -mtune=cortex-a76"
},
{
"versions": "10.0:10.99",
"flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16 -mtune=cortex-a77"
},
{
"versions": "11.0:",
"flags" : "-mcpu=neoverse-n2"
}
],
"clang" : [
{
"versions": "9.0:10.99",
"flags" : "-march=armv8.5-a+sve"
},
{
"versions": "11.0:13.99",
"flags" : "-march=armv8.5-a+sve+sve2+i8mm+bf16"
},
{
"versions": "14.0:15.99",
"flags" : "-march=armv9-a+i8mm+bf16"
},
{
"versions": "16.0:",
"flags" : "-mcpu=neoverse-n2"
}
],
"arm" : [
{
"versions": "23.04.0:",
"flags" : "-mcpu=neoverse-n2"
}
],
"nvhpc" : [
{
"versions": "23.3:",
"name": "neoverse-n1",
"flags": "-tp {name}"
}
]
},
"cpupart": "0xd49"
}, },
"m1": { "m1": {
"from": ["armv8.4a"], "from": ["armv8.4a"],
@@ -3392,8 +3216,7 @@
"flags" : "-mcpu=apple-m1" "flags" : "-mcpu=apple-m1"
} }
] ]
}, }
"cpupart": "0x022"
}, },
"m2": { "m2": {
"from": ["m1", "armv8.5a"], "from": ["m1", "armv8.5a"],
@@ -3471,8 +3294,7 @@
"flags" : "-mcpu=apple-m2" "flags" : "-mcpu=apple-m2"
} }
] ]
}, }
"cpupart": "0x032"
}, },
"arm": { "arm": {
"from": [], "from": [],

View File

@@ -52,9 +52,6 @@
} }
} }
} }
},
"cpupart": {
"type": "string"
} }
}, },
"required": [ "required": [
@@ -110,4 +107,4 @@
"additionalProperties": false "additionalProperties": false
} }
} }
} }

View File

@@ -1,45 +0,0 @@
diff --git a/lib/spack/external/_vendoring/distro/distro.py b/lib/spack/external/_vendoring/distro/distro.py
index 89e1868047..50c3b18d4d 100644
--- a/lib/spack/external/_vendoring/distro/distro.py
+++ b/lib/spack/external/_vendoring/distro/distro.py
@@ -1265,27 +1265,29 @@ def _distro_release_info(self) -> Dict[str, str]:
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
else:
try:
- basenames = [
- basename
- for basename in os.listdir(self.etc_dir)
- if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES
- and os.path.isfile(os.path.join(self.etc_dir, basename))
- ]
+ with os.scandir(self.etc_dir) as it:
+ etc_files = [
+ p.path for p in it
+ if p.is_file() and p.name not in _DISTRO_RELEASE_IGNORE_BASENAMES
+ ]
# We sort for repeatability in cases where there are multiple
# distro specific files; e.g. CentOS, Oracle, Enterprise all
# containing `redhat-release` on top of their own.
- basenames.sort()
+ etc_files.sort()
except OSError:
# This may occur when /etc is not readable but we can't be
# sure about the *-release files. Check common entries of
# /etc for information. If they turn out to not be there the
# error is handled in `_parse_distro_release_file()`.
- basenames = _DISTRO_RELEASE_BASENAMES
- for basename in basenames:
- match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
+ etc_files = [
+ os.path.join(self.etc_dir, basename)
+ for basename in _DISTRO_RELEASE_BASENAMES
+ ]
+
+ for filepath in etc_files:
+ match = _DISTRO_RELEASE_BASENAME_PATTERN.match(os.path.basename(filepath))
if match is None:
continue
- filepath = os.path.join(self.etc_dir, basename)
distro_info = self._parse_distro_release_file(filepath)
# The name is always present if the pattern matches.
if "name" not in distro_info:

View File

@@ -13,191 +13,3 @@ index 6b630cdfbb..1791fe7fbf 100644
-__version__ = metadata.version("jsonschema") -__version__ = metadata.version("jsonschema")
+ +
+__version__ = "3.2.0" +__version__ = "3.2.0"
diff --git a/lib/spack/external/_vendoring/jsonschema/_format.py b/lib/spack/external/_vendoring/jsonschema/_format.py
index 281a7cfcff..29061e3661 100644
--- a/lib/spack/external/_vendoring/jsonschema/_format.py
+++ b/lib/spack/external/_vendoring/jsonschema/_format.py
@@ -231,96 +231,6 @@ def is_host_name(instance):
return True
-try:
- # The built-in `idna` codec only implements RFC 3890, so we go elsewhere.
- import idna
-except ImportError:
- pass
-else:
- @_checks_drafts(draft7="idn-hostname", raises=idna.IDNAError)
- def is_idn_host_name(instance):
- if not isinstance(instance, str_types):
- return True
- idna.encode(instance)
- return True
-
-
-try:
- import rfc3987
-except ImportError:
- try:
- from rfc3986_validator import validate_rfc3986
- except ImportError:
- pass
- else:
- @_checks_drafts(name="uri")
- def is_uri(instance):
- if not isinstance(instance, str_types):
- return True
- return validate_rfc3986(instance, rule="URI")
-
- @_checks_drafts(
- draft6="uri-reference",
- draft7="uri-reference",
- raises=ValueError,
- )
- def is_uri_reference(instance):
- if not isinstance(instance, str_types):
- return True
- return validate_rfc3986(instance, rule="URI_reference")
-
-else:
- @_checks_drafts(draft7="iri", raises=ValueError)
- def is_iri(instance):
- if not isinstance(instance, str_types):
- return True
- return rfc3987.parse(instance, rule="IRI")
-
- @_checks_drafts(draft7="iri-reference", raises=ValueError)
- def is_iri_reference(instance):
- if not isinstance(instance, str_types):
- return True
- return rfc3987.parse(instance, rule="IRI_reference")
-
- @_checks_drafts(name="uri", raises=ValueError)
- def is_uri(instance):
- if not isinstance(instance, str_types):
- return True
- return rfc3987.parse(instance, rule="URI")
-
- @_checks_drafts(
- draft6="uri-reference",
- draft7="uri-reference",
- raises=ValueError,
- )
- def is_uri_reference(instance):
- if not isinstance(instance, str_types):
- return True
- return rfc3987.parse(instance, rule="URI_reference")
-
-
-try:
- from strict_rfc3339 import validate_rfc3339
-except ImportError:
- try:
- from rfc3339_validator import validate_rfc3339
- except ImportError:
- validate_rfc3339 = None
-
-if validate_rfc3339:
- @_checks_drafts(name="date-time")
- def is_datetime(instance):
- if not isinstance(instance, str_types):
- return True
- return validate_rfc3339(instance)
-
- @_checks_drafts(draft7="time")
- def is_time(instance):
- if not isinstance(instance, str_types):
- return True
- return is_datetime("1970-01-01T" + instance)
-
-
@_checks_drafts(name="regex", raises=re.error)
def is_regex(instance):
if not isinstance(instance, str_types):
@@ -340,86 +250,3 @@ def is_draft3_time(instance):
if not isinstance(instance, str_types):
return True
return datetime.datetime.strptime(instance, "%H:%M:%S")
-
-
-try:
- import webcolors
-except ImportError:
- pass
-else:
- def is_css_color_code(instance):
- return webcolors.normalize_hex(instance)
-
- @_checks_drafts(draft3="color", raises=(ValueError, TypeError))
- def is_css21_color(instance):
- if (
- not isinstance(instance, str_types) or
- instance.lower() in webcolors.css21_names_to_hex
- ):
- return True
- return is_css_color_code(instance)
-
- def is_css3_color(instance):
- if instance.lower() in webcolors.css3_names_to_hex:
- return True
- return is_css_color_code(instance)
-
-
-try:
- import jsonpointer
-except ImportError:
- pass
-else:
- @_checks_drafts(
- draft6="json-pointer",
- draft7="json-pointer",
- raises=jsonpointer.JsonPointerException,
- )
- def is_json_pointer(instance):
- if not isinstance(instance, str_types):
- return True
- return jsonpointer.JsonPointer(instance)
-
- # TODO: I don't want to maintain this, so it
- # needs to go either into jsonpointer (pending
- # https://github.com/stefankoegl/python-json-pointer/issues/34) or
- # into a new external library.
- @_checks_drafts(
- draft7="relative-json-pointer",
- raises=jsonpointer.JsonPointerException,
- )
- def is_relative_json_pointer(instance):
- # Definition taken from:
- # https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
- if not isinstance(instance, str_types):
- return True
- non_negative_integer, rest = [], ""
- for i, character in enumerate(instance):
- if character.isdigit():
- non_negative_integer.append(character)
- continue
-
- if not non_negative_integer:
- return False
-
- rest = instance[i:]
- break
- return (rest == "#") or jsonpointer.JsonPointer(rest)
-
-
-try:
- import uritemplate.exceptions
-except ImportError:
- pass
-else:
- @_checks_drafts(
- draft6="uri-template",
- draft7="uri-template",
- raises=uritemplate.exceptions.InvalidTemplate,
- )
- def is_uri_template(
- instance,
- template_validator=uritemplate.Validator().force_balanced_braces(),
- ):
- template = uritemplate.URITemplate(instance)
- return template_validator.validate(template)

View File

@@ -1,13 +0,0 @@
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/comments.py b/lib/spack/external/_vendoring/ruamel/yaml/comments.py
index 1badeda585..892c868af3 100644
--- a/lib/spack/external/_vendoring/ruamel/yaml/comments.py
+++ b/lib/spack/external/_vendoring/ruamel/yaml/comments.py
@@ -497,7 +497,7 @@ def copy_attributes(self, t, memo=None):
Tag.attrib, merge_attrib]:
if hasattr(self, a):
if memo is not None:
- setattr(t, a, copy.deepcopy(getattr(self, a, memo)))
+ setattr(t, a, copy.deepcopy(getattr(self, a), memo))
else:
setattr(t, a, getattr(self, a))
# fmt: on

View File

@@ -98,10 +98,3 @@ def path_filter_caller(*args, **kwargs):
if _func: if _func:
return holder_func(_func) return holder_func(_func)
return holder_func return holder_func
def sanitize_win_longpath(path: str) -> str:
"""Strip Windows extended path prefix from strings
Returns sanitized string.
no-op if extended path prefix is not present"""
return path.lstrip("\\\\?\\")

View File

@@ -41,20 +41,6 @@ def comma_and(sequence: List[str]) -> str:
return comma_list(sequence, "and") return comma_list(sequence, "and")
def ordinal(number: int) -> str:
"""Return the ordinal representation (1st, 2nd, 3rd, etc.) for the provided number.
Args:
number: int to convert to ordinal number
Returns: number's corresponding ordinal
"""
idx = (number % 10) << 1
tens = number % 100 // 10
suffix = "th" if tens == 1 or idx > 6 else "thstndrd"[idx : idx + 2]
return f"{number}{suffix}"
def quote(sequence: List[str], q: str = "'") -> List[str]:
    """Quotes each item in the input list with the quote character passed as second argument."""
    return [f"{q}{e}{q}" for e in sequence]

View File

@@ -20,25 +20,15 @@
import tempfile import tempfile
from contextlib import contextmanager from contextlib import contextmanager
from itertools import accumulate from itertools import accumulate
from typing import ( from typing import Callable, Iterable, List, Match, Optional, Tuple, Union
Callable,
Deque,
Dict,
Iterable,
List,
Match,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import llnl.util.symlink import llnl.util.symlink
from llnl.util import tty from llnl.util import tty
from llnl.util.lang import dedupe, fnmatch_translate_multiple, memoized from llnl.util.lang import dedupe, memoized
from llnl.util.symlink import islink, readlink, resolve_link_target_relative_to_the_link, symlink from llnl.util.symlink import islink, readlink, resolve_link_target_relative_to_the_link, symlink
from spack.util.executable import Executable, which
from ..path import path_to_os_path, system_path_filter from ..path import path_to_os_path, system_path_filter
if sys.platform != "win32": if sys.platform != "win32":
@@ -59,11 +49,11 @@
"copy_mode", "copy_mode",
"filter_file", "filter_file",
"find", "find",
"find_first",
"find_headers", "find_headers",
"find_all_headers", "find_all_headers",
"find_libraries", "find_libraries",
"find_system_libraries", "find_system_libraries",
"fix_darwin_install_name",
"force_remove", "force_remove",
"force_symlink", "force_symlink",
"getuid", "getuid",
@@ -97,8 +87,6 @@
"visit_directory_tree", "visit_directory_tree",
] ]
Path = Union[str, pathlib.Path]
if sys.version_info < (3, 7, 4): if sys.version_info < (3, 7, 4):
# monkeypatch shutil.copystat to fix PermissionError when copying read-only # monkeypatch shutil.copystat to fix PermissionError when copying read-only
# files on Lustre when using Python < 3.7.4 # files on Lustre when using Python < 3.7.4
@@ -199,58 +187,26 @@ def polite_filename(filename: str) -> str:
return _polite_antipattern().sub("_", filename) return _polite_antipattern().sub("_", filename)
def getuid() -> Union[str, int]: def getuid():
"""Returns os getuid on non Windows
On Windows returns 0 for admin users, login string otherwise
This is in line with behavior from get_owner_uid which
always returns the login string on Windows
"""
if sys.platform == "win32": if sys.platform == "win32":
import ctypes import ctypes
# If not admin, use the string name of the login as a unique ID
if ctypes.windll.shell32.IsUserAnAdmin() == 0: if ctypes.windll.shell32.IsUserAnAdmin() == 0:
return os.getlogin() return 1
return 0 return 0
else: else:
return os.getuid() return os.getuid()
def _win_rename(src, dst):
# os.replace will still fail if on Windows (but not POSIX) if the dst
# is a symlink to a directory (all other cases have parity Windows <-> Posix)
if os.path.islink(dst) and os.path.isdir(os.path.realpath(dst)):
if os.path.samefile(src, dst):
# src and dst are the same
# do nothing and exit early
return
# If dst exists and is a symlink to a directory
# we need to remove dst and then perform rename/replace
# this is safe to do as there's no chance src == dst now
os.remove(dst)
os.replace(src, dst)
@system_path_filter
def msdos_escape_parens(path):
"""MS-DOS interprets parens as grouping parameters even in a quoted string"""
if sys.platform == "win32":
return path.replace("(", "^(").replace(")", "^)")
else:
return path
@system_path_filter @system_path_filter
def rename(src, dst): def rename(src, dst):
# On Windows, os.rename will fail if the destination file already exists # On Windows, os.rename will fail if the destination file already exists
# os.replace is the same as os.rename on POSIX and is MoveFileExW w/
# the MOVEFILE_REPLACE_EXISTING flag on Windows
# Windows invocation is abstracted behind additional logic handling
# remaining cases of divergent behavior across platforms
if sys.platform == "win32": if sys.platform == "win32":
_win_rename(src, dst) # Windows path existence checks will sometimes fail on junctions/links/symlinks
else: # so check for that case
os.replace(src, dst) if os.path.exists(dst) or islink(dst):
os.remove(dst)
os.rename(src, dst)
@system_path_filter @system_path_filter
@@ -260,6 +216,42 @@ def path_contains_subdirectory(path, root):
return norm_path.startswith(norm_root) return norm_path.startswith(norm_root)
@memoized
def file_command(*args):
"""Creates entry point to `file` system command with provided arguments"""
file_cmd = which("file", required=True)
for arg in args:
file_cmd.add_default_arg(arg)
return file_cmd
@memoized
def _get_mime_type():
"""Generate method to call `file` system command to aquire mime type
for a specified path
"""
if sys.platform == "win32":
# -h option (no-dereference) does not exist in Windows
return file_command("-b", "--mime-type")
else:
return file_command("-b", "-h", "--mime-type")
def mime_type(filename):
"""Returns the mime type and subtype of a file.
Args:
filename: file to be analyzed
Returns:
Tuple containing the MIME type and subtype
"""
output = _get_mime_type()(filename, output=str, error=str).strip()
tty.debug("==> " + output)
type, _, subtype = output.partition("/")
return type, subtype
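The parsing step is a single partition on the first slash; a standalone sketch (the sample output string is hypothetical):

.. code-block:: python

    output = "text/x-script.python"   # e.g. what `file -b --mime-type` prints
    mtype, _, subtype = output.partition("/")
    assert (mtype, subtype) == ("text", "x-script.python")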
#: This generates the library filenames that may appear on any OS.
library_extensions = ["a", "la", "so", "tbd", "dylib"]
@@ -544,13 +536,7 @@ def exploding_archive_handler(tarball_container, stage):
@system_path_filter(arg_slice=slice(1)) @system_path_filter(arg_slice=slice(1))
def get_owner_uid(path, err_msg=None) -> Union[str, int]: def get_owner_uid(path, err_msg=None):
"""Returns owner UID of path destination
On non Windows this is the value of st_uid
On Windows this is the login string associated with the
owning user.
"""
if not os.path.exists(path): if not os.path.exists(path):
mkdirp(path, mode=stat.S_IRWXU) mkdirp(path, mode=stat.S_IRWXU)
@@ -742,6 +728,7 @@ def copy_tree(
src: str, src: str,
dest: str, dest: str,
symlinks: bool = True, symlinks: bool = True,
allow_broken_symlinks: bool = sys.platform != "win32",
ignore: Optional[Callable[[str], bool]] = None, ignore: Optional[Callable[[str], bool]] = None,
_permissions: bool = False, _permissions: bool = False,
): ):
@@ -764,6 +751,8 @@ def copy_tree(
src (str): the directory to copy src (str): the directory to copy
dest (str): the destination directory dest (str): the destination directory
symlinks (bool): whether or not to preserve symlinks symlinks (bool): whether or not to preserve symlinks
allow_broken_symlinks (bool): whether or not to allow broken (dangling) symlinks,
On Windows, setting this to True will raise an exception. Defaults to True on Unix.
ignore (typing.Callable): function indicating which files to ignore ignore (typing.Callable): function indicating which files to ignore
_permissions (bool): for internal use only _permissions (bool): for internal use only
@@ -771,6 +760,8 @@ def copy_tree(
IOError: if *src* does not match any files or directories IOError: if *src* does not match any files or directories
ValueError: if *src* is a parent directory of *dest* ValueError: if *src* is a parent directory of *dest*
""" """
if allow_broken_symlinks and sys.platform == "win32":
raise llnl.util.symlink.SymlinkError("Cannot allow broken symlinks on Windows!")
if _permissions: if _permissions:
tty.debug("Installing {0} to {1}".format(src, dest)) tty.debug("Installing {0} to {1}".format(src, dest))
else: else:
@@ -814,7 +805,7 @@ def copy_tree(
if islink(s): if islink(s):
link_target = resolve_link_target_relative_to_the_link(s) link_target = resolve_link_target_relative_to_the_link(s)
if symlinks: if symlinks:
target = readlink(s) target = os.readlink(s)
if os.path.isabs(target): if os.path.isabs(target):
def escaped_path(path): def escaped_path(path):
@@ -843,14 +834,16 @@ def escaped_path(path):
copy_mode(s, d) copy_mode(s, d)
for target, d, s in links: for target, d, s in links:
symlink(target, d) symlink(target, d, allow_broken_symlinks=allow_broken_symlinks)
if _permissions: if _permissions:
set_install_permissions(d) set_install_permissions(d)
copy_mode(s, d) copy_mode(s, d)
@system_path_filter @system_path_filter
def install_tree(src, dest, symlinks=True, ignore=None): def install_tree(
src, dest, symlinks=True, ignore=None, allow_broken_symlinks=sys.platform != "win32"
):
"""Recursively install an entire directory tree rooted at *src*. """Recursively install an entire directory tree rooted at *src*.
Same as :py:func:`copy_tree` with the addition of setting proper Same as :py:func:`copy_tree` with the addition of setting proper
@@ -861,12 +854,21 @@ def install_tree(src, dest, symlinks=True, ignore=None):
dest (str): the destination directory dest (str): the destination directory
symlinks (bool): whether or not to preserve symlinks symlinks (bool): whether or not to preserve symlinks
ignore (typing.Callable): function indicating which files to ignore ignore (typing.Callable): function indicating which files to ignore
allow_broken_symlinks (bool): whether or not to allow broken (dangling) symlinks,
On Windows, setting this to True will raise an exception.
Raises: Raises:
IOError: if *src* does not match any files or directories IOError: if *src* does not match any files or directories
ValueError: if *src* is a parent directory of *dest* ValueError: if *src* is a parent directory of *dest*
""" """
copy_tree(src, dest, symlinks=symlinks, ignore=ignore, _permissions=True) copy_tree(
src,
dest,
symlinks=symlinks,
allow_broken_symlinks=allow_broken_symlinks,
ignore=ignore,
_permissions=True,
)
@system_path_filter @system_path_filter
@@ -1215,12 +1217,10 @@ def windows_sfn(path: os.PathLike):
import ctypes import ctypes
k32 = ctypes.WinDLL("kernel32", use_last_error=True) k32 = ctypes.WinDLL("kernel32", use_last_error=True)
# Method with null values returns size of short path name
sz = k32.GetShortPathNameW(path, None, 0)
# stub Windows types TCHAR[LENGTH] # stub Windows types TCHAR[LENGTH]
TCHAR_arr = ctypes.c_wchar * sz TCHAR_arr = ctypes.c_wchar * len(path)
ret_str = TCHAR_arr() ret_str = TCHAR_arr()
k32.GetShortPathNameW(path, ctypes.byref(ret_str), sz) k32.GetShortPathNameW(path, ret_str, len(path))
return ret_str.value return ret_str.value
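The left-hand version follows the usual Win32 two-call pattern: ask the API for the required buffer size, then call again to fill the buffer. A minimal sketch of that pattern (only meaningful on Windows; the path is hypothetical):

.. code-block:: python

    import ctypes
    import sys

    if sys.platform == "win32":
        k32 = ctypes.WinDLL("kernel32", use_last_error=True)
        path = "C:\\Program Files"
        sz = k32.GetShortPathNameW(path, None, 0)   # returns required length
        buf = ctypes.create_unicode_buffer(sz)
        k32.GetShortPathNameW(path, buf, sz)
        print(buf.value)                            # e.g. C:\PROGRA~1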
@@ -1600,12 +1600,6 @@ def remove_linked_tree(path):
shutil.rmtree(os.path.realpath(path), **kwargs) shutil.rmtree(os.path.realpath(path), **kwargs)
os.unlink(path) os.unlink(path)
else: else:
if sys.platform == "win32":
# Adding this prefix allows shutil to remove long paths on windows
# https://learn.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=registry
long_path_pfx = "\\\\?\\"
if not path.startswith(long_path_pfx):
path = long_path_pfx + path
shutil.rmtree(path, **kwargs) shutil.rmtree(path, **kwargs)
@@ -1655,6 +1649,41 @@ def safe_remove(*files_or_dirs):
raise raise
@system_path_filter
def fix_darwin_install_name(path):
"""Fix install name of dynamic libraries on Darwin to have full path.
There are two parts of this task:
1. Use ``install_name('-id', ...)`` to change install name of a single lib
2. Use ``install_name('-change', ...)`` to change the cross linking between
libs. The function assumes that all libraries are in one folder and
currently won't follow subfolders.
Parameters:
path (str): directory in which .dylib files are located
"""
libs = glob.glob(join_path(path, "*.dylib"))
for lib in libs:
# fix install name first:
install_name_tool = Executable("install_name_tool")
install_name_tool("-id", lib, lib)
otool = Executable("otool")
long_deps = otool("-L", lib, output=str).split("\n")
deps = [dep.partition(" ")[0][1::] for dep in long_deps[2:-1]]
# fix all dependencies:
for dep in deps:
for loc in libs:
# We really want to check for either
# dep == os.path.basename(loc) or
# dep == join_path(builddir, os.path.basename(loc)),
# but we don't know builddir (nor how symbolic links look
# in builddir). We thus only compare the basenames.
if os.path.basename(dep) == os.path.basename(loc):
install_name_tool("-change", dep, loc, lib)
break
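The ``otool -L`` parsing above can be checked in isolation (a standalone sketch; the sample output is fabricated to mimic otool's format, with the file name on line 0, the install name on line 1, and dependencies after that):

.. code-block:: python

    sample = (
        "/tmp/build/libfoo.dylib:\n"
        "\t/tmp/build/libfoo.dylib (compatibility version 1.0.0)\n"
        "\t/tmp/build/libbar.dylib (compatibility version 1.0.0)\n"
        "\t/usr/lib/libSystem.B.dylib (compatibility version 1.0.0)\n"
    )
    long_deps = sample.split("\n")
    # keep dependency lines only, strip the leading tab and trailing metadata
    deps = [dep.partition(" ")[0][1::] for dep in long_deps[2:-1]]
    assert deps == ["/tmp/build/libbar.dylib", "/usr/lib/libSystem.B.dylib"]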
def find_first(root: str, files: Union[Iterable[str], str], bfs_depth: int = 2) -> Optional[str]: def find_first(root: str, files: Union[Iterable[str], str], bfs_depth: int = 2) -> Optional[str]:
"""Find the first file matching a pattern. """Find the first file matching a pattern.
@@ -1687,203 +1716,105 @@ def find_first(root: str, files: Union[Iterable[str], str], bfs_depth: int = 2)
return FindFirstFile(root, *files, bfs_depth=bfs_depth).find() return FindFirstFile(root, *files, bfs_depth=bfs_depth).find()
def find( def find(root, files, recursive=True):
root: Union[Path, Sequence[Path]], """Search for ``files`` starting from the ``root`` directory.
files: Union[str, Sequence[str]],
recursive: bool = True, Like GNU/BSD find but written entirely in Python.
max_depth: Optional[int] = None,
) -> List[str]: Examples:
"""Finds all files matching the patterns from ``files`` starting from ``root``. This function
returns a deterministic result for the same input and directory structure when run multiple .. code-block:: console
times. Symlinked directories are followed, and unique directories are searched only once. Each
matching file is returned only once at lowest depth in case multiple paths exist due to $ find /usr -name python
symlinked directories.
is equivalent to:
>>> find('/usr', 'python')
.. code-block:: console
$ find /usr/local/bin -maxdepth 1 -name python
is equivalent to:
>>> find('/usr/local/bin', 'python', recursive=False)
Accepts any glob characters accepted by fnmatch: Accepts any glob characters accepted by fnmatch:
========== ==================================== ========== ====================================
Pattern Meaning Pattern Meaning
========== ==================================== ========== ====================================
``*`` matches one or more characters ``*`` matches everything
``?`` matches any single character ``?`` matches any single character
``[seq]`` matches any character in ``seq`` ``[seq]`` matches any character in ``seq``
``[!seq]`` matches any character not in ``seq`` ``[!seq]`` matches any character not in ``seq``
========== ==================================== ========== ====================================
Examples:
>>> find("/usr", "*.txt", recursive=True, max_depth=2)
finds all files with the extension ``.txt`` in the directory ``/usr`` and subdirectories up to
depth 2.
>>> find(["/usr", "/var"], ["*.txt", "*.log"], recursive=True)
finds all files with the extension ``.txt`` or ``.log`` in the directories ``/usr`` and
``/var`` at any depth.
>>> find("/usr", "GL/*.h", recursive=True)
finds all header files in a directory GL at any depth in the directory ``/usr``.
Parameters: Parameters:
root: One or more root directories to start searching from root (str): The root directory to start searching from
files: One or more filename patterns to search for files (str or collections.abc.Sequence): Library name(s) to search for
recursive: if False search only root, if True descends from roots. Defaults to True. recursive (bool): if False search only root folder,
max_depth: if set, don't search below this depth. Cannot be set if recursive is False if True descends top-down from the root. Defaults to True.
Returns a list of absolute, matching file paths. Returns:
list: The files that have been found
""" """
if isinstance(root, (str, pathlib.Path)):
root = [root]
elif not isinstance(root, collections.abc.Sequence):
raise TypeError(f"'root' arg must be a path or a sequence of paths, not '{type(root)}']")
if isinstance(files, str): if isinstance(files, str):
files = [files] files = [files]
elif not isinstance(files, collections.abc.Sequence):
raise TypeError(f"'files' arg must be str or a sequence of str, not '{type(files)}']")
# If recursive is false, max_depth can only be None or 0 if recursive:
if max_depth and not recursive: tty.debug(f"Find (recursive): {root} {str(files)}")
raise ValueError(f"max_depth ({max_depth}) cannot be set if recursive is False") result = _find_recursive(root, files)
else:
tty.debug(f"Find (not recursive): {root} {str(files)}")
result = _find_non_recursive(root, files)
tty.debug(f"Find (max depth = {max_depth}): {root} {files}") tty.debug(f"Find complete: {root} {str(files)}")
if not recursive:
max_depth = 0
elif max_depth is None:
max_depth = sys.maxsize
result = _find_max_depth(root, files, max_depth)
tty.debug(f"Find complete: {root} {files}")
return result return result
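The fnmatch pattern classes from the table behave as follows (a standalone illustration; the file names are hypothetical):

.. code-block:: python

    import fnmatch

    names = ["python", "python3.11", "perl", "gl.h"]
    assert fnmatch.filter(names, "python*") == ["python", "python3.11"]
    assert fnmatch.filter(names, "p?thon") == ["python"]
    assert fnmatch.filter(names, "[!p]*") == ["gl.h"]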
def _log_file_access_issue(e: OSError, path: str) -> None: @system_path_filter
errno_name = errno.errorcode.get(e.errno, "UNKNOWN") def _find_recursive(root, search_files):
tty.debug(f"find must skip {path}: {errno_name} {e}") # The variable here is **on purpose** a defaultdict. The idea is that
# we want to poke the filesystem as little as possible, but still maintain
# stability in the order of the answer. Thus we are recording each library
# found in a key, and reconstructing the stable order later.
found_files = collections.defaultdict(list)
# Make the path absolute to have os.walk also return an absolute path
root = os.path.abspath(root)
for path, _, list_files in os.walk(root):
for search_file in search_files:
matches = glob.glob(os.path.join(path, search_file))
matches = [os.path.join(path, x) for x in matches]
found_files[search_file].extend(matches)
answer = []
for search_file in search_files:
answer.extend(found_files[search_file])
return answer
def _file_id(s: os.stat_result) -> Tuple[int, int]: @system_path_filter
# Note: on windows, st_ino is the file index and st_dev is the volume serial number. See def _find_non_recursive(root, search_files):
# https://github.com/python/cpython/blob/3.9/Python/fileutils.c # The variable here is **on purpose** a defaultdict as os.list_dir
return (s.st_ino, s.st_dev) # can return files in any order (does not preserve stability)
found_files = collections.defaultdict(list)
# Make the path absolute to have absolute path returned
root = os.path.abspath(root)
def _dedupe_files(paths: List[str]) -> List[str]: for search_file in search_files:
"""Deduplicate files by inode and device, dropping files that cannot be accessed.""" matches = glob.glob(os.path.join(root, search_file))
unique_files: List[str] = [] matches = [os.path.join(root, x) for x in matches]
# tuple of (inode, device) for each file without following symlinks found_files[search_file].extend(matches)
visited: Set[Tuple[int, int]] = set()
for path in paths:
try:
stat_info = os.lstat(path)
except OSError as e:
_log_file_access_issue(e, path)
continue
file_id = _file_id(stat_info)
if file_id not in visited:
unique_files.append(path)
visited.add(file_id)
return unique_files
answer = []
for search_file in search_files:
answer.extend(found_files[search_file])
def _find_max_depth( return answer
roots: Sequence[Path], globs: Sequence[str], max_depth: int = sys.maxsize
) -> List[str]:
"""See ``find`` for the public API."""
# We optimize for the common case of simple filename only patterns: a single, combined regex
# is used. For complex patterns that include path components, we use a slower glob call from
# every directory we visit within max_depth.
filename_only_patterns = {
f"pattern_{i}": os.path.normcase(x) for i, x in enumerate(globs) if "/" not in x
}
complex_patterns = {f"pattern_{i}": x for i, x in enumerate(globs) if "/" in x}
regex = re.compile(fnmatch_translate_multiple(filename_only_patterns))
# Ordered dictionary that keeps track of what pattern found which files
matched_paths: Dict[str, List[str]] = {f"pattern_{i}": [] for i, _ in enumerate(globs)}
# Ensure returned paths are always absolute
roots = [os.path.abspath(r) for r in roots]
# Breadth-first search queue. Each element is a tuple of (depth, dir)
dir_queue: Deque[Tuple[int, str]] = collections.deque()
# Set of visited directories. Each element is a tuple of (inode, device)
visited_dirs: Set[Tuple[int, int]] = set()
for root in roots:
try:
stat_root = os.stat(root)
except OSError as e:
_log_file_access_issue(e, root)
continue
dir_id = _file_id(stat_root)
if dir_id not in visited_dirs:
dir_queue.appendleft((0, root))
visited_dirs.add(dir_id)
while dir_queue:
depth, curr_dir = dir_queue.pop()
try:
dir_iter = os.scandir(curr_dir)
except OSError as e:
_log_file_access_issue(e, curr_dir)
continue
# Use glob.glob for complex patterns.
for pattern_name, pattern in complex_patterns.items():
matched_paths[pattern_name].extend(
path for path in glob.glob(os.path.join(curr_dir, pattern))
)
# List of subdirectories by path and (inode, device) tuple
subdirs: List[Tuple[str, Tuple[int, int]]] = []
with dir_iter:
for dir_entry in dir_iter:
# Match filename only patterns
if filename_only_patterns:
m = regex.match(os.path.normcase(dir_entry.name))
if m:
for pattern_name in filename_only_patterns:
if m.group(pattern_name):
matched_paths[pattern_name].append(dir_entry.path)
break
# Collect subdirectories
if depth >= max_depth:
continue
try:
if not dir_entry.is_dir(follow_symlinks=True):
continue
if sys.platform == "win32":
# Note: st_ino/st_dev on DirEntry.stat are not set on Windows, so we have
# to call os.stat
stat_info = os.stat(dir_entry.path, follow_symlinks=True)
else:
stat_info = dir_entry.stat(follow_symlinks=True)
except OSError as e:
# Possible permission issue, or a symlink that cannot be resolved (ELOOP).
_log_file_access_issue(e, dir_entry.path)
continue
subdirs.append((dir_entry.path, _file_id(stat_info)))
# Enqueue subdirectories in a deterministic order
if subdirs:
subdirs.sort(key=lambda s: os.path.basename(s[0]))
for subdir, subdir_id in subdirs:
if subdir_id not in visited_dirs:
dir_queue.appendleft((depth + 1, subdir))
visited_dirs.add(subdir_id)
# Sort the matched paths for deterministic output
for paths in matched_paths.values():
paths.sort()
all_matching_paths = [path for paths in matched_paths.values() for path in paths]
# We only dedupe files if we have any complex patterns, since only they can match the same file
# multiple times
return _dedupe_files(all_matching_paths) if complex_patterns else all_matching_paths
# Utilities for libraries and headers
@@ -2322,9 +2253,7 @@ def find_system_libraries(libraries, shared=True):
return libraries_found return libraries_found
def find_libraries( def find_libraries(libraries, root, shared=True, recursive=False, runtime=True):
libraries, root, shared=True, recursive=False, runtime=True, max_depth: Optional[int] = None
):
"""Returns an iterable of full paths to libraries found in a root dir. """Returns an iterable of full paths to libraries found in a root dir.
Accepts any glob characters accepted by fnmatch: Accepts any glob characters accepted by fnmatch:
@@ -2345,8 +2274,6 @@ def find_libraries(
otherwise for static. Defaults to True. otherwise for static. Defaults to True.
recursive (bool): if False search only root folder, recursive (bool): if False search only root folder,
if True descends top-down from the root. Defaults to False. if True descends top-down from the root. Defaults to False.
max_depth (int): if set, don't search below this depth. Cannot be set
if recursive is False
runtime (bool): Windows only option, no-op elsewhere. If true, runtime (bool): Windows only option, no-op elsewhere. If true,
search for runtime shared libs (.DLL), otherwise, search search for runtime shared libs (.DLL), otherwise, search
for .Lib files. If shared is false, this has no meaning. for .Lib files. If shared is false, this has no meaning.
@@ -2355,7 +2282,6 @@ def find_libraries(
Returns: Returns:
LibraryList: The libraries that have been found LibraryList: The libraries that have been found
""" """
if isinstance(libraries, str): if isinstance(libraries, str):
libraries = [libraries] libraries = [libraries]
elif not isinstance(libraries, collections.abc.Sequence): elif not isinstance(libraries, collections.abc.Sequence):
@@ -2388,10 +2314,8 @@ def find_libraries(
libraries = ["{0}.{1}".format(lib, suffix) for lib in libraries for suffix in suffixes] libraries = ["{0}.{1}".format(lib, suffix) for lib in libraries for suffix in suffixes]
if not recursive: if not recursive:
if max_depth:
raise ValueError(f"max_depth ({max_depth}) cannot be set if recursive is False")
# If not recursive, look for the libraries directly in root # If not recursive, look for the libraries directly in root
return LibraryList(find(root, libraries, recursive=False)) return LibraryList(find(root, libraries, False))
# To speedup the search for external packages configured e.g. in /usr, # To speedup the search for external packages configured e.g. in /usr,
# perform first non-recursive search in root/lib then in root/lib64 and # perform first non-recursive search in root/lib then in root/lib64 and
@@ -2409,7 +2333,7 @@ def find_libraries(
if found_libs: if found_libs:
break break
else: else:
found_libs = find(root, libraries, recursive=True, max_depth=max_depth) found_libs = find(root, libraries, True)
return LibraryList(found_libs) return LibraryList(found_libs)
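Typical use, assuming a prefix that actually contains ``lib/libz.so`` (a sketch, not from the diff; the non-recursive probe of the conventional ``lib``/``lib64`` subdirectories shown above means common roots like ``/usr`` resolve without a full tree walk):

.. code-block:: python

    from llnl.util.filesystem import find_libraries

    libs = find_libraries("libz", root="/usr", shared=True, recursive=True)
    print(libs.link_flags)   # e.g. "-lz"; LibraryList also exposes search_flags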
@@ -2486,10 +2410,9 @@ def add_library_dependent(self, *dest):
""" """
for pth in dest: for pth in dest:
if os.path.isfile(pth): if os.path.isfile(pth):
new_pth = pathlib.Path(pth).parent self._additional_library_dependents.add(pathlib.Path(pth).parent)
else: else:
new_pth = pathlib.Path(pth) self._additional_library_dependents.add(pathlib.Path(pth))
self._additional_library_dependents.add(new_pth)
@property @property
def rpaths(self): def rpaths(self):
@@ -2567,14 +2490,8 @@ def establish_link(self):
# for each binary install dir in self.pkg (i.e. pkg.prefix.bin, pkg.prefix.lib) # for each binary install dir in self.pkg (i.e. pkg.prefix.bin, pkg.prefix.lib)
# install a symlink to each dependent library # install a symlink to each dependent library
for library, lib_dir in itertools.product(self.rpaths, self.library_dependents):
# do not rpath for system libraries included in the dag self._link(library, lib_dir)
# we should not be modifying libraries managed by the Windows system
# as this will negatively impact linker behavior and can result in permission
# errors if those system libs are not modifiable by Spack
if "windows-system" not in getattr(self.pkg, "tags", []):
for library, lib_dir in itertools.product(self.rpaths, self.library_dependents):
self._link(library, lib_dir)
@system_path_filter @system_path_filter

View File

@@ -5,20 +5,18 @@
import collections.abc import collections.abc
import contextlib import contextlib
import fnmatch
import functools import functools
import inspect
import itertools import itertools
import os import os
import re import re
import sys import sys
import traceback import traceback
import typing
import warnings
from datetime import datetime, timedelta from datetime import datetime, timedelta
from typing import Callable, Dict, Iterable, List, Tuple, TypeVar from typing import Any, Callable, Iterable, List, Tuple
# Ignore emacs backups when listing modules # Ignore emacs backups when listing modules
ignore_modules = r"^\.#|~$" ignore_modules = [r"^\.#", "~$"]
def index_by(objects, *funcs): def index_by(objects, *funcs):
@@ -86,6 +84,20 @@ def index_by(objects, *funcs):
return result return result
def caller_locals():
"""This will return the locals of the *parent* of the caller.
This allows a function to insert variables into its caller's
scope. Yes, this is some black magic, and yes it's useful
for implementing things like depends_on and provides.
"""
# Passing zero here skips line context for speed.
stack = inspect.stack(0)
try:
return stack[2][0].f_locals
finally:
del stack
def attr_setdefault(obj, name, value): def attr_setdefault(obj, name, value):
"""Like dict.setdefault, but for objects.""" """Like dict.setdefault, but for objects."""
if not hasattr(obj, name): if not hasattr(obj, name):
@@ -93,6 +105,15 @@ def attr_setdefault(obj, name, value):
return getattr(obj, name) return getattr(obj, name)
def has_method(cls, name):
for base in inspect.getmro(cls):
if base is object:
continue
if name in base.__dict__:
return True
return False
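``has_method`` walks the MRO but deliberately skips ``object``, so inherited definitions count while ``object``'s own slots do not (a standalone sketch, assuming the right-hand ``has_method`` is in scope):

.. code-block:: python

    class Base:
        def _cmp_key(self):
            return ()

    class Child(Base):
        pass

    assert has_method(Child, "_cmp_key")     # found on Base via the MRO
    assert not has_method(Child, "__str__")  # defined only on object, which is skipped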
def union_dicts(*dicts): def union_dicts(*dicts):
"""Use update() to combine all dicts into one. """Use update() to combine all dicts into one.
@@ -157,22 +178,19 @@ def list_modules(directory, **kwargs):
order.""" order."""
list_directories = kwargs.setdefault("directories", True) list_directories = kwargs.setdefault("directories", True)
ignore = re.compile(ignore_modules) for name in os.listdir(directory):
if name == "__init__.py":
continue
with os.scandir(directory) as it: path = os.path.join(directory, name)
for entry in it: if list_directories and os.path.isdir(path):
if entry.name == "__init__.py" or entry.name == "__pycache__": init_py = os.path.join(path, "__init__.py")
continue if os.path.isfile(init_py):
yield name
if ( elif name.endswith(".py"):
list_directories if not any(re.search(pattern, name) for pattern in ignore_modules):
and entry.is_dir() yield re.sub(".py$", "", name)
and os.path.isfile(os.path.join(entry.path, "__init__.py"))
):
yield entry.name
elif entry.name.endswith(".py") and entry.is_file() and not ignore.search(entry.name):
yield entry.name[:-3] # strip .py
def decorator_with_or_without_args(decorator): def decorator_with_or_without_args(decorator):
@@ -219,8 +237,8 @@ def setter(name, value):
value.__name__ = name value.__name__ = name
setattr(cls, name, value) setattr(cls, name, value)
if not hasattr(cls, "_cmp_key"): if not has_method(cls, "_cmp_key"):
raise TypeError(f"'{cls.__name__}' doesn't define _cmp_key().") raise TypeError("'%s' doesn't define _cmp_key()." % cls.__name__)
setter("__eq__", lambda s, o: (s is o) or (o is not None and s._cmp_key() == o._cmp_key())) setter("__eq__", lambda s, o: (s is o) or (o is not None and s._cmp_key() == o._cmp_key()))
setter("__lt__", lambda s, o: o is not None and s._cmp_key() < o._cmp_key()) setter("__lt__", lambda s, o: o is not None and s._cmp_key() < o._cmp_key())
@@ -370,8 +388,8 @@ def cd_fun():
TypeError: If the class does not have a ``_cmp_iter`` method TypeError: If the class does not have a ``_cmp_iter`` method
""" """
if not hasattr(cls, "_cmp_iter"): if not has_method(cls, "_cmp_iter"):
raise TypeError(f"'{cls.__name__}' doesn't define _cmp_iter().") raise TypeError("'%s' doesn't define _cmp_iter()." % cls.__name__)
# comparison operators are implemented in terms of lazy_eq and lazy_lt # comparison operators are implemented in terms of lazy_eq and lazy_lt
def eq(self, other): def eq(self, other):
@@ -846,32 +864,20 @@ def uniq(sequence):
return uniq_list return uniq_list
def elide_list(line_list: List[str], max_num: int = 10) -> List[str]: def elide_list(line_list, max_num=10):
"""Takes a long list and limits it to a smaller number of elements, """Takes a long list and limits it to a smaller number of elements,
replacing intervening elements with '...'. For example:: replacing intervening elements with '...'. For example::
elide_list(["1", "2", "3", "4", "5", "6"], 4) elide_list([1,2,3,4,5,6], 4)
gives:: gives::
["1", "2", "3", "...", "6"] [1, 2, 3, '...', 6]
""" """
if len(line_list) > max_num: if len(line_list) > max_num:
return [*line_list[: max_num - 1], "...", line_list[-1]] return line_list[: max_num - 1] + ["..."] + line_list[-1:]
return line_list else:
return line_list
if sys.version_info >= (3, 9):
PatternStr = re.Pattern[str]
else:
PatternStr = typing.Pattern[str]
def fnmatch_translate_multiple(named_patterns: Dict[str, str]) -> str:
"""Similar to ``fnmatch.translate``, but takes an ordered dictionary where keys are pattern
names, and values are filename patterns. The output is a regex that matches any of the
patterns in order, and named capture groups are used to identify which pattern matched."""
return "|".join(f"(?P<{n}>{fnmatch.translate(p)})" for n, p in named_patterns.items())
@contextlib.contextmanager @contextlib.contextmanager
@@ -886,12 +892,18 @@ class UnhashableArguments(TypeError):
"""Raise when an @memoized function receives unhashable arg or kwarg values.""" """Raise when an @memoized function receives unhashable arg or kwarg values."""
T = TypeVar("T") def enum(**kwargs):
"""Return an enum-like class.
Args:
**kwargs: explicit dictionary of enums
"""
return type("Enum", (object,), kwargs)
def stable_partition( def stable_partition(
input_iterable: Iterable[T], predicate_fn: Callable[[T], bool] input_iterable: Iterable, predicate_fn: Callable[[Any], bool]
) -> Tuple[List[T], List[T]]: ) -> Tuple[List[Any], List[Any]]:
"""Partition the input iterable according to a custom predicate. """Partition the input iterable according to a custom predicate.
Args: Args:
@@ -903,13 +915,12 @@ def stable_partition(
Tuple of the list of elements evaluating to True, and Tuple of the list of elements evaluating to True, and
list of elements evaluating to False. list of elements evaluating to False.
""" """
true_items: List[T] = [] true_items, false_items = [], []
false_items: List[T] = []
for item in input_iterable: for item in input_iterable:
if predicate_fn(item): if predicate_fn(item):
true_items.append(item) true_items.append(item)
else: continue
false_items.append(item) false_items.append(item)
return true_items, false_items return true_items, false_items
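Order is preserved within each half, hence "stable" (a standalone sketch; ``stable_partition`` exists on both sides of the diff):

.. code-block:: python

    from llnl.util.lang import stable_partition

    evens, odds = stable_partition([3, 1, 4, 1, 5, 9, 2, 6], lambda n: n % 2 == 0)
    assert evens == [4, 2, 6]
    assert odds == [3, 1, 1, 5, 9]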
@@ -921,21 +932,6 @@ def ensure_last(lst, *elements):
lst.append(lst.pop(lst.index(elt))) lst.append(lst.pop(lst.index(elt)))
class Const:
"""Class level constant, raises when trying to set the attribute"""
__slots__ = ["value"]
def __init__(self, value):
self.value = value
def __get__(self, instance, owner):
return self.value
def __set__(self, instance, value):
raise TypeError(f"Const value does not support assignment [value={self.value}]")
class TypedMutableSequence(collections.abc.MutableSequence): class TypedMutableSequence(collections.abc.MutableSequence):
"""Base class that behaves like a list, just with a different type. """Base class that behaves like a list, just with a different type.
@@ -1040,42 +1036,3 @@ def __init__(self, callback):
def __get__(self, instance, owner): def __get__(self, instance, owner):
return self.callback(owner) return self.callback(owner)
class DeprecatedProperty:
"""Data descriptor to error or warn when a deprecated property is accessed.
Derived classes must define a factory method to return an adaptor for the deprecated
property, if the descriptor is not set to error.
"""
__slots__ = ["name"]
#: 0 - Nothing
#: 1 - Warning
#: 2 - Error
error_lvl = 0
def __init__(self, name: str) -> None:
self.name = name
def __get__(self, instance, owner):
if instance is None:
return self
if self.error_lvl == 1:
warnings.warn(
f"accessing the '{self.name}' property of '{instance}', which is deprecated"
)
elif self.error_lvl == 2:
raise AttributeError(f"cannot access the '{self.name}' attribute of '{instance}'")
return self.factory(instance, owner)
def __set__(self, instance, value):
raise TypeError(
f"the deprecated property '{self.name}' of '{instance}' does not support assignment"
)
def factory(self, instance, owner):
raise NotImplementedError("must be implemented by derived classes")
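A derived class only has to supply ``factory``; ``error_lvl`` then selects silence, warning, or error (a hypothetical subclass, assuming the side of the diff that defines ``DeprecatedProperty``):

.. code-block:: python

    from llnl.util.lang import DeprecatedProperty

    class DeprecatedDict(DeprecatedProperty):
        def factory(self, instance, owner):
            return dict(instance._data)   # adaptor handed to legacy callers

    class Holder:
        _data = {"a": 1}
        legacy = DeprecatedDict("legacy")

    DeprecatedDict.error_lvl = 1           # 1 = warn on access
    assert Holder().legacy == {"a": 1}     # emits a warning, still works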

View File

@@ -8,75 +8,100 @@
import subprocess import subprocess
import sys import sys
import tempfile import tempfile
from typing import Union
from llnl.util import lang, tty from llnl.util import lang, tty
from ..path import sanitize_win_longpath, system_path_filter from ..path import system_path_filter
if sys.platform == "win32": if sys.platform == "win32":
from win32file import CreateHardLink from win32file import CreateHardLink
is_windows = sys.platform == "win32"
def _windows_symlink(
src: str, dst: str, target_is_directory: bool = False, *, dir_fd: Union[int, None] = None
):
"""On Windows with System Administrator privileges this will be a normal symbolic link via
os.symlink. On Windows without privileges the link will be a junction for a directory and a
hardlink for a file. On Windows the various link types are:
Symbolic Link: A link to a file or directory on the same or different volume (drive letter) or def symlink(source_path: str, link_path: str, allow_broken_symlinks: bool = not is_windows):
even to a remote file or directory (using UNC in its path). Need System Administrator """
privileges to make these. Create a link.
Hard Link: A link to a file on the same volume (drive letter) only. Every file (file's data) On non-Windows and Windows with System Administrator
has at least 1 hard link (file's name). But when this method creates a new hard link there will privileges this will be a normal symbolic link via
be 2. Deleting all hard links effectively deletes the file. Don't need System Administrator os.symlink.
privileges.
Junction: A link to a directory on the same or different volume (drive letter) but not to a On Windows without privledges the link will be a
remote directory. Don't need System Administrator privileges.""" junction for a directory and a hardlink for a file.
source_path = os.path.normpath(src) On Windows the various link types are:
Symbolic Link: A link to a file or directory on the
same or different volume (drive letter) or even to
a remote file or directory (using UNC in its path).
Need System Administrator privileges to make these.
Hard Link: A link to a file on the same volume (drive
letter) only. Every file (file's data) has at least 1
hard link (file's name). But when this method creates
a new hard link there will be 2. Deleting all hard
links effectively deletes the file. Don't need System
Administrator privileges.
Junction: A link to a directory on the same or different
volume (drive letter) but not to a remote directory. Don't
need System Administrator privileges.
Parameters:
source_path (str): The real file or directory that the link points to.
Must be absolute OR relative to the link.
link_path (str): The path where the link will exist.
allow_broken_symlinks (bool): On Linux or Mac, don't raise an exception if the source_path
doesn't exist. This will still raise an exception on Windows.
"""
source_path = os.path.normpath(source_path)
win_source_path = source_path win_source_path = source_path
link_path = os.path.normpath(dst) link_path = os.path.normpath(link_path)
# Perform basic checks to make sure symlinking will succeed # Never allow broken links on Windows.
if os.path.lexists(link_path): if sys.platform == "win32" and allow_broken_symlinks:
raise AlreadyExistsError(f"Link path ({link_path}) already exists. Cannot create link.") raise ValueError("allow_broken_symlinks parameter cannot be True on Windows.")
if not os.path.exists(source_path): if not allow_broken_symlinks:
if os.path.isabs(source_path): # Perform basic checks to make sure symlinking will succeed
# An absolute source path that does not exist will result in a broken link. if os.path.lexists(link_path):
raise SymlinkError( raise AlreadyExistsError(
f"Source path ({source_path}) is absolute but does not exist. Resulting " f"Link path ({link_path}) already exists. Cannot create link."
f"link would be broken so not making link."
) )
else:
# os.symlink can create a link when the given source path is relative to if not os.path.exists(source_path):
# the link path. Emulate this behavior and check to see if the source exists if os.path.isabs(source_path) and not allow_broken_symlinks:
# relative to the link path ahead of link creation to prevent broken # An absolute source path that does not exist will result in a broken link.
# links from being made.
link_parent_dir = os.path.dirname(link_path)
relative_path = os.path.join(link_parent_dir, source_path)
if os.path.exists(relative_path):
# In order to work on windows, the source path needs to be modified to be
# relative because hardlink/junction dont resolve relative paths the same
# way as os.symlink. This is ignored on other operating systems.
win_source_path = relative_path
else:
raise SymlinkError( raise SymlinkError(
f"The source path ({source_path}) is not relative to the link path " f"Source path ({source_path}) is absolute but does not exist. Resulting "
f"({link_path}). Resulting link would be broken so not making link." f"link would be broken so not making link."
) )
else:
# os.symlink can create a link when the given source path is relative to
# the link path. Emulate this behavior and check to see if the source exists
# relative to the link path ahead of link creation to prevent broken
# links from being made.
link_parent_dir = os.path.dirname(link_path)
relative_path = os.path.join(link_parent_dir, source_path)
if os.path.exists(relative_path):
# In order to work on windows, the source path needs to be modified to be
# relative because hardlink/junction don't resolve relative paths the same
# way as os.symlink. This is ignored on other operating systems.
win_source_path = relative_path
elif not allow_broken_symlinks:
raise SymlinkError(
f"The source path ({source_path}) is not relative to the link path "
f"({link_path}). Resulting link would be broken so not making link."
)
# Create the symlink # Create the symlink
if not _windows_can_symlink(): if sys.platform == "win32" and not _windows_can_symlink():
_windows_create_link(win_source_path, link_path) _windows_create_link(win_source_path, link_path)
else: else:
os.symlink(source_path, link_path, target_is_directory=os.path.isdir(source_path)) os.symlink(source_path, link_path, target_is_directory=os.path.isdir(source_path))
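The relative-source guard above can be illustrated in isolation (hypothetical paths; a sketch of the check, not the full function):

.. code-block:: python

    import os

    link_path = os.path.normpath("/opt/app/links/data.txt")
    source_path = os.path.normpath("../data/data.txt")
    # the link is only created if the source resolves relative to the link:
    relative_path = os.path.join(os.path.dirname(link_path), source_path)
    print(os.path.normpath(relative_path))   # /opt/app/data/data.txt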
def _windows_islink(path: str) -> bool: def islink(path: str) -> bool:
"""Override os.islink to give correct answer for spack logic. """Override os.islink to give correct answer for spack logic.
For Non-Windows: a link can be determined with the os.path.islink method. For Non-Windows: a link can be determined with the os.path.islink method.
@@ -222,9 +247,9 @@ def _windows_create_junction(source: str, link: str):
out, err = proc.communicate() out, err = proc.communicate()
tty.debug(out.decode()) tty.debug(out.decode())
if proc.returncode != 0: if proc.returncode != 0:
err_str = err.decode() err = err.decode()
tty.error(err_str) tty.error(err)
raise SymlinkError("Make junction command returned a non-zero return code.", err_str) raise SymlinkError("Make junction command returned a non-zero return code.", err)
def _windows_create_hard_link(path: str, link: str): def _windows_create_hard_link(path: str, link: str):
@@ -244,14 +269,14 @@ def _windows_create_hard_link(path: str, link: str):
CreateHardLink(link, path) CreateHardLink(link, path)
def _windows_readlink(path: str, *, dir_fd=None): def readlink(path: str):
"""Spack utility to override of os.readlink method to work cross platform""" """Spack utility to override of os.readlink method to work cross platform"""
if _windows_is_hardlink(path): if _windows_is_hardlink(path):
return _windows_read_hard_link(path) return _windows_read_hard_link(path)
elif _windows_is_junction(path): elif _windows_is_junction(path):
return _windows_read_junction(path) return _windows_read_junction(path)
else: else:
return sanitize_win_longpath(os.readlink(path, dir_fd=dir_fd)) return os.readlink(path)
def _windows_read_hard_link(link: str) -> str: def _windows_read_hard_link(link: str) -> str:
@@ -313,16 +338,6 @@ def resolve_link_target_relative_to_the_link(link):
return os.path.join(link_dir, target) return os.path.join(link_dir, target)
if sys.platform == "win32":
symlink = _windows_symlink
readlink = _windows_readlink
islink = _windows_islink
else:
symlink = os.symlink
readlink = os.readlink
islink = os.path.islink
class SymlinkError(RuntimeError): class SymlinkError(RuntimeError):
"""Exception class for errors raised while creating symlinks, """Exception class for errors raised while creating symlinks,
junctions and hard links junctions and hard links

View File

@@ -12,7 +12,7 @@
import traceback import traceback
from datetime import datetime from datetime import datetime
from sys import platform as _platform from sys import platform as _platform
from typing import Any, NoReturn from typing import NoReturn
if _platform != "win32": if _platform != "win32":
import fcntl import fcntl
@@ -158,22 +158,21 @@ def get_timestamp(force=False):
return "" return ""
def msg(message: Any, *args: Any, newline: bool = True) -> None: def msg(message, *args, **kwargs):
if not msg_enabled(): if not msg_enabled():
return return
if isinstance(message, Exception): if isinstance(message, Exception):
message = f"{message.__class__.__name__}: {message}" message = "%s: %s" % (message.__class__.__name__, str(message))
else:
message = str(message)
newline = kwargs.get("newline", True)
st_text = "" st_text = ""
if _stacktrace: if _stacktrace:
st_text = process_stacktrace(2) st_text = process_stacktrace(2)
if newline:
nl = "\n" if newline else "" cprint("@*b{%s==>} %s%s" % (st_text, get_timestamp(), cescape(_output_filter(message))))
cwrite(f"@*b{{{st_text}==>}} {get_timestamp()}{cescape(_output_filter(message))}{nl}") else:
cwrite("@*b{%s==>} %s%s" % (st_text, get_timestamp(), cescape(_output_filter(message))))
for arg in args: for arg in args:
print(indent + _output_filter(str(arg))) print(indent + _output_filter(str(arg)))

View File

@@ -237,6 +237,7 @@ def transpose():
def colified( def colified(
elts: List[Any], elts: List[Any],
cols: int = 0, cols: int = 0,
output: Optional[IO] = None,
indent: int = 0, indent: int = 0,
padding: int = 2, padding: int = 2,
tty: Optional[bool] = None, tty: Optional[bool] = None,

View File

@@ -59,11 +59,9 @@
To output an @, use '@@'. To output a } inside braces, use '}}'. To output an @, use '@@'. To output a } inside braces, use '}}'.
""" """
import os
import re import re
import sys import sys
from contextlib import contextmanager from contextlib import contextmanager
from typing import Optional
class ColorParseError(Exception): class ColorParseError(Exception):
@@ -97,34 +95,14 @@ def __init__(self, message):
} # white } # white
# Regex to be used for color formatting # Regex to be used for color formatting
COLOR_RE = re.compile(r"@(?:(@)|(\.)|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)") color_re = r"@(?:@|\.|([*_])?([a-zA-Z])?(?:{((?:[^}]|}})*)})?)"
# Mapping from color arguments to values for tty.set_color # Mapping from color arguments to values for tty.set_color
color_when_values = {"always": True, "auto": None, "never": False} color_when_values = {"always": True, "auto": None, "never": False}
# Force color; None: Only color if stdout is a tty
def _color_when_value(when): # True: Always colorize output, False: Never colorize output
"""Raise a ValueError for an invalid color setting. _force_color = None
Valid values are 'always', 'never', and 'auto', or equivalently,
True, False, and None.
"""
if when in color_when_values:
return color_when_values[when]
elif when not in color_when_values.values():
raise ValueError("Invalid color setting: %s" % when)
return when
def _color_from_environ() -> Optional[bool]:
try:
return _color_when_value(os.environ.get("SPACK_COLOR", "auto"))
except ValueError:
return None
#: When `None` colorize when stdout is tty, when `True` or `False` always or never colorize resp.
_force_color = _color_from_environ()
def try_enable_terminal_color_on_windows(): def try_enable_terminal_color_on_windows():
@@ -185,6 +163,19 @@ def _err_check(result, func, args):
debug("Unable to support color on Windows terminal") debug("Unable to support color on Windows terminal")
def _color_when_value(when):
"""Raise a ValueError for an invalid color setting.
Valid values are 'always', 'never', and 'auto', or equivalently,
True, False, and None.
"""
if when in color_when_values:
return color_when_values[when]
elif when not in color_when_values.values():
raise ValueError("Invalid color setting: %s" % when)
return when
def get_color_when(): def get_color_when():
"""Return whether commands should print color or not.""" """Return whether commands should print color or not."""
if _force_color is not None: if _force_color is not None:
@@ -212,66 +203,77 @@ def color_when(value):
set_color_when(old_value) set_color_when(old_value)
def _escape(s: str, color: bool, enclose: bool, zsh: bool) -> str: class match_to_ansi:
"""Returns a TTY escape sequence for a color""" def __init__(self, color=True, enclose=False, zsh=False):
if color: self.color = _color_when_value(color)
if zsh: self.enclose = enclose
result = rf"\e[0;{s}m" self.zsh = zsh
def escape(self, s):
"""Returns a TTY escape sequence for a color"""
if self.color:
if self.zsh:
result = rf"\e[0;{s}m"
else:
result = f"\033[{s}m"
if self.enclose:
result = rf"\[{result}\]"
return result
else: else:
result = f"\033[{s}m" return ""
if enclose: def __call__(self, match):
result = rf"\[{result}\]" """Convert a match object generated by ``color_re`` into an ansi
color code. This can be used as a handler in ``re.sub``.
"""
style, color, text = match.groups()
m = match.group(0)
return result if m == "@@":
else: return "@"
return "" elif m == "@.":
return self.escape(0)
elif m == "@":
raise ColorParseError("Incomplete color format: '%s' in %s" % (m, match.string))
string = styles[style]
if color:
if color not in colors:
raise ColorParseError(
"Invalid color specifier: '%s' in '%s'" % (color, match.string)
)
string += ";" + str(colors[color])
colored_text = ""
if text:
colored_text = text + self.escape(0)
return self.escape(string) + colored_text
def colorize( def colorize(string, **kwargs):
string: str, color: Optional[bool] = None, enclose: bool = False, zsh: bool = False
) -> str:
"""Replace all color expressions in a string with ANSI control codes. """Replace all color expressions in a string with ANSI control codes.
Args: Args:
string: The string to replace string (str): The string to replace
Returns: Returns:
The filtered string str: The filtered string
Keyword Arguments: Keyword Arguments:
color: If False, output will be plain text without control codes, for output to color (bool): If False, output will be plain text without control
non-console devices (default: automatically choose color or not) codes, for output to non-console devices.
enclose: If True, enclose ansi color sequences with enclose (bool): If True, enclose ansi color sequences with
square brackets to prevent misestimation of terminal width. square brackets to prevent misestimation of terminal width.
zsh: If True, use zsh ansi codes instead of bash ones (for variables like PS1) zsh (bool): If True, use zsh ansi codes instead of bash ones (for variables like PS1)
""" """
color = color if color is not None else get_color_when() color = _color_when_value(kwargs.get("color", get_color_when()))
zsh = kwargs.get("zsh", False)
def match_to_ansi(match): string = re.sub(color_re, match_to_ansi(color, kwargs.get("enclose")), string, zsh)
"""Convert a match object generated by ``COLOR_RE`` into an ansi string = string.replace("}}", "}")
color code. This can be used as a handler in ``re.sub``. return string
"""
escaped_at, dot, style, color_code, text = match.groups()
if escaped_at:
return "@"
elif dot:
return _escape(0, color, enclose, zsh)
elif not (style or color_code):
raise ColorParseError(
f"Incomplete color format: '{match.group(0)}' in '{match.string}'"
)
color_number = colors.get(color_code, "")
semi = ";" if color_number else ""
ansi_code = _escape(f"{styles[style]}{semi}{color_number}", color, enclose, zsh)
if text:
return f"{ansi_code}{text}{_escape(0, color, enclose, zsh)}"
else:
return ansi_code
return COLOR_RE.sub(match_to_ansi, string).replace("}}", "}")
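The @-markup grammar, exercised through ``colorize`` (a standalone sketch; with ``color=False`` the escape codes collapse, so the results are easy to assert):

.. code-block:: python

    from llnl.util.tty.color import colorize

    assert colorize("@*b{==>} @g{OK}", color=False) == "==> OK"
    assert colorize("user@@host", color=False) == "user@host"   # @@ -> literal @
    assert colorize("literal }}", color=False) == "literal }"   # }} -> literal }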
def clen(string): def clen(string):
@@ -303,7 +305,7 @@ def cprint(string, stream=None, color=None):
cwrite(string + "\n", stream, color) cwrite(string + "\n", stream, color)
def cescape(string: str) -> str: def cescape(string):
"""Escapes special characters needed for color codes. """Escapes special characters needed for color codes.
Replaces the following symbols with their equivalent literal forms: Replaces the following symbols with their equivalent literal forms:
@@ -319,7 +321,10 @@ def cescape(string: str) -> str:
Returns: Returns:
(str): the string with color codes escaped (str): the string with color codes escaped
""" """
return string.replace("@", "@@").replace("}", "}}") string = str(string)
string = string.replace("@", "@@")
string = string.replace("}", "}}")
return string
class ColorStream: class ColorStream:

View File

@@ -18,10 +18,9 @@
import threading import threading
import traceback import traceback
from contextlib import contextmanager from contextlib import contextmanager
from multiprocessing.connection import Connection
from threading import Thread from threading import Thread
from types import ModuleType from types import ModuleType
from typing import Callable, Optional from typing import Optional
import llnl.util.tty as tty import llnl.util.tty as tty
@@ -34,23 +33,8 @@
pass pass
esc, bell, lbracket, bslash, newline = r"\x1b", r"\x07", r"\[", r"\\", r"\n"
# Ansi Control Sequence Introducers (CSI) are a well-defined format
# Standard ECMA-48: Control Functions for Character-Imaging I/O Devices, section 5.4
# https://www.ecma-international.org/wp-content/uploads/ECMA-48_5th_edition_june_1991.pdf
csi_pre = f"{esc}{lbracket}"
csi_param, csi_inter, csi_post = r"[0-?]", r"[ -/]", r"[@-~]"
ansi_csi = f"{csi_pre}{csi_param}*{csi_inter}*{csi_post}"
# General ansi escape sequences have well-defined prefixes,
# but content and suffixes are less reliable.
# Conservatively assume they end with either "<ESC>\" or "<BELL>",
# with no intervening "<ESC>"/"<BELL>" keys or newlines
esc_pre = f"{esc}[@-_]"
esc_content = f"[^{esc}{bell}{newline}]"
esc_post = f"(?:{esc}{bslash}|{bell})"
ansi_esc = f"{esc_pre}{esc_content}*{esc_post}"
# Use this to strip escape sequences # Use this to strip escape sequences
_escape = re.compile(f"{ansi_csi}|{ansi_esc}") _escape = re.compile(r"\x1b[^m]*m|\x1b\[?1034h|\x1b\][0-9]+;[^\x07]*\x07")
# control characters for enabling/disabling echo # control characters for enabling/disabling echo
# #
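What the stripper removes, using the simpler right-hand pattern (a standalone sketch; the colored string is hypothetical):

.. code-block:: python

    import re

    strip = re.compile(r"\x1b[^m]*m|\x1b\[?1034h|\x1b\][0-9]+;[^\x07]*\x07")
    colored = "\x1b[0;32m==> OK\x1b[0m \x1b]0;title\x07done"
    assert strip.sub("", colored) == "==> OK done"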
@@ -345,6 +329,49 @@ def close(self):
self.file.close() self.file.close()
class MultiProcessFd:
"""Return an object which stores a file descriptor and can be passed as an
argument to a function run with ``multiprocessing.Process``, such that
the file descriptor is available in the subprocess."""
def __init__(self, fd):
self._connection = None
self._fd = None
if sys.version_info >= (3, 8):
self._connection = multiprocessing.connection.Connection(fd)
else:
self._fd = fd
@property
def fd(self):
if self._connection:
return self._connection._handle
else:
return self._fd
def close(self):
if self._connection:
self._connection.close()
else:
os.close(self._fd)
def close_connection_and_file(multiprocess_fd, file):
# MultiprocessFd is intended to transmit a FD
# to a child process, this FD is then opened to a Python File object
# (using fdopen). In >= 3.8, MultiprocessFd encapsulates a
# multiprocessing.connection.Connection; Connection closes the FD
# when it is deleted, and prints a warning about duplicate closure if
# it is not explicitly closed. In < 3.8, MultiprocessFd encapsulates a
# simple FD; closing the FD here appears to conflict with
# closure of the File object (in < 3.8 that is). Therefore this needs
# to choose whether to close the File or the Connection.
if sys.version_info >= (3, 8):
multiprocess_fd.close()
else:
file.close()
@contextmanager @contextmanager
def replace_environment(env): def replace_environment(env):
"""Replace the current environment (`os.environ`) with `env`. """Replace the current environment (`os.environ`) with `env`.
@@ -502,20 +529,22 @@ def __enter__(self):
# forcing debug output. # forcing debug output.
self._saved_debug = tty._debug self._saved_debug = tty._debug
# Pipe for redirecting output to logger # OS-level pipe for redirecting output to logger
read_fd, self.write_fd = multiprocessing.Pipe(duplex=False) read_fd, write_fd = os.pipe()
# Pipe for communication back from the daemon read_multiprocess_fd = MultiProcessFd(read_fd)
# Multiprocessing pipe for communication back from the daemon
# Currently only used to save echo value between uses # Currently only used to save echo value between uses
self.parent_pipe, child_pipe = multiprocessing.Pipe(duplex=False) self.parent_pipe, child_pipe = multiprocessing.Pipe()
# Sets a daemon that writes to file what it reads from a pipe # Sets a daemon that writes to file what it reads from a pipe
try: try:
# need to pass this b/c multiprocessing closes stdin in child. # need to pass this b/c multiprocessing closes stdin in child.
input_fd = None input_multiprocess_fd = None
try: try:
if sys.stdin.isatty(): if sys.stdin.isatty():
input_fd = Connection(os.dup(sys.stdin.fileno())) input_multiprocess_fd = MultiProcessFd(os.dup(sys.stdin.fileno()))
except BaseException: except BaseException:
# just don't forward input if this fails # just don't forward input if this fails
pass pass
@@ -524,9 +553,9 @@ def __enter__(self):
self.process = multiprocessing.Process( self.process = multiprocessing.Process(
target=_writer_daemon, target=_writer_daemon,
args=( args=(
input_fd, input_multiprocess_fd,
read_fd, read_multiprocess_fd,
self.write_fd, write_fd,
self.echo, self.echo,
self.log_file, self.log_file,
child_pipe, child_pipe,
@@ -537,9 +566,9 @@ def __enter__(self):
self.process.start() self.process.start()
finally: finally:
if input_fd: if input_multiprocess_fd:
input_fd.close() input_multiprocess_fd.close()
read_fd.close() read_multiprocess_fd.close()
# Flush immediately before redirecting so that anything buffered # Flush immediately before redirecting so that anything buffered
# goes to the original stream # goes to the original stream
@@ -557,9 +586,9 @@ def __enter__(self):
self._saved_stderr = os.dup(sys.stderr.fileno()) self._saved_stderr = os.dup(sys.stderr.fileno())
# redirect to the pipe we created above # redirect to the pipe we created above
os.dup2(self.write_fd.fileno(), sys.stdout.fileno()) os.dup2(write_fd, sys.stdout.fileno())
os.dup2(self.write_fd.fileno(), sys.stderr.fileno()) os.dup2(write_fd, sys.stderr.fileno())
self.write_fd.close() os.close(write_fd)
else: else:
# Handle I/O the Python way. This won't redirect lower-level # Handle I/O the Python way. This won't redirect lower-level
@@ -572,7 +601,7 @@ def __enter__(self):
self._saved_stderr = sys.stderr self._saved_stderr = sys.stderr
# create a file object for the pipe; redirect to it. # create a file object for the pipe; redirect to it.
pipe_fd_out = os.fdopen(self.write_fd.fileno(), "w", closefd=False) pipe_fd_out = os.fdopen(write_fd, "w")
sys.stdout = pipe_fd_out sys.stdout = pipe_fd_out
sys.stderr = pipe_fd_out sys.stderr = pipe_fd_out
@@ -608,7 +637,6 @@ def __exit__(self, exc_type, exc_val, exc_tb):
else: else:
sys.stdout = self._saved_stdout sys.stdout = self._saved_stdout
sys.stderr = self._saved_stderr sys.stderr = self._saved_stderr
self.write_fd.close()
# print log contents in parent if needed. # print log contents in parent if needed.
if self.log_file.write_in_parent: if self.log_file.write_in_parent:
@@ -822,14 +850,14 @@ def force_echo(self):
def _writer_daemon( def _writer_daemon(
stdin_fd: Optional[Connection], stdin_multiprocess_fd,
read_fd: Connection, read_multiprocess_fd,
write_fd: Connection, write_fd,
echo: bool, echo,
log_file_wrapper: FileWrapper, log_file_wrapper,
control_fd: Connection, control_pipe,
filter_fn: Optional[Callable[[str], str]], filter_fn,
) -> None: ):
"""Daemon used by ``log_output`` to write to a log file and to ``stdout``. """Daemon used by ``log_output`` to write to a log file and to ``stdout``.
The daemon receives output from the parent process and writes it both The daemon receives output from the parent process and writes it both
@@ -866,37 +894,43 @@ def _writer_daemon(
``StringIO`` in the parent. This is mainly for testing. ``StringIO`` in the parent. This is mainly for testing.
Arguments: Arguments:
stdin_fd: optional input from the terminal stdin_multiprocess_fd (int): input from the terminal
read_fd: pipe for reading from parent's redirected stdout read_multiprocess_fd (int): pipe for reading from parent's redirected
echo: initial echo setting -- controlled by user and preserved across multiple writer stdout
daemons echo (bool): initial echo setting -- controlled by user and
log_file_wrapper: file to log all output preserved across multiple writer daemons
control_pipe: multiprocessing pipe on which to send control information to the parent log_file_wrapper (FileWrapper): file to log all output
filter_fn: optional function to filter each line of output control_pipe (Pipe): multiprocessing pipe on which to send control
information to the parent
filter_fn (callable, optional): function to filter each line of output
""" """
# This process depends on closing all instances of write_pipe to terminate the reading loop # If this process was forked, then it will inherit file descriptors from
write_fd.close() # the parent process. This process depends on closing all instances of
# write_fd to terminate the reading loop, so we close the file descriptor
# here. Forking is the process spawning method everywhere except Mac OS
# for Python >= 3.8 and on Windows
if sys.version_info < (3, 8) or sys.platform != "darwin":
os.close(write_fd)
# 1. Use line buffering (3rd param = 1) since Python 3 has a bug # 1. Use line buffering (3rd param = 1) since Python 3 has a bug
# that prevents unbuffered text I/O. # that prevents unbuffered text I/O.
# 2. Python 3.x before 3.7 does not open with UTF-8 encoding by default # 2. Python 3.x before 3.7 does not open with UTF-8 encoding by default
# 3. closefd=False because Connection has "ownership" in_pipe = os.fdopen(read_multiprocess_fd.fd, "r", 1, encoding="utf-8")
read_file = os.fdopen(read_fd.fileno(), "r", 1, encoding="utf-8", closefd=False)
if stdin_fd: if stdin_multiprocess_fd:
stdin_file = os.fdopen(stdin_fd.fileno(), closefd=False) stdin = os.fdopen(stdin_multiprocess_fd.fd)
else: else:
stdin_file = None stdin = None
# list of streams to select from # list of streams to select from
istreams = [read_file, stdin_file] if stdin_file else [read_file] istreams = [in_pipe, stdin] if stdin else [in_pipe]
force_echo = False # parent can force echo for certain output force_echo = False # parent can force echo for certain output
log_file = log_file_wrapper.unwrap() log_file = log_file_wrapper.unwrap()
try: try:
with keyboard_input(stdin_file) as kb: with keyboard_input(stdin) as kb:
while True: while True:
# fix the terminal settings if we recently came to # fix the terminal settings if we recently came to
# the foreground # the foreground
@@ -909,12 +943,12 @@ def _writer_daemon(
# Allow user to toggle echo with 'v' key. # Allow user to toggle echo with 'v' key.
# Currently ignores other chars. # Currently ignores other chars.
# only read stdin if we're in the foreground # only read stdin if we're in the foreground
if stdin_file and stdin_file in rlist and not _is_background_tty(stdin_file): if stdin in rlist and not _is_background_tty(stdin):
# it's possible to be backgrounded between the above # it's possible to be backgrounded between the above
# check and the read, so we ignore SIGTTIN here. # check and the read, so we ignore SIGTTIN here.
with ignore_signal(signal.SIGTTIN): with ignore_signal(signal.SIGTTIN):
try: try:
if stdin_file.read(1) == "v": if stdin.read(1) == "v":
echo = not echo echo = not echo
except IOError as e: except IOError as e:
# If SIGTTIN is ignored, the system gives EIO # If SIGTTIN is ignored, the system gives EIO
@@ -923,13 +957,13 @@ def _writer_daemon(
if e.errno != errno.EIO: if e.errno != errno.EIO:
raise raise
if read_file in rlist: if in_pipe in rlist:
line_count = 0 line_count = 0
try: try:
while line_count < 100: while line_count < 100:
# Handle output from the calling process. # Handle output from the calling process.
try: try:
line = _retry(read_file.readline)() line = _retry(in_pipe.readline)()
except UnicodeDecodeError: except UnicodeDecodeError:
# installs like --test=root gpgme produce non-UTF8 logs # installs like --test=root gpgme produce non-UTF8 logs
line = "<line lost: output was not encoded as UTF-8>\n" line = "<line lost: output was not encoded as UTF-8>\n"
@@ -958,7 +992,7 @@ def _writer_daemon(
if xoff in controls: if xoff in controls:
force_echo = False force_echo = False
if not _input_available(read_file): if not _input_available(in_pipe):
break break
finally: finally:
if line_count > 0: if line_count > 0:
@@ -973,14 +1007,14 @@ def _writer_daemon(
finally: finally:
# send written data back to parent if we used a StringIO # send written data back to parent if we used a StringIO
if isinstance(log_file, io.StringIO): if isinstance(log_file, io.StringIO):
control_fd.send(log_file.getvalue()) control_pipe.send(log_file.getvalue())
log_file_wrapper.close() log_file_wrapper.close()
read_fd.close() close_connection_and_file(read_multiprocess_fd, in_pipe)
if stdin_fd: if stdin_multiprocess_fd:
stdin_fd.close() close_connection_and_file(stdin_multiprocess_fd, stdin)
# send echo value back to the parent so it can be preserved. # send echo value back to the parent so it can be preserved.
control_fd.send(echo) control_pipe.send(echo)
def _retry(function): def _retry(function):
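Condensed to its skeleton, the machinery these hunks rewire is a tee: the parent points stdout at a pipe and a daemon process copies every line to both a log file and the real terminal. A minimal stand-alone version, assuming the fork start method (the Linux default) and hypothetical names:

import multiprocessing
import os
import sys

def tee_daemon(read_fd, write_fd, log_path):
    os.close(write_fd)  # drop the fork-inherited write end so readline sees EOF
    with os.fdopen(read_fd, "r", 1) as in_pipe, open(log_path, "w") as log:
        for line in in_pipe:
            log.write(line)
            sys.stdout.write(line)  # the daemon's stdout is still the terminal

if __name__ == "__main__":
    read_fd, write_fd = os.pipe()
    daemon = multiprocessing.Process(
        target=tee_daemon, args=(read_fd, write_fd, "build.log")
    )
    daemon.start()
    os.close(read_fd)
    saved = os.dup(sys.stdout.fileno())
    os.dup2(write_fd, sys.stdout.fileno())
    os.close(write_fd)
    print("this line reaches both build.log and the terminal")
    sys.stdout.flush()
    os.dup2(saved, sys.stdout.fileno())  # restore; the daemon then sees EOF
    os.close(saved)
    daemon.join()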


@@ -3,15 +3,8 @@
# #
# SPDX-License-Identifier: (Apache-2.0 OR MIT) # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
from typing import Optional
import spack.paths
import spack.util.git
#: PEP440 canonical <major>.<minor>.<micro>.<devN> string #: PEP440 canonical <major>.<minor>.<micro>.<devN> string
__version__ = "0.24.0.dev0" __version__ = "0.22.0.dev0"
spack_version = __version__ spack_version = __version__
@@ -26,58 +19,4 @@ def __try_int(v):
spack_version_info = tuple([__try_int(v) for v in __version__.split(".")]) spack_version_info = tuple([__try_int(v) for v in __version__.split(".")])
def get_spack_commit() -> Optional[str]: __all__ = ["spack_version_info", "spack_version"]
"""Get the Spack git commit sha.
Returns:
(str or None) the commit sha if available, otherwise None
"""
git_path = os.path.join(spack.paths.prefix, ".git")
if not os.path.exists(git_path):
return None
git = spack.util.git.git()
if not git:
return None
rev = git(
"-C",
spack.paths.prefix,
"rev-parse",
"HEAD",
output=str,
error=os.devnull,
fail_on_error=False,
)
if git.returncode != 0:
return None
match = re.match(r"[a-f\d]{7,}$", rev)
return match.group(0) if match else None
def get_version() -> str:
"""Get a descriptive version of this instance of Spack.
Outputs '<PEP440 version> (<git commit sha>)'.
The commit sha is only added when available.
"""
commit = get_spack_commit()
if commit:
return f"{spack_version} ({commit})"
return spack_version
def get_short_version() -> str:
"""Short Spack version."""
return f"{spack_version_info[0]}.{spack_version_info[1]}"
__all__ = [
"spack_version_info",
"spack_version",
"get_version",
"get_spack_commit",
"get_short_version",
]
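For reference, the new helpers compose like this (the commit hash shown is illustrative):

import spack

print(spack.spack_version)        # "0.24.0.dev0"
print(spack.get_short_version())  # "0.24"
print(spack.get_version())        # e.g. "0.24.0.dev0 (0123abc)" in a git checkout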

lib/spack/spack/abi.py Normal file

@@ -0,0 +1,131 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from llnl.util.lang import memoized
import spack.spec
import spack.version
from spack.compilers.clang import Clang
from spack.util.executable import Executable, ProcessError
class ABI:
"""This class provides methods to test ABI compatibility between specs.
The current implementation is rather rough and could be improved."""
def architecture_compatible(
self, target: spack.spec.Spec, constraint: spack.spec.Spec
) -> bool:
"""Return true if architecture of target spec is ABI compatible
to the architecture of constraint spec. If either the target
or constraint specs have no architecture, target is also defined
as architecture ABI compatible to constraint."""
return (
not target.architecture
or not constraint.architecture
or target.architecture.intersects(constraint.architecture)
)
@memoized
def _gcc_get_libstdcxx_version(self, version):
"""Returns gcc ABI compatibility info by getting the library version of
a compiler's libstdc++ or libgcc_s"""
from spack.build_environment import dso_suffix
spec = spack.spec.CompilerSpec("gcc", version)
compilers = spack.compilers.compilers_for_spec(spec)
if not compilers:
return None
compiler = compilers[0]
rungcc = None
libname = None
output = None
if compiler.cxx:
rungcc = Executable(compiler.cxx)
libname = "libstdc++." + dso_suffix
elif compiler.cc:
rungcc = Executable(compiler.cc)
libname = "libgcc_s." + dso_suffix
else:
return None
try:
# Some gcc's are actually clang and don't respond properly to
# --print-file-name (they just print the filename, not the
# full path). Ignore these and expect them to be handled as clang.
if Clang.default_version(rungcc.exe[0]) != "unknown":
return None
output = rungcc("--print-file-name=%s" % libname, output=str)
except ProcessError:
return None
if not output:
return None
libpath = os.path.realpath(output.strip())
if not libpath:
return None
return os.path.basename(libpath)
@memoized
def _gcc_compiler_compare(self, pversion, cversion):
"""Returns true iff the gcc version pversion and cversion
are ABI compatible."""
plib = self._gcc_get_libstdcxx_version(pversion)
clib = self._gcc_get_libstdcxx_version(cversion)
if not plib or not clib:
return False
return plib == clib
def _intel_compiler_compare(
self, pversion: spack.version.ClosedOpenRange, cversion: spack.version.ClosedOpenRange
) -> bool:
"""Returns true iff the intel version pversion and cversion
are ABI compatible"""
# Test major and minor versions. Ignore build version.
pv = pversion.lo
cv = cversion.lo
return pv.up_to(2) == cv.up_to(2)
def compiler_compatible(
self, parent: spack.spec.Spec, child: spack.spec.Spec, loose: bool = False
) -> bool:
"""Return true if compilers for parent and child are ABI compatible."""
if not parent.compiler or not child.compiler:
return True
if parent.compiler.name != child.compiler.name:
# Different compiler families are assumed ABI incompatible
return False
if loose:
return True
# TODO: Can we move the specialized ABI matching stuff
# TODO: into compiler classes?
for pversion in parent.compiler.versions:
for cversion in child.compiler.versions:
# For a few compilers use specialized comparisons.
# Otherwise match on version match.
if pversion.intersects(cversion):
return True
elif parent.compiler.name == "gcc" and self._gcc_compiler_compare(
pversion, cversion
):
return True
elif parent.compiler.name == "intel" and self._intel_compiler_compare(
pversion, cversion
):
return True
return False
def compatible(
self, target: spack.spec.Spec, constraint: spack.spec.Spec, loose: bool = False
) -> bool:
"""Returns true if target spec is ABI compatible to constraint spec"""
return self.architecture_compatible(target, constraint) and self.compiler_compatible(
target, constraint, loose=loose
)
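A sketch of how the class might be exercised, assuming a configured Spack instance with the named compilers available (specs illustrative):

import spack.spec
from spack.abi import ABI

abi_checker = ABI()
parent = spack.spec.Spec("mpileaks %gcc@12.3.0")
child = spack.spec.Spec("callpath %gcc@12.2.0")

# loose=True only requires the same compiler family; the strict path compares
# libstdc++/libgcc_s library names for gcc and major.minor versions for intel.
print(abi_checker.compatible(parent, child, loose=True))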


@@ -42,20 +42,15 @@ def _search_duplicate_compilers(error_cls):
import inspect import inspect
import io import io
import itertools import itertools
import os
import pathlib import pathlib
import pickle import pickle
import re import re
import warnings import warnings
from typing import Iterable, List, Set, Tuple
from urllib.request import urlopen from urllib.request import urlopen
import llnl.util.lang import llnl.util.lang
from llnl.string import plural
import spack.builder
import spack.config import spack.config
import spack.fetch_strategy
import spack.patch import spack.patch
import spack.repo import spack.repo
import spack.spec import spack.spec
@@ -78,9 +73,7 @@ def __init__(self, summary, details):
self.details = tuple(details) self.details = tuple(details)
def __str__(self): def __str__(self):
if self.details: return self.summary + "\n" + "\n".join([" " + detail for detail in self.details])
return f"{self.summary}\n" + "\n".join(f" {detail}" for detail in self.details)
return self.summary
def __eq__(self, other): def __eq__(self, other):
if self.summary != other.summary or self.details != other.details: if self.summary != other.summary or self.details != other.details:
@@ -217,11 +210,6 @@ def _search_duplicate_compilers(error_cls):
group="configs", tag="CFG-PACKAGES", description="Sanity checks on packages.yaml", kwargs=() group="configs", tag="CFG-PACKAGES", description="Sanity checks on packages.yaml", kwargs=()
) )
#: Sanity checks on packages.yaml
config_repos = AuditClass(
group="configs", tag="CFG-REPOS", description="Sanity checks on repositories", kwargs=()
)
@config_packages @config_packages
def _search_duplicate_specs_in_externals(error_cls): def _search_duplicate_specs_in_externals(error_cls):
@@ -264,6 +252,40 @@ def _search_duplicate_specs_in_externals(error_cls):
return errors return errors
@config_packages
def _deprecated_preferences(error_cls):
"""Search package preferences deprecated in v0.21 (and slated for removal in v0.22)"""
# TODO (v0.22): remove this audit as the attributes will not be allowed in config
errors = []
packages_yaml = spack.config.CONFIG.get_config("packages")
def make_error(attribute_name, config_data, summary):
s = io.StringIO()
s.write("Occurring in the following file:\n")
dict_view = syaml.syaml_dict((k, v) for k, v in config_data.items() if k == attribute_name)
syaml.dump_config(dict_view, stream=s, blame=True)
return error_cls(summary=summary, details=[s.getvalue()])
if "all" in packages_yaml and "version" in packages_yaml["all"]:
summary = "Using the deprecated 'version' attribute under 'packages:all'"
errors.append(make_error("version", packages_yaml["all"], summary))
for package_name in packages_yaml:
if package_name == "all":
continue
package_conf = packages_yaml[package_name]
for attribute in ("compiler", "providers", "target"):
if attribute not in package_conf:
continue
summary = (
f"Using the deprecated '{attribute}' attribute " f"under 'packages:{package_name}'"
)
errors.append(make_error(attribute, package_conf, summary))
return errors
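Illustrative input that this audit flags, written as the parsed dict the function receives from spack.config:

packages_yaml = {
    "all": {"version": ["2.0"]},                 # 'version' under 'all' -> error
    "mpich": {"providers": {"mpi": ["mpich"]}},  # per-package 'providers' -> error
}
# With this configuration the loop above emits two errors, one per attribute.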
@config_packages @config_packages
def _avoid_mismatched_variants(error_cls): def _avoid_mismatched_variants(error_cls):
"""Warns if variant preferences have mismatched types or names.""" """Warns if variant preferences have mismatched types or names."""
@@ -284,7 +306,7 @@ def _avoid_mismatched_variants(error_cls):
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name) pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
for variant in current_spec.variants.values(): for variant in current_spec.variants.values():
# Variant does not exist at all # Variant does not exist at all
if variant.name not in pkg_cls.variant_names(): if variant.name not in pkg_cls.variants:
summary = ( summary = (
f"Setting a preference for the '{pkg_name}' package to the " f"Setting a preference for the '{pkg_name}' package to the "
f"non-existing variant '{variant.name}'" f"non-existing variant '{variant.name}'"
@@ -293,8 +315,9 @@ def _avoid_mismatched_variants(error_cls):
continue continue
# Variant cannot accept this value # Variant cannot accept this value
s = spack.spec.Spec(pkg_name)
try: try:
spack.variant.prevalidate_variant_value(pkg_cls, variant, strict=True) s.update_variant_validate(variant.name, variant.value)
except Exception: except Exception:
summary = ( summary = (
f"Setting the variant '{variant.name}' of the '{pkg_name}' package " f"Setting the variant '{variant.name}' of the '{pkg_name}' package "
@@ -328,43 +351,6 @@ def _wrongly_named_spec(error_cls):
return errors return errors
@config_packages
def _ensure_all_virtual_packages_have_default_providers(error_cls):
"""All virtual packages must have a default provider explicitly set."""
configuration = spack.config.create()
defaults = configuration.get("packages", scope="defaults")
default_providers = defaults["all"]["providers"]
virtuals = spack.repo.PATH.provider_index.providers
default_providers_filename = configuration.scopes["defaults"].get_section_filename("packages")
return [
error_cls(f"'{virtual}' must have a default provider in {default_providers_filename}", [])
for virtual in virtuals
if virtual not in default_providers
]
@config_repos
def _ensure_no_folders_without_package_py(error_cls):
"""Check that we don't leave any folder without a package.py in repos"""
errors = []
for repository in spack.repo.PATH.repos:
missing = []
for entry in os.scandir(repository.packages_path):
if not entry.is_dir():
continue
package_py = pathlib.Path(entry.path) / spack.repo.package_file_name
if not package_py.exists():
missing.append(entry.path)
if missing:
summary = (
f"The '{repository.namespace}' repository misses a package.py file"
f" in the following folders"
)
errors.append(error_cls(summary=summary, details=[f"{x}" for x in missing]))
return errors
def _make_config_error(config_data, summary, error_cls): def _make_config_error(config_data, summary, error_cls):
s = io.StringIO() s = io.StringIO()
s.write("Occurring in the following file:\n") s.write("Occurring in the following file:\n")
@@ -388,14 +374,6 @@ def _make_config_error(config_data, summary, error_cls):
) )
package_deprecated_attributes = AuditClass(
group="packages",
tag="PKG-DEPRECATED-ATTRIBUTES",
description="Sanity checks to preclude use of deprecated package attributes",
kwargs=("pkgs",),
)
package_properties = AuditClass( package_properties = AuditClass(
group="packages", group="packages",
tag="PKG-PROPERTIES", tag="PKG-PROPERTIES",
@@ -414,23 +392,22 @@ def _make_config_error(config_data, summary, error_cls):
) )
@package_properties @package_directives
def _check_build_test_callbacks(pkgs, error_cls): def _check_build_test_callbacks(pkgs, error_cls):
"""Ensure stand-alone test methods are not included in build-time callbacks. """Ensure stand-alone test method is not included in build-time callbacks"""
Test methods are for checking the installed software as stand-alone tests.
They could also be called during the post-install phase of a build.
"""
errors = [] errors = []
for pkg_name in pkgs: for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name) pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
test_callbacks = getattr(pkg_cls, "build_time_test_callbacks", None) test_callbacks = getattr(pkg_cls, "build_time_test_callbacks", None)
has_test_method = test_callbacks and any([m.startswith("test_") for m in test_callbacks]) # TODO (post-34236): "test*"->"test_*" once remove deprecated methods
# TODO (post-34236): "test"->"test_" once remove deprecated methods
has_test_method = test_callbacks and any([m.startswith("test") for m in test_callbacks])
if has_test_method: if has_test_method:
msg = f"Package {pkg_name} includes stand-alone test methods in build-time checks." msg = '{0} package contains "test*" method(s) in ' "build_time_test_callbacks"
callbacks = ", ".join(test_callbacks) instr = 'Remove all methods whose names start with "test" from: [{0}]'.format(
instr = f"Remove the following from 'build_time_test_callbacks': {callbacks}" ", ".join(test_callbacks)
)
errors.append(error_cls(msg.format(pkg_name), [instr])) errors.append(error_cls(msg.format(pkg_name), [instr]))
return errors return errors
@@ -444,10 +421,6 @@ def _check_patch_urls(pkgs, error_cls):
r"^https?://(?:patch-diff\.)?github(?:usercontent)?\.com/" r"^https?://(?:patch-diff\.)?github(?:usercontent)?\.com/"
r".+/.+/(?:commit|pull)/[a-fA-F0-9]+\.(?:patch|diff)" r".+/.+/(?:commit|pull)/[a-fA-F0-9]+\.(?:patch|diff)"
) )
github_pull_commits_re = (
r"^https?://(?:patch-diff\.)?github(?:usercontent)?\.com/"
r".+/.+/pull/\d+/commits/[a-fA-F0-9]+\.(?:patch|diff)"
)
# Only .diff URLs have stable/full hashes: # Only .diff URLs have stable/full hashes:
# https://forum.gitlab.com/t/patches-with-full-index/29313 # https://forum.gitlab.com/t/patches-with-full-index/29313
gitlab_patch_url_re = ( gitlab_patch_url_re = (
@@ -463,24 +436,14 @@ def _check_patch_urls(pkgs, error_cls):
if not isinstance(patch, spack.patch.UrlPatch): if not isinstance(patch, spack.patch.UrlPatch):
continue continue
if re.match(github_pull_commits_re, patch.url): if re.match(github_patch_url_re, patch.url):
url = re.sub(r"/pull/\d+/commits/", r"/commit/", patch.url)
url = re.sub(r"^(.*)(?<!full_index=1)$", r"\1?full_index=1", url)
errors.append(
error_cls(
f"patch URL in package {pkg_cls.name} "
+ "must not be a pull request commit; "
+ f"instead use {url}",
[patch.url],
)
)
elif re.match(github_patch_url_re, patch.url):
full_index_arg = "?full_index=1" full_index_arg = "?full_index=1"
if not patch.url.endswith(full_index_arg): if not patch.url.endswith(full_index_arg):
errors.append( errors.append(
error_cls( error_cls(
f"patch URL in package {pkg_cls.name} " "patch URL in package {0} must end with {1}".format(
+ f"must end with {full_index_arg}", pkg_cls.name, full_index_arg
),
[patch.url], [patch.url],
) )
) )
@@ -488,7 +451,9 @@ def _check_patch_urls(pkgs, error_cls):
if not patch.url.endswith(".diff"): if not patch.url.endswith(".diff"):
errors.append( errors.append(
error_cls( error_cls(
f"patch URL in package {pkg_cls.name} must end with .diff", "patch URL in package {0} must end with .diff".format(
pkg_cls.name
),
[patch.url], [patch.url],
) )
) )
@@ -505,7 +470,7 @@ def _search_for_reserved_attributes_names_in_packages(pkgs, error_cls):
name_definitions = collections.defaultdict(list) name_definitions = collections.defaultdict(list)
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name) pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
for cls_item in pkg_cls.__mro__: for cls_item in inspect.getmro(pkg_cls):
for name in RESERVED_NAMES: for name in RESERVED_NAMES:
current_value = cls_item.__dict__.get(name) current_value = cls_item.__dict__.get(name)
if current_value is None: if current_value is None:
@@ -528,53 +493,13 @@ def _search_for_reserved_attributes_names_in_packages(pkgs, error_cls):
return errors return errors
@package_deprecated_attributes
def _search_for_deprecated_package_methods(pkgs, error_cls):
"""Ensure the package doesn't define or use deprecated methods"""
DEPRECATED_METHOD = (("test", "a name starting with 'test_'"),)
DEPRECATED_USE = (
("self.cache_extra_test_sources(", "cache_extra_test_sources(self, ..)"),
("self.install_test_root(", "install_test_root(self, ..)"),
("self.run_test(", "test_part(self, ..)"),
)
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
methods = inspect.getmembers(pkg_cls, predicate=lambda x: inspect.isfunction(x))
method_errors = collections.defaultdict(list)
for name, function in methods:
for deprecated_name, alternate in DEPRECATED_METHOD:
if name == deprecated_name:
msg = f"Rename '{deprecated_name}' method to {alternate} instead."
method_errors[name].append(msg)
source = inspect.getsource(function)
for deprecated_name, alternate in DEPRECATED_USE:
if deprecated_name in source:
msg = f"Change '{deprecated_name}' to '{alternate}' in '{name}' method."
method_errors[name].append(msg)
num_methods = len(method_errors)
if num_methods > 0:
methods = plural(num_methods, "method", show_n=False)
error_msg = (
f"Package '{pkg_name}' implements or uses unsupported deprecated {methods}."
)
instr = [f"Make changes to '{pkg_cls.__module__}':"]
for name in sorted(method_errors):
instr.extend([f" {msg}" for msg in method_errors[name]])
errors.append(error_cls(error_msg, instr))
return errors
@package_properties @package_properties
def _ensure_all_package_names_are_lowercase(pkgs, error_cls): def _ensure_all_package_names_are_lowercase(pkgs, error_cls):
"""Ensure package names are lowercase and consistent""" """Ensure package names are lowercase and consistent"""
badname_regex, errors = re.compile(r"[_A-Z]"), [] badname_regex, errors = re.compile(r"[_A-Z]"), []
for pkg_name in pkgs: for pkg_name in pkgs:
if badname_regex.search(pkg_name): if badname_regex.search(pkg_name):
error_msg = f"Package name '{pkg_name}' should be lowercase and must not contain '_'" error_msg = "Package name '{}' is either lowercase or conatine '_'".format(pkg_name)
errors.append(error_cls(error_msg, [])) errors.append(error_cls(error_msg, []))
return errors return errors
@@ -713,17 +638,12 @@ def _ensure_env_methods_are_ported_to_builders(pkgs, error_cls):
errors = [] errors = []
for pkg_name in pkgs: for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name) pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
buildsystem_variant, _ = pkg_cls.variants["build_system"]
# values are either ConditionalValue objects or the values themselves buildsystem_names = [getattr(x, "value", x) for x in buildsystem_variant.values]
build_system_names = set( builder_cls_names = [spack.builder.BUILDER_CLS[x].__name__ for x in buildsystem_names]
v.value if isinstance(v, spack.variant.ConditionalValue) else v module = pkg_cls.module
for _, variant in pkg_cls.variant_definitions("build_system")
for v in variant.values
)
builder_cls_names = [spack.builder.BUILDER_CLS[x].__name__ for x in build_system_names]
has_builders_in_package_py = any( has_builders_in_package_py = any(
spack.builder.get_builder_class(pkg_cls, name) for name in builder_cls_names getattr(module, name, False) for name in builder_cls_names
) )
if not has_builders_in_package_py: if not has_builders_in_package_py:
continue continue
@@ -739,171 +659,6 @@ def _ensure_env_methods_are_ported_to_builders(pkgs, error_cls):
return errors return errors
class DeprecatedMagicGlobals(ast.NodeVisitor):
def __init__(self, magic_globals: Iterable[str]):
super().__init__()
self.magic_globals: Set[str] = set(magic_globals)
# State to track whether we're in a class function
self.depth: int = 0
self.in_function: bool = False
self.path = (ast.Module, ast.ClassDef, ast.FunctionDef)
# Defined locals in the current function (heuristically at least)
self.locals: Set[str] = set()
# List of (name, lineno) tuples for references to magic globals
self.references_to_globals: List[Tuple[str, int]] = []
def descend_in_function_def(self, node: ast.AST) -> None:
if not isinstance(node, self.path[self.depth]):
return
self.depth += 1
if self.depth == len(self.path):
self.in_function = True
super().generic_visit(node)
if self.depth == len(self.path):
self.in_function = False
self.locals.clear()
self.depth -= 1
def generic_visit(self, node: ast.AST) -> None:
# Recurse into function definitions
if self.depth < len(self.path):
return self.descend_in_function_def(node)
elif not self.in_function:
return
elif isinstance(node, ast.Global):
for name in node.names:
if name in self.magic_globals:
self.references_to_globals.append((name, node.lineno))
elif isinstance(node, ast.Assign):
# visit the rhs before lhs
super().visit(node.value)
for target in node.targets:
super().visit(target)
elif isinstance(node, ast.Name) and node.id in self.magic_globals:
if isinstance(node.ctx, ast.Load) and node.id not in self.locals:
self.references_to_globals.append((node.id, node.lineno))
elif isinstance(node.ctx, ast.Store):
self.locals.add(node.id)
else:
super().generic_visit(node)
@package_properties
def _uses_deprecated_globals(pkgs, error_cls):
"""Ensure that packages do not use deprecated globals"""
errors = []
for pkg_name in pkgs:
# some packages scheduled to be removed in v0.23 are not worth fixing.
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
if all(v.get("deprecated", False) for v in pkg_cls.versions.values()):
continue
file = spack.repo.PATH.filename_for_package_name(pkg_name)
tree = ast.parse(open(file).read())
visitor = DeprecatedMagicGlobals(("std_cmake_args", "std_meson_args", "std_pip_args"))
visitor.visit(tree)
if visitor.references_to_globals:
errors.append(
error_cls(
f"Package '{pkg_name}' uses deprecated globals",
[
f"{file}:{line} references '{name}'"
for name, line in visitor.references_to_globals
],
)
)
return errors
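A self-contained illustration of the visitor on a toy class body, assuming DeprecatedMagicGlobals from above is in scope:

import ast

source = '''
class Toy:
    def install(self, spec, prefix):
        args = std_cmake_args + ["-DFOO=ON"]
'''
visitor = DeprecatedMagicGlobals(("std_cmake_args", "std_meson_args", "std_pip_args"))
visitor.visit(ast.parse(source))
print(visitor.references_to_globals)  # -> [('std_cmake_args', 4)]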
@package_properties
def _ensure_test_docstring(pkgs, error_cls):
"""Ensure stand-alone test methods have a docstring.
The docstring of a test method is implicitly used as the description of
the corresponding test part during test results reporting.
"""
doc_regex = r'\s+("""[^"]+""")'
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
methods = inspect.getmembers(pkg_cls, predicate=lambda x: inspect.isfunction(x))
method_names = []
for name, test_fn in methods:
if not name.startswith("test_"):
continue
# Ensure the test method has a docstring
source = inspect.getsource(test_fn)
match = re.search(doc_regex, source)
if match is None or len(match.group(0).replace('"', "").strip()) == 0:
method_names.append(name)
num_methods = len(method_names)
if num_methods > 0:
methods = plural(num_methods, "method", show_n=False)
docstrings = plural(num_methods, "docstring", show_n=False)
msg = f"Package {pkg_name} has test {methods} with empty or missing {docstrings}."
names = ", ".join(method_names)
instr = [
"Docstrings are used as descriptions in test outputs.",
f"Add a concise summary to the following {methods} in '{pkg_cls.__module__}':",
f"{names}",
]
errors.append(error_cls(msg, instr))
return errors
@package_properties
def _ensure_test_implemented(pkgs, error_cls):
"""Ensure stand-alone test methods are implemented.
The test method is also required to be non-empty.
"""
def skip(line):
ln = line.strip()
return ln.startswith("#") or "pass" in ln
doc_regex = r'\s+("""[^"]+""")'
errors = []
for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
methods = inspect.getmembers(pkg_cls, predicate=lambda x: inspect.isfunction(x))
method_names = []
for name, test_fn in methods:
if not name.startswith("test_"):
continue
source = inspect.getsource(test_fn)
# Attempt to ensure the test method is implemented.
impl = re.sub(doc_regex, r"", source).splitlines()[1:]
lines = [ln.strip() for ln in impl if not skip(ln)]
if not lines:
method_names.append(name)
num_methods = len(method_names)
if num_methods > 0:
methods = plural(num_methods, "method", show_n=False)
msg = f"Package {pkg_name} has empty or missing test {methods}."
names = ", ".join(method_names)
instr = [
f"Implement or remove the following {methods} from '{pkg_cls.__module__}': {names}"
]
errors.append(error_cls(msg, instr))
return errors
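Toy methods that the two audits above would report (hypothetical package):

class ToyPackage:
    def test_version(self):
        pass  # no docstring and an empty body: flagged by both checks

    def test_smoke(self):
        """Run the installed binary once."""
        pass  # docstring present, but still empty: flagged as unimplemented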
@package_https_directives @package_https_directives
def _linting_package_file(pkgs, error_cls): def _linting_package_file(pkgs, error_cls):
"""Check for correctness of links""" """Check for correctness of links"""
@@ -1024,7 +779,7 @@ def check_virtual_with_variants(spec, msg):
return return
error = error_cls( error = error_cls(
f"{pkg_name}: {msg}", f"{pkg_name}: {msg}",
[f"remove variants from '{spec}' in depends_on directive in {filename}"], f"remove variants from '{spec}' in depends_on directive in {filename}",
) )
errors.append(error) errors.append(error)
@@ -1070,22 +825,20 @@ def check_virtual_with_variants(spec, msg):
# check variants # check variants
dependency_variants = dep.spec.variants dependency_variants = dep.spec.variants
for name, variant in dependency_variants.items(): for name, value in dependency_variants.items():
try: try:
spack.variant.prevalidate_variant_value( v, _ = dependency_pkg_cls.variants[name]
dependency_pkg_cls, variant, dep.spec, strict=True v.validate_or_raise(value, pkg_cls=dependency_pkg_cls)
)
except Exception as e: except Exception as e:
summary = ( summary = (
f"{pkg_name}: wrong variant used for dependency in 'depends_on()'" f"{pkg_name}: wrong variant used for dependency in 'depends_on()'"
) )
error_msg = str(e)
if isinstance(e, KeyError): if isinstance(e, KeyError):
error_msg = ( error_msg = (
f"variant {str(e).strip()} does not exist in package {dep_name}" f"variant {str(e).strip()} does not exist in package {dep_name}"
f" in package '{dep_name}'"
) )
error_msg += f" in package '{dep_name}'"
errors.append( errors.append(
error_cls(summary=summary, details=[error_msg, f"in {filename}"]) error_cls(summary=summary, details=[error_msg, f"in {filename}"])
@@ -1097,38 +850,39 @@ def check_virtual_with_variants(spec, msg):
@package_directives @package_directives
def _ensure_variant_defaults_are_parsable(pkgs, error_cls): def _ensure_variant_defaults_are_parsable(pkgs, error_cls):
"""Ensures that variant defaults are present and parsable from cli""" """Ensures that variant defaults are present and parsable from cli"""
def check_variant(pkg_cls, variant, vname):
# bool is a subclass of int in python. Permitting a default that is an instance
# of 'int' means both foo=false and foo=0 are accepted. Other falsish values are
# not allowed, since they can't be parsed from CLI ('foo=')
default_is_parsable = isinstance(variant.default, int) or variant.default
if not default_is_parsable:
msg = f"Variant '{vname}' of package '{pkg_cls.name}' has an unparsable default value"
return [error_cls(msg, [])]
try:
vspec = variant.make_default()
except spack.variant.MultipleValuesInExclusiveVariantError:
msg = f"Can't create default value for variant '{vname}' in package '{pkg_cls.name}'"
return [error_cls(msg, [])]
try:
variant.validate_or_raise(vspec, pkg_cls.name)
except spack.variant.InvalidVariantValueError:
msg = "Default value of variant '{vname}' in package '{pkg.name}' is invalid"
question = "Is it among the allowed values?"
return [error_cls(msg, [question])]
return []
errors = [] errors = []
for pkg_name in pkgs: for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name) pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
for vname in pkg_cls.variant_names(): for variant_name, entry in pkg_cls.variants.items():
for _, variant_def in pkg_cls.variant_definitions(vname): variant, _ = entry
errors.extend(check_variant(pkg_cls, variant_def, vname)) default_is_parsable = (
# Permitting a default that is an instance on 'int' permits
# to have foo=false or foo=0. Other falsish values are
# not allowed, since they can't be parsed from cli ('foo=')
isinstance(variant.default, int)
or variant.default
)
if not default_is_parsable:
error_msg = "Variant '{}' of package '{}' has a bad default value"
errors.append(error_cls(error_msg.format(variant_name, pkg_name), []))
continue
try:
vspec = variant.make_default()
except spack.variant.MultipleValuesInExclusiveVariantError:
error_msg = "Cannot create a default value for the variant '{}' in package '{}'"
errors.append(error_cls(error_msg.format(variant_name, pkg_name), []))
continue
try:
variant.validate_or_raise(vspec, pkg_cls=pkg_cls)
except spack.variant.InvalidVariantValueError:
error_msg = (
"The default value of the variant '{}' in package '{}' failed validation"
)
question = "Is it among the allowed values?"
errors.append(error_cls(error_msg.format(variant_name, pkg_name), [question]))
return errors return errors
@@ -1138,11 +892,11 @@ def _ensure_variants_have_descriptions(pkgs, error_cls):
errors = [] errors = []
for pkg_name in pkgs: for pkg_name in pkgs:
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name) pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
for name in pkg_cls.variant_names(): for variant_name, entry in pkg_cls.variants.items():
for when, variant in pkg_cls.variant_definitions(name): variant, _ = entry
if not variant.description: if not variant.description:
msg = f"Variant '{name}' in package '{pkg_name}' is missing a description" error_msg = "Variant '{}' in package '{}' is missing a description"
errors.append(error_cls(msg, [])) errors.append(error_cls(error_msg.format(variant_name, pkg_name), []))
return errors return errors
@@ -1199,26 +953,29 @@ def _version_constraints_are_satisfiable_by_some_version_in_repo(pkgs, error_cls
def _analyze_variants_in_directive(pkg, constraint, directive, error_cls): def _analyze_variants_in_directive(pkg, constraint, directive, error_cls):
variant_exceptions = (
spack.variant.InconsistentValidationError,
spack.variant.MultipleValuesInExclusiveVariantError,
spack.variant.InvalidVariantValueError,
KeyError,
)
errors = [] errors = []
variant_names = pkg.variant_names()
summary = f"{pkg.name}: wrong variant in '{directive}' directive"
filename = spack.repo.PATH.filename_for_package_name(pkg.name)
for name, v in constraint.variants.items(): for name, v in constraint.variants.items():
if name not in variant_names:
msg = f"variant {name} does not exist in {pkg.name}"
errors.append(error_cls(summary=summary, details=[msg, f"in {filename}"]))
continue
try: try:
spack.variant.prevalidate_variant_value(pkg, v, constraint, strict=True) variant, _ = pkg.variants[name]
except ( variant.validate_or_raise(v, pkg_cls=pkg)
spack.variant.InconsistentValidationError, except variant_exceptions as e:
spack.variant.MultipleValuesInExclusiveVariantError, summary = pkg.name + ': wrong variant in "{0}" directive'
spack.variant.InvalidVariantValueError, summary = summary.format(directive)
) as e: filename = spack.repo.PATH.filename_for_package_name(pkg.name)
msg = str(e).strip()
errors.append(error_cls(summary=summary, details=[msg, f"in {filename}"])) error_msg = str(e).strip()
if isinstance(e, KeyError):
error_msg = "the variant {0} does not exist".format(error_msg)
err = error_cls(summary=summary, details=[error_msg, "in " + filename])
errors.append(err)
return errors return errors
@@ -1256,10 +1013,9 @@ def _extracts_errors(triggers, summary):
for dname in dnames for dname in dnames
) )
for when, variants_by_name in pkg_cls.variants.items(): for vname, (variant, triggers) in pkg_cls.variants.items():
for vname, variant in variants_by_name.items(): summary = f"{pkg_name}: wrong 'when=' condition for the '{vname}' variant"
summary = f"{pkg_name}: wrong 'when=' condition for the '{vname}' variant" errors.extend(_extracts_errors(triggers, summary))
errors.extend(_extracts_errors([when], summary))
for when, providers, details in _error_items(pkg_cls.provided): for when, providers, details in _error_items(pkg_cls.provided):
errors.extend( errors.extend(
@@ -1290,7 +1046,7 @@ def _extracts_errors(triggers, summary):
group="externals", group="externals",
tag="PKG-EXTERNALS", tag="PKG-EXTERNALS",
description="Sanity checks for external software detection", description="Sanity checks for external software detection",
kwargs=("pkgs", "debug_log"), kwargs=("pkgs",),
) )
@@ -1313,7 +1069,7 @@ def packages_with_detection_tests():
@external_detection @external_detection
def _test_detection_by_executable(pkgs, debug_log, error_cls): def _test_detection_by_executable(pkgs, error_cls):
"""Test drive external detection for packages""" """Test drive external detection for packages"""
import spack.detection import spack.detection
@@ -1339,7 +1095,6 @@ def _test_detection_by_executable(pkgs, debug_log, error_cls):
for idx, test_runner in enumerate( for idx, test_runner in enumerate(
spack.detection.detection_tests(pkg_name, spack.repo.PATH) spack.detection.detection_tests(pkg_name, spack.repo.PATH)
): ):
debug_log(f"[{__file__}]: running test {idx} for package {pkg_name}")
specs = test_runner.execute() specs = test_runner.execute()
expected_specs = test_runner.expected_specs expected_specs = test_runner.expected_specs
@@ -1356,75 +1111,4 @@ def _test_detection_by_executable(pkgs, debug_log, error_cls):
details = [msg.format(s, idx) for s in sorted(not_expected)] details = [msg.format(s, idx) for s in sorted(not_expected)]
errors.append(error_cls(summary=summary, details=details)) errors.append(error_cls(summary=summary, details=details))
matched_detection = []
for candidate in expected_specs:
try:
idx = specs.index(candidate)
matched_detection.append((candidate, specs[idx]))
except (AttributeError, ValueError):
pass
def _compare_extra_attribute(_expected, _detected, *, _spec):
result = []
# Check items are of the same type
if not isinstance(_detected, type(_expected)):
_summary = f'{pkg_name}: error when trying to detect "{_expected}"'
_details = [f"{_detected} was detected instead"]
return [error_cls(summary=_summary, details=_details)]
# If they are string expected is a regex
if isinstance(_expected, str):
try:
_regex = re.compile(_expected)
except re.error:
_summary = f'{pkg_name}: illegal regex in "{_spec}" extra attributes'
_details = [f"{_expected} is not a valid regex"]
return [error_cls(summary=_summary, details=_details)]
if not _regex.match(_detected):
_summary = (
f'{pkg_name}: error when trying to match "{_expected}" '
f"in extra attributes"
)
_details = [f"{_detected} does not match the regex"]
return [error_cls(summary=_summary, details=_details)]
if isinstance(_expected, dict):
_not_detected = set(_expected.keys()) - set(_detected.keys())
if _not_detected:
_summary = f"{pkg_name}: cannot detect some attributes for spec {_spec}"
_details = [
f'"{_expected}" was expected',
f'"{_detected}" was detected',
] + [f'attribute "{s}" was not detected' for s in sorted(_not_detected)]
result.append(error_cls(summary=_summary, details=_details))
_common = set(_expected.keys()) & set(_detected.keys())
for _key in _common:
result.extend(
_compare_extra_attribute(_expected[_key], _detected[_key], _spec=_spec)
)
return result
for expected, detected in matched_detection:
# We might not want to test all attributes, so avoid not_expected
not_detected = set(expected.extra_attributes) - set(detected.extra_attributes)
if not_detected:
summary = f"{pkg_name}: cannot detect some attributes for spec {expected}"
details = [
f'"{s}" was not detected [test_id={idx}]' for s in sorted(not_detected)
]
errors.append(error_cls(summary=summary, details=details))
common = set(expected.extra_attributes) & set(detected.extra_attributes)
for key in common:
errors.extend(
_compare_extra_attribute(
expected.extra_attributes[key],
detected.extra_attributes[key],
_spec=expected,
)
)
return errors return errors

File diff suppressed because it is too large


@@ -5,14 +5,7 @@
"""Function and classes needed to bootstrap Spack itself.""" """Function and classes needed to bootstrap Spack itself."""
from .config import ensure_bootstrap_configuration, is_bootstrapping, store_path from .config import ensure_bootstrap_configuration, is_bootstrapping, store_path
from .core import ( from .core import all_core_root_specs, ensure_core_dependencies, ensure_patchelf_in_path_or_raise
all_core_root_specs,
ensure_clingo_importable_or_raise,
ensure_core_dependencies,
ensure_file_in_path_or_raise,
ensure_gpg_in_path_or_raise,
ensure_patchelf_in_path_or_raise,
)
from .environment import BootstrapEnvironment, ensure_environment_dependencies from .environment import BootstrapEnvironment, ensure_environment_dependencies
from .status import status_message from .status import status_message
@@ -20,9 +13,6 @@
"is_bootstrapping", "is_bootstrapping",
"ensure_bootstrap_configuration", "ensure_bootstrap_configuration",
"ensure_core_dependencies", "ensure_core_dependencies",
"ensure_file_in_path_or_raise",
"ensure_gpg_in_path_or_raise",
"ensure_clingo_importable_or_raise",
"ensure_patchelf_in_path_or_raise", "ensure_patchelf_in_path_or_raise",
"all_core_root_specs", "all_core_root_specs",
"ensure_environment_dependencies", "ensure_environment_dependencies",


@@ -4,8 +4,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT) # SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Common basic functions used through the spack.bootstrap package""" """Common basic functions used through the spack.bootstrap package"""
import fnmatch import fnmatch
import glob
import importlib
import os.path import os.path
import re import re
import sys import sys
@@ -30,7 +28,7 @@
def _python_import(module: str) -> bool: def _python_import(module: str) -> bool:
try: try:
importlib.import_module(module) __import__(module)
except ImportError: except ImportError:
return False return False
return True return True
@@ -56,24 +54,11 @@ def _try_import_from_store(
installed_specs = spack.store.STORE.db.query(query_spec, installed=True) installed_specs = spack.store.STORE.db.query(query_spec, installed=True)
for candidate_spec in installed_specs: for candidate_spec in installed_specs:
# previously bootstrapped specs may not have a python-venv dependency. pkg = candidate_spec["python"].package
if candidate_spec.dependencies("python-venv"): module_paths = [
python, *_ = candidate_spec.dependencies("python-venv") os.path.join(candidate_spec.prefix, pkg.purelib),
else: os.path.join(candidate_spec.prefix, pkg.platlib),
python, *_ = candidate_spec.dependencies("python") ]
# if python is installed, ask it for the layout
if python.installed:
module_paths = [
os.path.join(candidate_spec.prefix, python.package.purelib),
os.path.join(candidate_spec.prefix, python.package.platlib),
]
# otherwise search for the site-packages directory
# (clingo from binaries with truncated python-venv runtime)
else:
module_paths = glob.glob(
os.path.join(candidate_spec.prefix, "lib", "python*", "site-packages")
)
path_before = list(sys.path) path_before = list(sys.path)
# NOTE: try module_paths first and last, last allows an existing version in path # NOTE: try module_paths first and last, last allows an existing version in path
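The probing pattern used above, condensed (names hypothetical; the real code also appends the candidates after the existing path, per the NOTE):

import importlib
import sys

def try_import_from(module, module_paths):
    path_before = list(sys.path)
    sys.path = module_paths + sys.path  # candidates first
    try:
        importlib.import_module(module)
        return True
    except ImportError:
        sys.path = path_before          # roll back on failure
        return False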
@@ -224,18 +209,15 @@ def _root_spec(spec_str: str) -> str:
Args: Args:
spec_str: spec to be bootstrapped. Must be without compiler and target. spec_str: spec to be bootstrapped. Must be without compiler and target.
""" """
# Add a compiler and platform requirement to the root spec. # Add a compiler requirement to the root spec.
platform = str(spack.platforms.host()) platform = str(spack.platforms.host())
if platform == "darwin": if platform == "darwin":
spec_str += " %apple-clang" spec_str += " %apple-clang"
elif platform == "windows":
spec_str += " %msvc"
elif platform == "linux": elif platform == "linux":
spec_str += " %gcc" spec_str += " %gcc"
elif platform == "freebsd": elif platform == "freebsd":
spec_str += " %clang" spec_str += " %clang"
spec_str += f" platform={platform}"
target = archspec.cpu.host().family target = archspec.cpu.host().family
spec_str += f" target={target}" spec_str += f" target={target}"


@@ -1,154 +0,0 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Bootstrap concrete specs for clingo
Spack uses clingo to concretize specs. When clingo itself needs to be bootstrapped from sources,
we need to rely on another mechanism to get a concrete spec that fits the current host.
This module contains the logic to get a concrete spec for clingo, starting from a prototype
JSON file for a similar platform.
"""
import pathlib
import sys
from typing import Dict, Optional, Tuple
import archspec.cpu
import spack.compiler
import spack.compilers
import spack.platforms
import spack.spec
import spack.traverse
from .config import spec_for_current_python
class ClingoBootstrapConcretizer:
def __init__(self, configuration):
self.host_platform = spack.platforms.host()
self.host_os = self.host_platform.operating_system("frontend")
self.host_target = archspec.cpu.host().family
self.host_architecture = spack.spec.ArchSpec.frontend_arch()
self.host_architecture.target = str(self.host_target)
self.host_compiler = self._valid_compiler_or_raise()
self.host_python = self.python_external_spec()
if str(self.host_platform) == "linux":
self.host_libc = self.libc_external_spec()
self.external_cmake, self.external_bison = self._externals_from_yaml(configuration)
def _valid_compiler_or_raise(self) -> "spack.compiler.Compiler":
if str(self.host_platform) == "linux":
compiler_name = "gcc"
elif str(self.host_platform) == "darwin":
compiler_name = "apple-clang"
elif str(self.host_platform) == "windows":
compiler_name = "msvc"
elif str(self.host_platform) == "freebsd":
compiler_name = "clang"
else:
raise RuntimeError(f"Cannot bootstrap clingo from sources on {self.host_platform}")
candidates = spack.compilers.compilers_for_spec(
compiler_name, arch_spec=self.host_architecture
)
if not candidates:
raise RuntimeError(
f"Cannot find any version of {compiler_name} to bootstrap clingo from sources"
)
candidates.sort(key=lambda x: x.spec.version, reverse=True)
return candidates[0]
def _externals_from_yaml(
self, configuration: "spack.config.Configuration"
) -> Tuple[Optional["spack.spec.Spec"], Optional["spack.spec.Spec"]]:
packages_yaml = configuration.get("packages")
requirements = {"cmake": "@3.20:", "bison": "@2.5:"}
selected: Dict[str, Optional["spack.spec.Spec"]] = {"cmake": None, "bison": None}
for pkg_name in ["cmake", "bison"]:
if pkg_name not in packages_yaml:
continue
candidates = packages_yaml[pkg_name].get("externals", [])
for candidate in candidates:
s = spack.spec.Spec(candidate["spec"], external_path=candidate["prefix"])
if not s.satisfies(requirements[pkg_name]):
continue
if not s.intersects(f"%{self.host_compiler.spec}"):
continue
if not s.intersects(f"arch={self.host_architecture}"):
continue
selected[pkg_name] = self._external_spec(s)
break
return selected["cmake"], selected["bison"]
def prototype_path(self) -> pathlib.Path:
"""Path to a prototype concrete specfile for clingo"""
parent_dir = pathlib.Path(__file__).parent
result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-{self.host_target}.json"
if str(self.host_platform) == "linux":
# Using aarch64 as a fallback, since it has gnuconfig (x86_64 doesn't have it)
if not result.exists():
result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-aarch64.json"
elif str(self.host_platform) == "freebsd":
result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-amd64.json"
elif not result.exists():
raise RuntimeError(f"Cannot bootstrap clingo from sources on {self.host_platform}")
return result
def concretize(self) -> "spack.spec.Spec":
# Read the prototype and mark it NOT concrete
s = spack.spec.Spec.from_specfile(str(self.prototype_path()))
s._mark_concrete(False)
# Tweak it to conform to the host architecture
for node in s.traverse():
node.architecture.os = str(self.host_os)
node.compiler = self.host_compiler.spec
node.architecture = self.host_architecture
if node.name == "gcc-runtime":
node.versions = self.host_compiler.spec.versions
for edge in spack.traverse.traverse_edges([s], cover="edges"):
if edge.spec.name == "python":
edge.spec = self.host_python
if edge.spec.name == "bison" and self.external_bison:
edge.spec = self.external_bison
if edge.spec.name == "cmake" and self.external_cmake:
edge.spec = self.external_cmake
if "libc" in edge.virtuals:
edge.spec = self.host_libc
s._finalize_concretization()
# Work around the fact that the installer calls Spec.dependents() and
# we modified edges inconsistently
return s.copy()
def python_external_spec(self) -> "spack.spec.Spec":
"""Python external spec corresponding to the current running interpreter"""
result = spack.spec.Spec(spec_for_current_python(), external_path=sys.exec_prefix)
return self._external_spec(result)
def libc_external_spec(self) -> "spack.spec.Spec":
result = self.host_compiler.default_libc
return self._external_spec(result)
def _external_spec(self, initial_spec) -> "spack.spec.Spec":
initial_spec.namespace = "builtin"
initial_spec.compiler = self.host_compiler.spec
initial_spec.architecture = self.host_architecture
for flag_type in spack.spec.FlagMap.valid_compiler_flags():
initial_spec.compiler_flags[flag_type] = []
return spack.spec.parse_with_version_concrete(initial_spec)
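A sketch of the intended entry point for this class, with the import path taken from the `from .clingo import ClingoBootstrapConcretizer` line in core.py below:

import spack.config
from spack.bootstrap.clingo import ClingoBootstrapConcretizer

bootstrapper = ClingoBootstrapConcretizer(configuration=spack.config.CONFIG)
clingo_spec = bootstrapper.concretize()  # host-adapted concrete spec
print(clingo_spec.tree())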


@@ -14,7 +14,6 @@
 import spack.compilers
 import spack.config
 import spack.environment
-import spack.modules
 import spack.paths
 import spack.platforms
 import spack.repo
@@ -130,10 +129,10 @@ def _bootstrap_config_scopes() -> Sequence["spack.config.ConfigScope"]:
     configuration_paths = (spack.config.CONFIGURATION_DEFAULTS_PATH, ("bootstrap", _config_path()))
     for name, path in configuration_paths:
         platform = spack.platforms.host().name
-        platform_scope = spack.config.DirectoryConfigScope(
-            f"{name}/{platform}", os.path.join(path, platform)
+        platform_scope = spack.config.ConfigScope(
+            "/".join([name, platform]), os.path.join(path, platform)
         )
-        generic_scope = spack.config.DirectoryConfigScope(name, path)
+        generic_scope = spack.config.ConfigScope(name, path)
         config_scopes.extend([generic_scope, platform_scope])
         msg = "[BOOTSTRAP CONFIG SCOPE] name={0}, path={1}"
         tty.debug(msg.format(generic_scope.name, generic_scope.path))
@@ -144,7 +143,11 @@ def _bootstrap_config_scopes() -> Sequence["spack.config.ConfigScope"]:
 def _add_compilers_if_missing() -> None:
     arch = spack.spec.ArchSpec.frontend_arch()
     if not spack.compilers.compilers_for_arch(arch):
-        spack.compilers.find_compilers()
+        new_compilers = spack.compilers.find_new_compilers(
+            mixed_toolchain=sys.platform == "darwin"
+        )
+        if new_compilers:
+            spack.compilers.add_compilers_to_config(new_compilers)


 @contextlib.contextmanager
@@ -153,7 +156,7 @@ def _ensure_bootstrap_configuration() -> Generator:
     bootstrap_store_path = store_path()
     user_configuration = _read_and_sanitize_configuration()
     with spack.environment.no_active_environment():
-        with spack.platforms.use_platform(
+        with spack.platforms.prevent_cray_detection(), spack.platforms.use_platform(
             spack.platforms.real_host()
         ), spack.repo.use_repositories(spack.paths.packages_path):
             # Default configuration scopes excluding command line
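Both sides of the scope hunk build the same two-level stack: a generic bootstrap scope plus a platform-specific scope appended after it, so the platform scope wins on lookup; only the scope class and the name separator differ. A plain-Python sketch of that precedence (the section names and values are illustrative, not Spack's real defaults):

scopes = [
    ("bootstrap", {"install_tree": "~/.spack/bootstrap/store"}),
    ("bootstrap/linux", {"install_tree": "/tmp/bootstrap-linux/store"}),
]

def get_config(key):
    # Later scopes take precedence, mirroring
    # config_scopes.extend([generic_scope, platform_scope]) above.
    for _, data in reversed(scopes):
        if key in data:
            return data[key]
    raise KeyError(key)

print(get_config("install_tree"))  # -> /tmp/bootstrap-linux/store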

View File

@@ -37,19 +37,23 @@
 import spack.binary_distribution
 import spack.config
 import spack.detection
-import spack.mirror
+import spack.environment
+import spack.modules
+import spack.paths
 import spack.platforms
+import spack.platforms.linux
+import spack.repo
 import spack.spec
 import spack.store
 import spack.user_environment
+import spack.util.environment
 import spack.util.executable
 import spack.util.path
 import spack.util.spack_yaml
+import spack.util.url
 import spack.version
-from spack.installer import PackageInstaller

 from ._common import _executables_in_store, _python_import, _root_spec, _try_import_from_store
-from .clingo import ClingoBootstrapConcretizer
 from .config import spack_python_interpreter, spec_for_current_python

 #: Name of the file containing metadata about the bootstrapping source
@@ -91,7 +95,12 @@ def __init__(self, conf: ConfigDictionary) -> None:
         self.metadata_dir = spack.util.path.canonicalize_path(conf["metadata"])

         # Promote (relative) paths to file urls
-        self.url = spack.mirror.Mirror(conf["info"]["url"]).fetch_url
+        url = conf["info"]["url"]
+        if spack.util.url.is_path_instead_of_url(url):
+            if not os.path.isabs(url):
+                url = os.path.join(self.metadata_dir, url)
+            url = spack.util.url.path_to_file_url(url)
+        self.url = url

     @property
     def mirror_scope(self) -> spack.config.InternalConfigScope:
@@ -164,22 +173,35 @@ def _read_metadata(self, package_name: str) -> Any:
         return data

     def _install_by_hash(
-        self, pkg_hash: str, pkg_sha256: str, bincache_platform: spack.platforms.Platform
+        self,
+        pkg_hash: str,
+        pkg_sha256: str,
+        index: List[spack.spec.Spec],
+        bincache_platform: spack.platforms.Platform,
     ) -> None:
+        index_spec = next(x for x in index if x.dag_hash() == pkg_hash)
+        # Reconstruct the compiler that we need to use for bootstrapping
+        compiler_entry = {
+            "modules": [],
+            "operating_system": str(index_spec.os),
+            "paths": {
+                "cc": "/dev/null",
+                "cxx": "/dev/null",
+                "f77": "/dev/null",
+                "fc": "/dev/null",
+            },
+            "spec": str(index_spec.compiler),
+            "target": str(index_spec.target.family),
+        }
         with spack.platforms.use_platform(bincache_platform):
-            query = spack.binary_distribution.BinaryCacheQuery(all_architectures=True)
-            for match in spack.store.find([f"/{pkg_hash}"], multiple=False, query_fn=query):
-                spack.binary_distribution.install_root_node(
-                    # allow_missing is true since when bootstrapping clingo we truncate runtime
-                    # deps such as gcc-runtime, since we link libstdc++ statically, and the other
-                    # further runtime deps are loaded by the Python interpreter. This just silences
-                    # warnings about missing dependencies.
-                    match,
-                    unsigned=True,
-                    force=True,
-                    sha256=pkg_sha256,
-                    allow_missing=True,
-                )
+            with spack.config.override("compilers", [{"compiler": compiler_entry}]):
+                spec_str = "/" + pkg_hash
+                query = spack.binary_distribution.BinaryCacheQuery(all_architectures=True)
+                matches = spack.store.find([spec_str], multiple=False, query_fn=query)
+                for match in matches:
+                    spack.binary_distribution.install_root_node(
+                        match, unsigned=True, force=True, sha256=pkg_sha256
+                    )

     def _install_and_test(
         self,
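In the _install_by_hash hunk, the added (+) side synthesizes a throwaway compiler entry from the buildcache index (pointing cc/cxx at /dev/null, since nothing is compiled) and injects it only for the duration of the install via spack.config.override. A generic sketch of that scoped-override pattern, in plain Python rather than Spack's config API:

import contextlib

CONFIG = {"compilers": []}

@contextlib.contextmanager
def override(section, value):
    # Temporarily replace a config section, restoring it on exit
    # even if the body raises.
    saved = CONFIG[section]
    CONFIG[section] = value
    try:
        yield
    finally:
        CONFIG[section] = saved

with override("compilers", [{"compiler": {"spec": "gcc@10.2.1"}}]):
    assert CONFIG["compilers"]  # fake entry visible only in this block
assert CONFIG["compilers"] == []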
@@ -210,7 +232,7 @@ def _install_and_test(
                 continue

             for _, pkg_hash, pkg_sha256 in item["binaries"]:
-                self._install_by_hash(pkg_hash, pkg_sha256, bincache_platform)
+                self._install_by_hash(pkg_hash, pkg_sha256, index, bincache_platform)

             info: ConfigDictionary = {}
             if test_fn(query_spec=abstract_spec, query_info=info):
@@ -267,13 +289,19 @@ def try_import(self, module: str, abstract_spec_str: str) -> bool:
         # Try to build and install from sources
         with spack_python_interpreter():
+            # Add hint to use frontend operating system on Cray
+            concrete_spec = spack.spec.Spec(abstract_spec_str + " ^" + spec_for_current_python())
+            # This is needed to help the old concretizer taking the `setuptools` dependency
+            # only when bootstrapping from sources on Python 3.12
+            if spec_for_current_python() == "python@3.12":
+                concrete_spec.constrain("+force_setuptools")
+
             if module == "clingo":
-                bootstrapper = ClingoBootstrapConcretizer(configuration=spack.config.CONFIG)
-                concrete_spec = bootstrapper.concretize()
+                # TODO: remove when the old concretizer is deprecated  # pylint: disable=fixme
+                concrete_spec._old_concretize(  # pylint: disable=protected-access
+                    deprecation_warning=False
+                )
             else:
-                concrete_spec = spack.spec.Spec(
-                    abstract_spec_str + " ^" + spec_for_current_python()
-                )
                 concrete_spec.concretize()

             msg = "[BOOTSTRAP MODULE {0}] Try installing '{1}' from sources"
@@ -281,7 +309,7 @@ def try_import(self, module: str, abstract_spec_str: str) -> bool:
             # Install the spec that should make the module importable
             with spack.config.override(self.mirror_scope):
-                PackageInstaller([concrete_spec.package], fail_fast=True).install()
+                concrete_spec.package.do_install(fail_fast=True)

             if _try_import_from_store(module, query_spec=concrete_spec, query_info=info):
                 self.last_search = info
@@ -300,11 +328,18 @@ def try_search_path(self, executables: Tuple[str], abstract_spec_str: str) -> bool:
         # might reduce compilation time by a fair amount
         _add_externals_if_missing()

-        concrete_spec = spack.spec.Spec(abstract_spec_str).concretized()
+        concrete_spec = spack.spec.Spec(abstract_spec_str)
+        if concrete_spec.name == "patchelf":
+            concrete_spec._old_concretize(  # pylint: disable=protected-access
+                deprecation_warning=False
+            )
+        else:
+            concrete_spec.concretize()
+
         msg = "[BOOTSTRAP] Try installing '{0}' from sources"
         tty.debug(msg.format(abstract_spec_str))
         with spack.config.override(self.mirror_scope):
-            PackageInstaller([concrete_spec.package], fail_fast=True).install()
+            concrete_spec.package.do_install()
         if _executables_in_store(executables, concrete_spec, query_info=info):
             self.last_search = info
             return True
@@ -470,8 +505,7 @@ def ensure_clingo_importable_or_raise() -> None:
 def gnupg_root_spec() -> str:
     """Return the root spec used to bootstrap GnuPG"""
-    root_spec_name = "win-gpg" if IS_WINDOWS else "gnupg"
-    return _root_spec(f"{root_spec_name}@2.3:")
+    return _root_spec("gnupg@2.3:")


 def ensure_gpg_in_path_or_raise() -> None:
@@ -481,19 +515,6 @@ def ensure_gpg_in_path_or_raise() -> None:
     )


-def file_root_spec() -> str:
-    """Return the root spec used to bootstrap file"""
-    root_spec_name = "win-file" if IS_WINDOWS else "file"
-    return _root_spec(root_spec_name)
-
-
-def ensure_file_in_path_or_raise() -> None:
-    """Ensure file is in the PATH or raise"""
-    return ensure_executables_in_path_or_raise(
-        executables=["file"], abstract_spec=file_root_spec()
-    )
-
-
 def patchelf_root_spec() -> str:
     """Return the root spec used to bootstrap patchelf"""
     # 0.13.1 is the last version not to require C++17.
@@ -538,54 +559,18 @@ def ensure_patchelf_in_path_or_raise() -> spack.util.executable.Executable:
     )


-def ensure_winsdk_external_or_raise() -> None:
-    """Ensure the Windows SDK + WGL are available on system
-
-    If both of these package are found, the Spack user or bootstrap
-    configuration (depending on where Spack is running)
-    will be updated to include all versions and variants detected.
-    If either the WDK or WSDK are not found, this method will raise
-    a RuntimeError.
-
-    **NOTE:** This modifies the Spack config in the current scope,
-    either user or environment depending on the calling context.
-    This is different from all other current bootstrap dependency
-    checks.
-    """
-    if set(["win-sdk", "wgl"]).issubset(spack.config.get("packages").keys()):
-        return
-    externals = spack.detection.by_path(["win-sdk", "wgl"])
-    if not set(["win-sdk", "wgl"]) == externals.keys():
-        missing_packages_lst = []
-        if "wgl" not in externals:
-            missing_packages_lst.append("wgl")
-        if "win-sdk" not in externals:
-            missing_packages_lst.append("win-sdk")
-        missing_packages = " & ".join(missing_packages_lst)
-        raise RuntimeError(
-            f"Unable to find the {missing_packages}, please install these packages \
-via the Visual Studio installer \
-before proceeding with Spack or provide the path to a non standard install with \
-'spack external find --path'"
-        )
-    # wgl/sdk are not required for bootstrapping Spack, but
-    # are required for building anything non trivial
-    # add to user config so they can be used by subsequent Spack ops
-    spack.detection.update_configuration(externals, buildable=False)
-
-
 def ensure_core_dependencies() -> None:
     """Ensure the presence of all the core dependencies."""
     if sys.platform.lower() == "linux":
         ensure_patchelf_in_path_or_raise()
-    elif sys.platform == "win32":
-        ensure_file_in_path_or_raise()
-    ensure_gpg_in_path_or_raise()
+    if not IS_WINDOWS:
+        ensure_gpg_in_path_or_raise()
     ensure_clingo_importable_or_raise()


 def all_core_root_specs() -> List[str]:
     """Return a list of all the core root specs that may be used to bootstrap Spack"""
-    return [clingo_root_spec(), gnupg_root_spec(), patchelf_root_spec(), file_root_spec()]
+    return [clingo_root_spec(), gnupg_root_spec(), patchelf_root_spec()]


 def bootstrapping_sources(scope: Optional[str] = None):
@@ -602,10 +587,7 @@ def bootstrapping_sources(scope: Optional[str] = None):
         current = copy.copy(entry)
         metadata_dir = spack.util.path.canonicalize_path(entry["metadata"])
         metadata_yaml = os.path.join(metadata_dir, METADATA_YAML_FILENAME)
-        try:
-            with open(metadata_yaml, encoding="utf-8") as stream:
-                current.update(spack.util.spack_yaml.load(stream))
-            list_of_sources.append(current)
-        except OSError:
-            pass
+        with open(metadata_yaml, encoding="utf-8") as stream:
+            current.update(spack.util.spack_yaml.load(stream))
+        list_of_sources.append(current)
     return list_of_sources
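Note the behavioral difference in the last hunk: the removed (-) side wraps the metadata read in try/except OSError, so a source whose metadata file is missing or unreadable is skipped rather than aborting the listing. A standalone sketch of the tolerant pattern, using json and a made-up file name instead of Spack's YAML helpers:

import json
import os

def bootstrapping_sources(entries):
    sources = []
    for entry in entries:
        metadata_file = os.path.join(entry["metadata"], "metadata.json")
        try:
            with open(metadata_file, encoding="utf-8") as stream:
                sources.append({**entry, **json.load(stream)})
        except OSError:
            # Skip unreadable sources instead of failing the whole scan.
            continue
    return sources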

View File

@@ -3,20 +3,22 @@
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
 """Bootstrap non-core Spack dependencies from an environment."""
+import glob
 import hashlib
 import os
 import pathlib
 import sys
-from typing import Iterable, List
+import warnings
+from typing import List

 import archspec.cpu

 from llnl.util import tty

 import spack.environment
-import spack.spec
 import spack.tengine
-import spack.util.path
+import spack.util.cpus
+import spack.util.executable

 from ._common import _root_spec
 from .config import root_path, spec_for_current_python, store_path
@@ -26,16 +28,6 @@
 class BootstrapEnvironment(spack.environment.Environment):
     """Environment to install dependencies of Spack for a given interpreter and architecture"""

-    def __init__(self) -> None:
-        if not self.spack_yaml().exists():
-            self._write_spack_yaml_file()
-        super().__init__(self.environment_root())
-
-        # Remove python package roots created before python-venv was introduced
-        for s in self.concrete_roots():
-            if "python" in s.package.extendees and not s.dependencies("python-venv"):
-                self.deconcretize(s)
-
     @classmethod
     def spack_dev_requirements(cls) -> List[str]:
         """Spack development requirements"""
@@ -67,19 +59,31 @@ def view_root(cls) -> pathlib.Path:
         return cls.environment_root().joinpath("view")

     @classmethod
-    def bin_dir(cls) -> pathlib.Path:
-        """Paths to be added to PATH"""
-        return cls.view_root().joinpath("bin")
-
-    def python_dirs(self) -> Iterable[pathlib.Path]:
-        python = next(s for s in self.all_specs_generator() if s.name == "python-venv").package
-        return {self.view_root().joinpath(p) for p in (python.platlib, python.purelib)}
+    def pythonpaths(cls) -> List[str]:
+        """Paths to be added to sys.path or PYTHONPATH"""
+        python_dir_part = f"python{'.'.join(str(x) for x in sys.version_info[:2])}"
+        glob_expr = str(cls.view_root().joinpath("**", python_dir_part, "**"))
+        result = glob.glob(glob_expr)
+        if not result:
+            msg = f"Cannot find any Python path in {cls.view_root()}"
+            warnings.warn(msg)
+        return result
+
+    @classmethod
+    def bin_dirs(cls) -> List[pathlib.Path]:
+        """Paths to be added to PATH"""
+        return [cls.view_root().joinpath("bin")]

     @classmethod
     def spack_yaml(cls) -> pathlib.Path:
         """Environment spack.yaml file"""
         return cls.environment_root().joinpath("spack.yaml")

+    def __init__(self) -> None:
+        if not self.spack_yaml().exists():
+            self._write_spack_yaml_file()
+        super().__init__(self.environment_root())
+
     def update_installations(self) -> None:
         """Update the installations of this environment."""
         log_enabled = tty.is_debug() or tty.is_verbose()
@@ -96,13 +100,21 @@ def update_installations(self) -> None:
             self.install_all()
             self.write(regenerate=True)

-    def load(self) -> None:
-        """Update PATH and sys.path."""
-        # Make executables available (shouldn't need PYTHONPATH)
-        os.environ["PATH"] = f"{self.bin_dir()}{os.pathsep}{os.environ.get('PATH', '')}"
-
-        # Spack itself imports pytest
-        sys.path.extend(str(p) for p in self.python_dirs())
+    def update_syspath_and_environ(self) -> None:
+        """Update ``sys.path`` and the PATH, PYTHONPATH environment variables to point to
+        the environment view.
+        """
+        # Do minimal modifications to sys.path and environment variables. In particular, pay
+        # attention to have the smallest PYTHONPATH / sys.path possible, since that may impact
+        # the performance of the current interpreter
+        sys.path.extend(self.pythonpaths())
+        os.environ["PATH"] = os.pathsep.join(
+            [str(x) for x in self.bin_dirs()] + os.environ.get("PATH", "").split(os.pathsep)
+        )
+        os.environ["PYTHONPATH"] = os.pathsep.join(
+            os.environ.get("PYTHONPATH", "").split(os.pathsep)
+            + [str(x) for x in self.pythonpaths()]
+        )

     def _write_spack_yaml_file(self) -> None:
         tty.msg(
@@ -152,4 +164,4 @@ def ensure_environment_dependencies() -> None:
     _add_externals_if_missing()
     with BootstrapEnvironment() as env:
         env.update_installations()
-        env.load()
+        env.update_syspath_and_environ()
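The update_syspath_and_environ() method on the added (+) side combines three related updates: extend sys.path for the current process, prepend the view's bin/ to PATH, and append the discovered python directories to PYTHONPATH for child processes. A self-contained sketch of that pattern, with a hypothetical view root standing in for the bootstrap environment's view:

import glob
import os
import sys

view_root = "/tmp/spack-bootstrap/view"  # hypothetical view root
pydir = f"python{sys.version_info[0]}.{sys.version_info[1]}"

# Collect site-packages-like directories under the view, as pythonpaths() does.
pythonpaths = glob.glob(os.path.join(view_root, "**", pydir, "**"), recursive=True)

sys.path.extend(pythonpaths)  # current interpreter
os.environ["PATH"] = os.pathsep.join(
    [os.path.join(view_root, "bin")] + os.environ.get("PATH", "").split(os.pathsep)
)
os.environ["PYTHONPATH"] = os.pathsep.join(  # child interpreters
    os.environ.get("PYTHONPATH", "").split(os.pathsep) + pythonpaths
)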

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff