Compare commits


1 commit

Todd Gamblin
6cbe4e1311 spec: add {install_status} format attribute
`{install_status}` is handled in a funny way in `Spec.tree()`, and it can't be used in
other useful places like `Spec.format()`.

- [x] Make `{install_status}` a format attribute like most other things we want to print
      about specs.

- [x] Refactor whitespace handling in `Spec.format()` to only strip whitespace that wasn't
      in the original format string (i.e. that was added by our own attributes)
2024-02-16 22:46:58 -08:00
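For orientation, a rough sketch of what the new attribute enables (illustrative only, not taken from the diff; assumes a Spack checkout containing this commit, e.g. run through `spack python`):

```python
import spack.spec

spec = spack.spec.Spec("zlib").concretized()

# {install_status} is now an ordinary format attribute, so it composes with the
# usual fields instead of being special-cased inside Spec.tree():
print(spec.format("{name}{@version} {install_status}"))
```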
1371 changed files with 15961 additions and 27873 deletions

View File

@@ -1,4 +0,0 @@
{
"image": "ghcr.io/spack/ubuntu20.04-runner-amd64-gcc-11.4:2023.08.01",
"postCreateCommand": "./.devcontainer/postCreateCommand.sh"
}

View File

@@ -1,20 +0,0 @@
#!/bin/bash
# Load spack environment at terminal startup
cat <<EOF >> /root/.bashrc
. /workspaces/spack/share/spack/setup-env.sh
EOF
# Load spack environment in this script
. /workspaces/spack/share/spack/setup-env.sh
# Ensure generic targets for maximum matching with buildcaches
spack config --scope site add "packages:all:require:[target=x86_64_v3]"
spack config --scope site add "concretizer:targets:granularity:generic"
# Find compiler and install gcc-runtime
spack compiler find --scope site
# Setup buildcaches
spack mirror add --scope site develop https://binaries.spack.io/develop
spack buildcache keys --install --trust

View File

@@ -22,8 +22,8 @@ jobs:
matrix: matrix:
operating_system: ["ubuntu-latest", "macos-latest"] operating_system: ["ubuntu-latest", "macos-latest"]
steps: steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with: with:
python-version: ${{inputs.python_version}} python-version: ${{inputs.python_version}}
- name: Install Python packages - name: Install Python packages
@@ -43,9 +43,7 @@ jobs:
. share/spack/setup-env.sh . share/spack/setup-env.sh
$(which spack) audit packages $(which spack) audit packages
$(which spack) audit externals $(which spack) audit externals
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8 - uses: codecov/codecov-action@e0b68c6749509c5f83f984dd99a76a1c1a231044 # @v2.1.0
if: ${{ inputs.with_coverage == 'true' }} if: ${{ inputs.with_coverage == 'true' }}
with: with:
flags: unittests,audits flags: unittests,audits
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true

View File

@@ -24,7 +24,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \ make patch unzip which xz python3 python3-devel tree \
cmake bison bison-devel libstdc++-static cmake bison bison-devel libstdc++-static
- name: Checkout - name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Setup non-root user - name: Setup non-root user
@@ -62,7 +62,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \ make patch unzip xz-utils python3 python3-dev tree \
cmake bison cmake bison
- name: Checkout - name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Setup non-root user - name: Setup non-root user
@@ -99,7 +99,7 @@ jobs:
bzip2 curl file g++ gcc gfortran git gnupg2 gzip \ bzip2 curl file g++ gcc gfortran git gnupg2 gzip \
make patch unzip xz-utils python3 python3-dev tree make patch unzip xz-utils python3 python3-dev tree
- name: Checkout - name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Setup non-root user - name: Setup non-root user
@@ -133,7 +133,7 @@ jobs:
make patch unzip which xz python3 python3-devel tree \ make patch unzip which xz python3 python3-devel tree \
cmake bison cmake bison
- name: Checkout - name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Setup repo - name: Setup repo
@@ -158,8 +158,8 @@ jobs:
run: | run: |
brew install cmake bison@2.7 tree brew install cmake bison@2.7 tree
- name: Checkout - name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with: with:
python-version: "3.12" python-version: "3.12"
- name: Bootstrap clingo - name: Bootstrap clingo
@@ -182,7 +182,7 @@ jobs:
run: | run: |
brew install tree brew install tree
- name: Checkout - name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Bootstrap clingo - name: Bootstrap clingo
run: | run: |
set -ex set -ex
@@ -207,7 +207,7 @@ jobs:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Setup repo - name: Setup repo
@@ -250,7 +250,7 @@ jobs:
bzip2 curl file g++ gcc patchelf gfortran git gzip \ bzip2 curl file g++ gcc patchelf gfortran git gzip \
make patch unzip xz-utils python3 python3-dev tree make patch unzip xz-utils python3 python3-dev tree
- name: Checkout - name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Setup non-root user - name: Setup non-root user
@@ -287,7 +287,7 @@ jobs:
make patch unzip xz-utils python3 python3-dev tree \ make patch unzip xz-utils python3 python3-dev tree \
gawk gawk
- name: Checkout - name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Setup non-root user - name: Setup non-root user
@@ -320,7 +320,7 @@ jobs:
# Remove GnuPG since we want to bootstrap it # Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg sudo rm -rf /usr/local/bin/gpg
- name: Checkout - name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Bootstrap GnuPG - name: Bootstrap GnuPG
run: | run: |
source share/spack/setup-env.sh source share/spack/setup-env.sh
@@ -338,7 +338,7 @@ jobs:
# Remove GnuPG since we want to bootstrap it # Remove GnuPG since we want to bootstrap it
sudo rm -rf /usr/local/bin/gpg sudo rm -rf /usr/local/bin/gpg
- name: Checkout - name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Bootstrap GnuPG - name: Bootstrap GnuPG
run: | run: |
source share/spack/setup-env.sh source share/spack/setup-env.sh

View File

@@ -55,7 +55,7 @@ jobs:
if: github.repository == 'spack/spack' if: github.repository == 'spack/spack'
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
id: docker_meta id: docker_meta
@@ -96,10 +96,10 @@ jobs:
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226
- name: Log in to GitHub Container Registry - name: Log in to GitHub Container Registry
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.actor }} username: ${{ github.actor }}
@@ -107,13 +107,13 @@ jobs:
- name: Log in to DockerHub - name: Log in to DockerHub
if: github.event_name != 'pull_request' if: github.event_name != 'pull_request'
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d
with: with:
username: ${{ secrets.DOCKERHUB_USERNAME }} username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }} password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build & Deploy ${{ matrix.dockerfile[0] }} - name: Build & Deploy ${{ matrix.dockerfile[0] }}
uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56
with: with:
context: dockerfiles/${{ matrix.dockerfile[0] }} context: dockerfiles/${{ matrix.dockerfile[0] }}
platforms: ${{ matrix.dockerfile[1] }} platforms: ${{ matrix.dockerfile[1] }}

View File

@@ -18,7 +18,6 @@ jobs:
prechecks: prechecks:
needs: [ changes ] needs: [ changes ]
uses: ./.github/workflows/valid-style.yml uses: ./.github/workflows/valid-style.yml
secrets: inherit
with: with:
with_coverage: ${{ needs.changes.outputs.core }} with_coverage: ${{ needs.changes.outputs.core }}
all-prechecks: all-prechecks:
@@ -36,12 +35,12 @@ jobs:
core: ${{ steps.filter.outputs.core }} core: ${{ steps.filter.outputs.core }}
packages: ${{ steps.filter.outputs.packages }} packages: ${{ steps.filter.outputs.packages }}
steps: steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
if: ${{ github.event_name == 'push' }} if: ${{ github.event_name == 'push' }}
with: with:
fetch-depth: 0 fetch-depth: 0
# For pull requests it's not necessary to checkout the code # For pull requests it's not necessary to checkout the code
- uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 - uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a
id: filter id: filter
with: with:
# See https://github.com/dorny/paths-filter/issues/56 for the syntax used below # See https://github.com/dorny/paths-filter/issues/56 for the syntax used below
@@ -71,17 +70,14 @@ jobs:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.bootstrap == 'true' }} if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.bootstrap == 'true' }}
needs: [ prechecks, changes ] needs: [ prechecks, changes ]
uses: ./.github/workflows/bootstrap.yml uses: ./.github/workflows/bootstrap.yml
secrets: inherit
unit-tests: unit-tests:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }} if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
needs: [ prechecks, changes ] needs: [ prechecks, changes ]
uses: ./.github/workflows/unit_tests.yaml uses: ./.github/workflows/unit_tests.yaml
secrets: inherit
windows: windows:
if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }} if: ${{ github.repository == 'spack/spack' && needs.changes.outputs.core == 'true' }}
needs: [ prechecks ] needs: [ prechecks ]
uses: ./.github/workflows/windows_python.yml uses: ./.github/workflows/windows_python.yml
secrets: inherit
all: all:
needs: [ windows, unit-tests, bootstrap ] needs: [ windows, unit-tests, bootstrap ]
runs-on: ubuntu-latest runs-on: ubuntu-latest

View File

@@ -14,10 +14,10 @@ jobs:
build-paraview-deps: build-paraview-deps:
runs-on: windows-latest runs-on: windows-latest
steps: steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
with: with:
python-version: 3.9 python-version: 3.9
- name: Install Python packages - name: Install Python packages

View File

@@ -1,4 +1,4 @@
black==24.3.0 black==24.2.0
clingo==5.7.1 clingo==5.7.1
flake8==7.0.0 flake8==7.0.0
isort==5.13.2 isort==5.13.2

View File

@@ -51,10 +51,10 @@ jobs:
on_develop: false on_develop: false
steps: steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with: with:
python-version: ${{ matrix.python-version }} python-version: ${{ matrix.python-version }}
- name: Install System packages - name: Install System packages
@@ -91,19 +91,17 @@ jobs:
UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }} UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }}
run: | run: |
share/spack/qa/run-unit-tests share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8 - uses: codecov/codecov-action@e0b68c6749509c5f83f984dd99a76a1c1a231044
with: with:
flags: unittests,linux,${{ matrix.concretizer }} flags: unittests,linux,${{ matrix.concretizer }}
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
# Test shell integration # Test shell integration
shell: shell:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with: with:
python-version: '3.11' python-version: '3.11'
- name: Install System packages - name: Install System packages
@@ -124,11 +122,9 @@ jobs:
COVERAGE: true COVERAGE: true
run: | run: |
share/spack/qa/run-shell-tests share/spack/qa/run-shell-tests
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8 - uses: codecov/codecov-action@e0b68c6749509c5f83f984dd99a76a1c1a231044
with: with:
flags: shelltests,linux flags: shelltests,linux
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
# Test RHEL8 UBI with platform Python. This job is run # Test RHEL8 UBI with platform Python. This job is run
# only on PRs modifying core Spack # only on PRs modifying core Spack
@@ -141,7 +137,7 @@ jobs:
dnf install -y \ dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \ bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz make patch tcl unzip which xz
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- name: Setup repo and non-root user - name: Setup repo and non-root user
run: | run: |
git --version git --version
@@ -160,10 +156,10 @@ jobs:
clingo-cffi: clingo-cffi:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with: with:
python-version: '3.11' python-version: '3.11'
- name: Install System packages - name: Install System packages
@@ -185,23 +181,20 @@ jobs:
SPACK_TEST_SOLVER: clingo SPACK_TEST_SOLVER: clingo
run: | run: |
share/spack/qa/run-unit-tests share/spack/qa/run-unit-tests
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8 - uses: codecov/codecov-action@e0b68c6749509c5f83f984dd99a76a1c1a231044 # @v2.1.0
with: with:
flags: unittests,linux,clingo flags: unittests,linux,clingo
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
# Run unit tests on MacOS # Run unit tests on MacOS
macos: macos:
runs-on: ${{ matrix.os }} runs-on: macos-latest
strategy: strategy:
matrix: matrix:
os: [macos-latest, macos-14]
python-version: ["3.11"] python-version: ["3.11"]
steps: steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # @v2
with: with:
python-version: ${{ matrix.python-version }} python-version: ${{ matrix.python-version }}
- name: Install Python packages - name: Install Python packages
@@ -223,8 +216,6 @@ jobs:
$(which spack) solve zlib $(which spack) solve zlib
common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x) common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x)
$(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}" $(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}"
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8 - uses: codecov/codecov-action@e0b68c6749509c5f83f984dd99a76a1c1a231044
with: with:
flags: unittests,macos flags: unittests,macos
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true

View File

@@ -18,8 +18,8 @@ jobs:
validate: validate:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
with: with:
python-version: '3.11' python-version: '3.11'
cache: 'pip' cache: 'pip'
@@ -35,10 +35,10 @@ jobs:
style: style:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
with: with:
python-version: '3.11' python-version: '3.11'
cache: 'pip' cache: 'pip'
@@ -56,7 +56,6 @@ jobs:
share/spack/qa/run-style-tests share/spack/qa/run-style-tests
audit: audit:
uses: ./.github/workflows/audit.yaml uses: ./.github/workflows/audit.yaml
secrets: inherit
with: with:
with_coverage: ${{ inputs.with_coverage }} with_coverage: ${{ inputs.with_coverage }}
python_version: '3.11' python_version: '3.11'
@@ -70,7 +69,7 @@ jobs:
dnf install -y \ dnf install -y \
bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \ bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \
make patch tcl unzip which xz make patch tcl unzip which xz
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # @v2
- name: Setup repo and non-root user - name: Setup repo and non-root user
run: | run: |
git --version git --version

View File

@@ -15,10 +15,10 @@ jobs:
unit-tests: unit-tests:
runs-on: windows-latest runs-on: windows-latest
steps: steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
with: with:
python-version: 3.9 python-version: 3.9
- name: Install Python packages - name: Install Python packages
@@ -33,18 +33,16 @@ jobs:
./share/spack/qa/validate_last_exit.ps1 ./share/spack/qa/validate_last_exit.ps1
coverage combine -a coverage combine -a
coverage xml coverage xml
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8 - uses: codecov/codecov-action@e0b68c6749509c5f83f984dd99a76a1c1a231044
with: with:
flags: unittests,windows flags: unittests,windows
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
unit-tests-cmd: unit-tests-cmd:
runs-on: windows-latest runs-on: windows-latest
steps: steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
with: with:
python-version: 3.9 python-version: 3.9
- name: Install Python packages - name: Install Python packages
@@ -59,18 +57,16 @@ jobs:
./share/spack/qa/validate_last_exit.ps1 ./share/spack/qa/validate_last_exit.ps1
coverage combine -a coverage combine -a
coverage xml coverage xml
- uses: codecov/codecov-action@c16abc29c95fcf9174b58eb7e1abf4c866893bc8 - uses: codecov/codecov-action@e0b68c6749509c5f83f984dd99a76a1c1a231044
with: with:
flags: unittests,windows flags: unittests,windows
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
build-abseil: build-abseil:
runs-on: windows-latest runs-on: windows-latest
steps: steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
with: with:
python-version: 3.9 python-version: 3.9
- name: Install Python packages - name: Install Python packages

View File

@@ -42,8 +42,3 @@ concretizer:
# "minimal": allows the duplication of 'build-tools' nodes only (e.g. py-setuptools, cmake etc.) # "minimal": allows the duplication of 'build-tools' nodes only (e.g. py-setuptools, cmake etc.)
# "full" (experimental): allows separation of the entire build-tool stack (e.g. the entire "cmake" subDAG) # "full" (experimental): allows separation of the entire build-tool stack (e.g. the entire "cmake" subDAG)
strategy: minimal strategy: minimal
# Option to specify compatibility between operating systems for reuse of compilers and packages
# Specified as a key: [list] where the key is the os that is being targeted, and the list contains the OS's
# it can reuse. Note this is a directional compatibility so mutual compatibility between two OS's
# requires two entries i.e. os_compatible: {sonoma: [monterey], monterey: [sonoma]}
os_compatible: {}

View File

@@ -101,12 +101,6 @@ config:
verify_ssl: true verify_ssl: true
# This is where custom certs for proxy/firewall are stored.
# It can be a path or environment variable. To match ssl env configuration
# the default is the environment variable SSL_CERT_FILE
ssl_certs: $SSL_CERT_FILE
# Suppress gpg warnings from binary package verification # Suppress gpg warnings from binary package verification
# Only suppresses warnings, gpg failure will still fail the install # Only suppresses warnings, gpg failure will still fail the install
# Potential rationale to set True: users have already explicitly trusted the # Potential rationale to set True: users have already explicitly trusted the

View File

@@ -24,7 +24,6 @@ packages:
elf: [elfutils] elf: [elfutils]
fftw-api: [fftw, amdfftw] fftw-api: [fftw, amdfftw]
flame: [libflame, amdlibflame] flame: [libflame, amdlibflame]
fortran-rt: [gcc-runtime, intel-oneapi-runtime]
fuse: [libfuse] fuse: [libfuse]
gl: [glx, osmesa] gl: [glx, osmesa]
glu: [mesa-glu, openglu] glu: [mesa-glu, openglu]
@@ -35,9 +34,7 @@ packages:
java: [openjdk, jdk, ibm-java] java: [openjdk, jdk, ibm-java]
jpeg: [libjpeg-turbo, libjpeg] jpeg: [libjpeg-turbo, libjpeg]
lapack: [openblas, amdlibflame] lapack: [openblas, amdlibflame]
libgfortran: [ gcc-runtime ]
libglx: [mesa+glx, mesa18+glx] libglx: [mesa+glx, mesa18+glx]
libifcore: [ intel-oneapi-runtime ]
libllvm: [llvm] libllvm: [llvm]
libosmesa: [mesa+osmesa, mesa18+osmesa] libosmesa: [mesa+osmesa, mesa18+osmesa]
lua-lang: [lua, lua-luajit-openresty, lua-luajit] lua-lang: [lua, lua-luajit-openresty, lua-luajit]

View File

@@ -1119,9 +1119,6 @@ and ``3.4.2``. Similarly, ``@4.2:`` means any version above and including
``4.2``. As a short-hand, ``@3`` is equivalent to the range ``@3:3`` and ``4.2``. As a short-hand, ``@3`` is equivalent to the range ``@3:3`` and
includes any version with major version ``3``. includes any version with major version ``3``.
Versions are ordered lexicographically by their components. For more details
on the order, see :ref:`the packaging guide <version-comparison>`.
Notice that you can distinguish between the specific version ``@=3.2`` and Notice that you can distinguish between the specific version ``@=3.2`` and
the range ``@3.2``. This is useful for packages that follow a versioning the range ``@3.2``. This is useful for packages that follow a versioning
scheme that omits the zero patch version number: ``3.2``, ``3.2.1``, scheme that omits the zero patch version number: ``3.2``, ``3.2.1``,

View File

@@ -220,40 +220,6 @@ section of the configuration:
.. _binary_caches_oci: .. _binary_caches_oci:
---------------------------------
Automatic push to a build cache
---------------------------------
Sometimes it is convenient to push packages to a build cache as soon as they are installed. Spack can do this by setting the autopush flag when adding a mirror:
.. code-block:: console
$ spack mirror add --autopush <name> <url or path>
Or the autopush flag can be set for an existing mirror:
.. code-block:: console
$ spack mirror set --autopush <name> # enable automatic push for an existing mirror
$ spack mirror set --no-autopush <name> # disable automatic push for an existing mirror
Then after installing a package it is automatically pushed to all mirrors with ``autopush: true``. The command
.. code-block:: console
$ spack install <package>
will have the same effect as
.. code-block:: console
$ spack install <package>
$ spack buildcache push <cache> <package> # for all caches with autopush: true
.. note::
Packages are automatically pushed to a build cache only if they are built from source.
----------------------------------------- -----------------------------------------
OCI / Docker V2 registries as build cache OCI / Docker V2 registries as build cache
----------------------------------------- -----------------------------------------

View File

@@ -87,7 +87,7 @@ You can check what is installed in the bootstrapping store at any time using:
.. code-block:: console .. code-block:: console
% spack -b find % spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store" ==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 11 installed packages ==> 11 installed packages
-- darwin-catalina-x86_64 / apple-clang@12.0.0 ------------------ -- darwin-catalina-x86_64 / apple-clang@12.0.0 ------------------
@@ -101,7 +101,7 @@ In case it is needed you can remove all the software in the current bootstrappin
% spack clean -b % spack clean -b
==> Removing bootstrapped software and configuration in "/Users/spack/.spack/bootstrap" ==> Removing bootstrapped software and configuration in "/Users/spack/.spack/bootstrap"
% spack -b find % spack find -b
==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store" ==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store"
==> 0 installed packages ==> 0 installed packages
@@ -175,4 +175,4 @@ bootstrapping.
This command needs to be run on a machine with internet access and the resulting folder This command needs to be run on a machine with internet access and the resulting folder
has to be moved over to the air-gapped system. Once the local sources are added using the has to be moved over to the air-gapped system. Once the local sources are added using the
commands suggested at the prompt, they can be used to bootstrap Spack. commands suggested at the prompt, they can be used to bootstrap Spack.

View File

@@ -250,7 +250,7 @@ generator is Ninja. To switch to the Ninja generator, simply add:
.. code-block:: python .. code-block:: python
generator("ninja") generator = "Ninja"
``CMakePackage`` defaults to "Unix Makefiles". If you switch to the ``CMakePackage`` defaults to "Unix Makefiles". If you switch to the

View File

@@ -173,72 +173,6 @@ arguments to ``Makefile.PL`` or ``Build.PL`` by overriding
] ]
^^^^^^^
Testing
^^^^^^^
``PerlPackage`` provides a simple stand-alone test of the successfully
installed package to confirm that installed perl module(s) can be used.
These tests can be performed any time after the installation using
``spack -v test run``. (For more information on the command, see
:ref:`cmd-spack-test-run`.)
The base class automatically detects perl modules based on the presence
of ``*.pm`` files under the package's library directory. For example,
the files under ``perl-bignum``'s perl library are:
.. code-block:: console
$ find . -name "*.pm"
./bigfloat.pm
./bigrat.pm
./Math/BigFloat/Trace.pm
./Math/BigInt/Trace.pm
./Math/BigRat/Trace.pm
./bigint.pm
./bignum.pm
which results in the package having the ``use_modules`` property containing:
.. code-block:: python
use_modules = [
"bigfloat",
"bigrat",
"Math::BigFloat::Trace",
"Math::BigInt::Trace",
"Math::BigRat::Trace",
"bigint",
"bignum",
]
.. note::
This list can often be used to catch missing dependencies.
If the list is somehow wrong, you can provide the names of the modules
yourself by overriding ``use_modules`` like so:
.. code-block:: python
use_modules = ["bigfloat", "bigrat", "bigint", "bignum"]
If you only want a subset of the automatically detected modules to be
tested, you could instead define the ``skip_modules`` property on the
package. So, instead of overriding ``use_modules`` as shown above, you
could define the following:
.. code-block:: python
skip_modules = [
"Math::BigFloat::Trace",
"Math::BigInt::Trace",
"Math::BigRat::Trace",
]
for the same use tests.
^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^
Alternatives to Spack Alternatives to Spack
^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^

View File

@@ -145,22 +145,6 @@ hosts when making ``ssl`` connections. Set to ``false`` to disable, and
tools like ``curl`` will use their ``--insecure`` options. Disabling tools like ``curl`` will use their ``--insecure`` options. Disabling
this can expose you to attacks. Use at your own risk. this can expose you to attacks. Use at your own risk.
--------------------
``ssl_certs``
--------------------
Path to custom certificates for SSL verification. The value can be a
filesystem path, or an environment variable that expands to a file path.
The default value is set to the environment variable ``SSL_CERT_FILE``
to use the same syntax used by many other applications that automatically
detect custom certificates.
When ``url_fetch_method:curl`` the ``config:ssl_certs`` should resolve to
a single file. Spack will then set the environment variable ``CURL_CA_BUNDLE``
in the subprocess calling ``curl``.
If ``url_fetch_method:urllib`` then files and directories are supported i.e.
``config:ssl_certs:$SSL_CERT_FILE`` or ``config:ssl_certs:$SSL_CERT_DIR``
will work.
-------------------- --------------------
``checksum`` ``checksum``
-------------------- --------------------

View File

@@ -73,12 +73,9 @@ are six configuration scopes. From lowest to highest:
Spack instance per project) or for site-wide settings on a multi-user Spack instance per project) or for site-wide settings on a multi-user
machine (e.g., for a common Spack instance). machine (e.g., for a common Spack instance).
#. **plugin**: Read from a Python project's entry points. Settings here affect
all instances of Spack running with the same Python installation. This scope takes higher precedence than site, system, and default scopes.
#. **user**: Stored in the home directory: ``~/.spack/``. These settings #. **user**: Stored in the home directory: ``~/.spack/``. These settings
affect all instances of Spack and take higher precedence than site, affect all instances of Spack and take higher precedence than site,
system, plugin, or defaults scopes. system, or defaults scopes.
#. **custom**: Stored in a custom directory specified by ``--config-scope``. #. **custom**: Stored in a custom directory specified by ``--config-scope``.
If multiple scopes are listed on the command line, they are ordered If multiple scopes are listed on the command line, they are ordered
@@ -199,45 +196,6 @@ with MPICH. You can create different configuration scopes for use with
mpi: [mpich] mpi: [mpich]
.. _plugin-scopes:
^^^^^^^^^^^^^
Plugin scopes
^^^^^^^^^^^^^
.. note::
Python version >= 3.8 is required to enable plugin configuration.
Spack can be made aware of configuration scopes that are installed as part of a python package. To do so, register a function that returns the scope's path to the ``"spack.config"`` entry point. Consider the Python package ``my_package`` that includes Spack configurations:
.. code-block:: console
my-package/
├── src
│   ├── my_package
│   │   ├── __init__.py
│   │   └── spack/
│   │   │   └── config.yaml
└── pyproject.toml
adding the following to ``my_package``'s ``pyproject.toml`` will make ``my_package``'s ``spack/`` configurations visible to Spack when ``my_package`` is installed:
.. code-block:: toml
[project.entry_points."spack.config"]
my_package = "my_package:get_config_path"
The function ``my_package.get_config_path`` in ``my_package/__init__.py`` might look like
.. code-block:: python
import importlib.resources
def get_config_path():
dirname = importlib.resources.files("my_package").joinpath("spack")
if dirname.exists():
return str(dirname)
.. _platform-scopes: .. _platform-scopes:
------------------------ ------------------------

View File

@@ -952,17 +952,6 @@ function, as shown in the example below:
^mpi: "{name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}" ^mpi: "{name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}"
all: "{name}-{version}/{compiler.name}-{compiler.version}" all: "{name}-{version}/{compiler.name}-{compiler.version}"
Projections also permit environment and spack configuration variable
expansions as shown below:
.. code-block:: yaml
projections:
all: "{name}-{version}/{compiler.name}-{compiler.version}/$date/$SYSTEM_ENV_VARIABLE"
where ``$date`` is the spack configuration variable that will expand with the ``YYYY-MM-DD``
format and ``$SYSTEM_ENV_VARIABLE`` is an environment variable defined in the shell.
The entries in the projections configuration file must all be either The entries in the projections configuration file must all be either
specs or the keyword ``all``. For each spec, the projection used will specs or the keyword ``all``. For each spec, the projection used will
be the first non-``all`` entry that the spec satisfies, or ``all`` if be the first non-``all`` entry that the spec satisfies, or ``all`` if

View File

@@ -111,39 +111,3 @@ The corresponding unit tests can be run giving the appropriate options to ``spac
(5 durations < 0.005s hidden. Use -vv to show these durations.) (5 durations < 0.005s hidden. Use -vv to show these durations.)
=========================================== 5 passed in 5.06s ============================================ =========================================== 5 passed in 5.06s ============================================
---------------------------------------
Registering Extensions via Entry Points
---------------------------------------
.. note::
Python version >= 3.8 is required to register extensions via entry points.
Spack can be made aware of extensions that are installed as part of a python package. To do so, register a function that returns the extension path, or paths, to the ``"spack.extensions"`` entry point. Consider the Python package ``my_package`` that includes a Spack extension:
.. code-block:: console
my-package/
├── src
│   ├── my_package
│   │   └── __init__.py
│   └── spack-scripting/ # the spack extensions
└── pyproject.toml
adding the following to ``my_package``'s ``pyproject.toml`` will make the ``spack-scripting`` extension visible to Spack when ``my_package`` is installed:
.. code-block:: toml
[project.entry_points."spack.extensions"]
my_package = "my_package:get_extension_path"
The function ``my_package.get_extension_path`` in ``my_package/__init__.py`` might look like
.. code-block:: python
import importlib.resources
def get_extension_path():
dirname = importlib.resources.files("my_package").joinpath("spack-scripting")
if dirname.exists():
return str(dirname)

View File

@@ -250,10 +250,9 @@ Compiler configuration
Spack has the ability to build packages with multiple compilers and Spack has the ability to build packages with multiple compilers and
compiler versions. Compilers can be made available to Spack by compiler versions. Compilers can be made available to Spack by
-specifying them manually in ``compilers.yaml`` or ``packages.yaml``,
-or automatically by running ``spack compiler find``, but for
-convenience Spack will automatically detect compilers the first time
-it needs them.
+specifying them manually in ``compilers.yaml``, or automatically by
+running ``spack compiler find``, but for convenience Spack will
+automatically detect compilers the first time it needs them.
.. _cmd-spack-compilers: .. _cmd-spack-compilers:
@@ -458,48 +457,6 @@ specification. The operations available to modify the environment are ``set``, `
prepend_path: # Similar for append|remove_path prepend_path: # Similar for append|remove_path
LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh
.. note::
Spack is in the process of moving compilers from a separate
attribute to be handled like all other packages. As part of this
process, the ``compilers.yaml`` section will eventually be replaced
by configuration in the ``packages.yaml`` section. This new
configuration is now available, although it is not yet the default
behavior.
Compilers can also be configured as external packages in the
``packages.yaml`` config file. Any external package for a compiler
(e.g. ``gcc`` or ``llvm``) will be treated as a configured compiler
assuming the paths to the compiler executables are determinable from
the prefix.
If the paths to the compiler executable are not determinable from the
prefix, you can add them to the ``extra_attributes`` field. Similarly,
all other fields from the compilers config can be added to the
``extra_attributes`` field for an external representing a compiler.
.. code-block:: yaml
packages:
gcc:
external:
- spec: gcc@12.2.0 arch=linux-rhel8-skylake
prefix: /usr
extra_attributes:
environment:
set:
GCC_ROOT: /usr
external:
- spec: llvm+clang@15.0.0 arch=linux-rhel8-skylake
prefix: /usr
extra_attributes:
paths:
cc: /usr/bin/clang-with-suffix
cxx: /usr/bin/clang++-with-extra-info
fc: /usr/bin/gfortran
f77: /usr/bin/gfortran
extra_rpaths:
- /usr/lib/llvm/
^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^
Build Your Own Compiler Build Your Own Compiler

View File

@@ -273,21 +273,9 @@ builtin support through the ``depends_on`` function, the latter simply uses a ``
statement. Both module systems (at least in newer versions) do reference counting, so that if a statement. Both module systems (at least in newer versions) do reference counting, so that if a
module is loaded by two different modules, it will only be unloaded after the others are. module is loaded by two different modules, it will only be unloaded after the others are.
-The ``autoload`` key accepts the values:
-
-* ``none``: no autoloading
-* ``run``: autoload direct *run* type dependencies
-* ``direct``: autoload direct *link and run* type dependencies
-* ``all``: autoload all dependencies
-
-In case of ``run`` and ``direct``, a ``module load`` triggers a recursive load.
-
-The ``direct`` option is most correct: there are cases where pure link dependencies need to set
-variables for themselves, or need to have variables of their own dependencies set.
-
-In practice however, ``run`` is often sufficient, and may make ``module load`` snappier.
-
-The ``all`` option is discouraged and seldom used.
+The ``autoload`` key accepts the values ``none``, ``direct``, and ``all``. To disable it, use
+``none``, and to enable, it's best to stick to ``direct``, which only autoloads the direct link and
+run type dependencies, relying on recursive autoloading to load the rest.
A common complaint about autoloading is the large number of modules that are visible to the user. A common complaint about autoloading is the large number of modules that are visible to the user.
Spack has a solution for this as well: ``hide_implicits: true``. This ensures that only those Spack has a solution for this as well: ``hide_implicits: true``. This ensures that only those
@@ -309,11 +297,11 @@ Environment Modules requires version 4.7 or higher.
tcl: tcl:
hide_implicits: true hide_implicits: true
all: all:
autoload: direct # or `run` autoload: direct
lmod: lmod:
hide_implicits: true hide_implicits: true
all: all:
autoload: direct # or `run` autoload: direct
.. _anonymous_specs: .. _anonymous_specs:

View File

@@ -893,50 +893,26 @@ as an option to the ``version()`` directive. Example situations would be a
"snapshot"-like Version Control System (VCS) tag, a VCS branch such as "snapshot"-like Version Control System (VCS) tag, a VCS branch such as
``v6-16-00-patches``, or a URL specifying a regularly updated snapshot tarball. ``v6-16-00-patches``, or a URL specifying a regularly updated snapshot tarball.
-.. _version-comparison:
-
^^^^^^^^^^^^^^^^^^
Version comparison
^^^^^^^^^^^^^^^^^^

Most Spack versions are numeric, a tuple of integers; for example,
-``0.1``, ``6.96`` or ``1.2.3.1``. In this very basic case, version
-comparison is lexicographical on the numeric components:
-``1.2 < 1.2.1 < 1.2.2 < 1.10``.
-
-Spack can also support string components such as ``1.1.1a`` and
-``1.y.0``. String components are considered less than numeric
-components, so ``1.y.0 < 1.0``. This is for consistency with
-`RPM <https://bugzilla.redhat.com/show_bug.cgi?id=50977>`_. String
-components do not have to be separated by dots or any other delimiter.
-So, the contrived version ``1y0`` is identical to ``1.y.0``.
-
-Pre-release suffixes also contain string parts, but they are handled
-in a special way. For example ``1.2.3alpha1`` is parsed as a pre-release
-of the version ``1.2.3``. This allows Spack to order it before the
-actual release: ``1.2.3alpha1 < 1.2.3``. Spack supports alpha, beta and
-release candidate suffixes: ``1.2alpha1 < 1.2beta1 < 1.2rc1 < 1.2``. Any
-suffix not recognized as a pre-release is treated as an ordinary
-string component, so ``1.2 < 1.2-mysuffix``.
-
-Finally, there are a few special string components that are considered
-"infinity versions". They include ``develop``, ``main``, ``master``,
-``head``, ``trunk``, and ``stable``. For example: ``1.2 < develop``.
-These are useful for specifying the most recent development version of
-a package (often a moving target like a git branch), without assigning
-a specific version number. Infinity versions are not automatically used
-when determining the latest version of a package unless explicitly
-required by another package or user.
-
-More formally, the order on versions is defined as follows. A version
-string is split into a list of components based on delimiters such as
-``.`` and ``-`` and string boundaries. The components are split into
-the **release** and a possible **pre-release** (if the last component
-is numeric and the second to last is a string ``alpha``, ``beta`` or ``rc``).
-
-The release components are ordered lexicographically, with comparison
-between different types of components as follows:
+``0.1``, ``6.96`` or ``1.2.3.1``. Spack knows how to compare and sort
+numeric versions.
+
+Some Spack versions involve slight extensions of numeric syntax; for
+example, ``py-sphinx-rtd-theme@=0.1.10a0``. In this case, numbers are
+always considered to be "newer" than letters. This is for consistency
+with `RPM <https://bugzilla.redhat.com/show_bug.cgi?id=50977>`_.
+
+Spack versions may also be arbitrary non-numeric strings, for example
+``develop``, ``master``, ``local``.
+
+The order on versions is defined as follows. A version string is split
+into a list of components based on delimiters such as ``.``, ``-`` etc.
+Lists are then ordered lexicographically, where components are ordered
+as follows:

#. The following special strings are considered larger than any other
   numeric or non-numeric version component, and satisfy the following
@@ -949,9 +925,6 @@ between different types of components as follows:
#. All other non-numeric components are less than numeric components, #. All other non-numeric components are less than numeric components,
and are ordered alphabetically. and are ordered alphabetically.
Finally, if the release components are equal, the pre-release components
are used to break the tie, in the obvious way.
The logic behind this sort order is two-fold: The logic behind this sort order is two-fold:
#. Non-numeric versions are usually used for special cases while #. Non-numeric versions are usually used for special cases while
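As a quick, illustrative check of the ordering rules described on both sides of this hunk (not part of the diff; runnable with ``spack python``):

.. code-block:: python

   from spack.version import Version

   assert Version("1.2") < Version("1.2.1") < Version("1.10")  # numeric components compare numerically
   assert Version("1.y.0") < Version("1.0")                    # string components sort before numeric ones
   assert Version("1.2") < Version("develop")                  # "infinity" versions sort above everything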

View File

@@ -5,9 +5,9 @@ sphinx-rtd-theme==2.0.0
python-levenshtein==0.25.0 python-levenshtein==0.25.0
docutils==0.20.1 docutils==0.20.1
pygments==2.17.2 pygments==2.17.2
urllib3==2.2.1 urllib3==2.2.0
pytest==8.1.1 pytest==8.0.1
isort==5.13.2 isort==5.13.2
black==24.3.0 black==24.2.0
flake8==7.0.0 flake8==7.0.0
mypy==1.9.0 mypy==1.8.0

lib/spack/env/cc vendored
View File

@@ -248,7 +248,7 @@ case "$command" in
lang_flags=C lang_flags=C
debug_flags="-g" debug_flags="-g"
;; ;;
c++|CC|g++|clang++|armclang++|icpc|icpx|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC) c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC|amdclang++|crayCC)
command="$SPACK_CXX" command="$SPACK_CXX"
language="C++" language="C++"
comp="CXX" comp="CXX"
@@ -526,7 +526,7 @@ categorize_arguments() {
continue continue
fi fi
replaced="$after$stripped" replaced="$after$stripped"
# it matched, remove it # it matched, remove it
shift shift
@@ -913,3 +913,4 @@ fi
# Execute the full command, preserving spaces with IFS set # Execute the full command, preserving spaces with IFS set
# to the alarm bell separator. # to the alarm bell separator.
IFS="$lsep"; exec $full_command_list IFS="$lsep"; exec $full_command_list

View File

@@ -18,7 +18,7 @@
* Homepage: https://pypi.python.org/pypi/archspec * Homepage: https://pypi.python.org/pypi/archspec
* Usage: Labeling, comparison and detection of microarchitectures * Usage: Labeling, comparison and detection of microarchitectures
* Version: 0.2.3 (commit 7b8fe60b69e2861e7dac104bc1c183decfcd3daf) * Version: 0.2.2 (commit 1dc58a5776dd77e6fc6e4ba5626af5b1fb24996e)
astunparse astunparse
---------------- ----------------

View File

@@ -1,3 +1,2 @@
"""Init file to avoid namespace packages""" """Init file to avoid namespace packages"""
__version__ = "0.2.2"
__version__ = "0.2.3"

View File

@@ -3,7 +3,6 @@
""" """
import sys import sys
from .cli import main from .cli import main
sys.exit(main()) sys.exit(main())

View File

@@ -46,11 +46,7 @@ def _make_parser() -> argparse.ArgumentParser:
def cpu() -> int: def cpu() -> int:
"""Run the `archspec cpu` subcommand.""" """Run the `archspec cpu` subcommand."""
try: print(archspec.cpu.host())
print(archspec.cpu.host())
except FileNotFoundError as exc:
print(exc)
return 1
return 0 return 0
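# Illustrative usage of the API that the CLI above wraps (a sketch, not part of this
# diff): archspec.cpu.host() returns the Microarchitecture detected for this machine.
import archspec.cpu

host = archspec.cpu.host()
print(host.name)  # e.g. "skylake" or "m1"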

View File

@@ -5,14 +5,10 @@
"""The "cpu" package permits to query and compare different """The "cpu" package permits to query and compare different
CPU microarchitectures. CPU microarchitectures.
""" """
from .microarchitecture import Microarchitecture, UnsupportedMicroarchitecture
from .microarchitecture import TARGETS, generic_microarchitecture
from .microarchitecture import version_components
from .detect import host from .detect import host
from .microarchitecture import (
TARGETS,
Microarchitecture,
UnsupportedMicroarchitecture,
generic_microarchitecture,
version_components,
)
__all__ = [ __all__ = [
"Microarchitecture", "Microarchitecture",

View File

@@ -4,17 +4,15 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT) # SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Detection of CPU microarchitectures""" """Detection of CPU microarchitectures"""
import collections import collections
import functools
import os import os
import platform import platform
import re import re
import struct
import subprocess import subprocess
import warnings import warnings
from typing import Dict, List, Optional, Set, Tuple, Union
from ..vendor.cpuid.cpuid import CPUID from .microarchitecture import generic_microarchitecture, TARGETS
from .microarchitecture import TARGETS, Microarchitecture, generic_microarchitecture from .schema import TARGETS_JSON
from .schema import CPUID_JSON, TARGETS_JSON
#: Mapping from operating systems to chain of commands #: Mapping from operating systems to chain of commands
#: to obtain a dictionary of raw info on the current cpu #: to obtain a dictionary of raw info on the current cpu
@@ -24,46 +22,43 @@
#: functions checking the compatibility of the host with a given target #: functions checking the compatibility of the host with a given target
COMPATIBILITY_CHECKS = {} COMPATIBILITY_CHECKS = {}
# Constants for commonly used architectures
X86_64 = "x86_64"
AARCH64 = "aarch64"
PPC64LE = "ppc64le"
PPC64 = "ppc64"
RISCV64 = "riscv64"
def info_dict(operating_system):
def detection(operating_system: str): """Decorator to mark functions that are meant to return raw info on
"""Decorator to mark functions that are meant to return partial information on the current cpu. the current cpu.
Args: Args:
operating_system: operating system where this function can be used. operating_system (str or tuple): operating system for which the marked
function is a viable factory of raw info dictionaries.
""" """
def decorator(factory): def decorator(factory):
INFO_FACTORY[operating_system].append(factory) INFO_FACTORY[operating_system].append(factory)
return factory
@functools.wraps(factory)
def _impl():
info = factory()
# Check that info contains a few mandatory fields
msg = 'field "{0}" is missing from raw info dictionary'
assert "vendor_id" in info, msg.format("vendor_id")
assert "flags" in info, msg.format("flags")
assert "model" in info, msg.format("model")
assert "model_name" in info, msg.format("model_name")
return info
return _impl
return decorator return decorator
def partial_uarch( @info_dict(operating_system="Linux")
name: str = "", vendor: str = "", features: Optional[Set[str]] = None, generation: int = 0 def proc_cpuinfo():
) -> Microarchitecture: """Returns a raw info dictionary by parsing the first entry of
"""Construct a partial microarchitecture, from information gathered during system scan.""" ``/proc/cpuinfo``
return Microarchitecture( """
name=name, info = {}
parents=[],
vendor=vendor,
features=features or set(),
compilers={},
generation=generation,
)
@detection(operating_system="Linux")
def proc_cpuinfo() -> Microarchitecture:
"""Returns a partial Microarchitecture, obtained from scanning ``/proc/cpuinfo``"""
data = {}
with open("/proc/cpuinfo") as file: # pylint: disable=unspecified-encoding with open("/proc/cpuinfo") as file: # pylint: disable=unspecified-encoding
for line in file: for line in file:
key, separator, value = line.partition(":") key, separator, value = line.partition(":")
@@ -75,96 +70,11 @@ def proc_cpuinfo() -> Microarchitecture:
# #
# we are on a blank line separating two cpus. Exit early as # we are on a blank line separating two cpus. Exit early as
# we want to read just the first entry in /proc/cpuinfo # we want to read just the first entry in /proc/cpuinfo
if separator != ":" and data: if separator != ":" and info:
break break
data[key.strip()] = value.strip() info[key.strip()] = value.strip()
return info
architecture = _machine()
if architecture == X86_64:
return partial_uarch(
vendor=data.get("vendor_id", "generic"), features=_feature_set(data, key="flags")
)
if architecture == AARCH64:
return partial_uarch(
vendor=_canonicalize_aarch64_vendor(data),
features=_feature_set(data, key="Features"),
)
if architecture in (PPC64LE, PPC64):
generation_match = re.search(r"POWER(\d+)", data.get("cpu", ""))
try:
generation = int(generation_match.group(1))
except AttributeError:
# There might be no match under emulated environments. For instance
# emulating a ppc64le with QEMU and Docker still reports the host
# /proc/cpuinfo and not a Power
generation = 0
return partial_uarch(generation=generation)
if architecture == RISCV64:
if data.get("uarch") == "sifive,u74-mc":
data["uarch"] = "u74mc"
return partial_uarch(name=data.get("uarch", RISCV64))
return generic_microarchitecture(architecture)
class CpuidInfoCollector:
"""Collects the information we need on the host CPU from cpuid"""
# pylint: disable=too-few-public-methods
def __init__(self):
self.cpuid = CPUID()
registers = self.cpuid.registers_for(**CPUID_JSON["vendor"]["input"])
self.highest_basic_support = registers.eax
self.vendor = struct.pack("III", registers.ebx, registers.edx, registers.ecx).decode(
"utf-8"
)
registers = self.cpuid.registers_for(**CPUID_JSON["highest_extension_support"]["input"])
self.highest_extension_support = registers.eax
self.features = self._features()
def _features(self):
result = set()
def check_features(data):
registers = self.cpuid.registers_for(**data["input"])
for feature_check in data["bits"]:
current = getattr(registers, feature_check["register"])
if self._is_bit_set(current, feature_check["bit"]):
result.add(feature_check["name"])
for call_data in CPUID_JSON["flags"]:
if call_data["input"]["eax"] > self.highest_basic_support:
continue
check_features(call_data)
for call_data in CPUID_JSON["extension-flags"]:
if call_data["input"]["eax"] > self.highest_extension_support:
continue
check_features(call_data)
return result
def _is_bit_set(self, register: int, bit: int) -> bool:
mask = 1 << bit
return register & mask > 0
@detection(operating_system="Windows")
def cpuid_info():
"""Returns a partial Microarchitecture, obtained from running the cpuid instruction"""
architecture = _machine()
if architecture == X86_64:
data = CpuidInfoCollector()
return partial_uarch(vendor=data.vendor, features=data.features)
return generic_microarchitecture(architecture)
def _check_output(args, env): def _check_output(args, env):
@@ -173,25 +83,14 @@ def _check_output(args, env):
return str(output.decode("utf-8")) return str(output.decode("utf-8"))
WINDOWS_MAPPING = {
"AMD64": "x86_64",
"ARM64": "aarch64",
}
def _machine(): def _machine():
"""Return the machine architecture we are on""" """ "Return the machine architecture we are on"""
operating_system = platform.system() operating_system = platform.system()
# If we are not on Darwin or Windows, trust what Python tells us # If we are not on Darwin, trust what Python tells us
if operating_system not in ("Darwin", "Windows"): if operating_system != "Darwin":
return platform.machine() return platform.machine()
# Normalize windows specific names
if operating_system == "Windows":
platform_machine = platform.machine()
return WINDOWS_MAPPING.get(platform_machine, platform_machine)
# On Darwin it might happen that we are on M1, but using an interpreter # On Darwin it might happen that we are on M1, but using an interpreter
# built for x86_64. In that case "platform.machine() == 'x86_64'", so we # built for x86_64. In that case "platform.machine() == 'x86_64'", so we
# need to fix that. # need to fix that.
@@ -204,47 +103,54 @@ def _machine():
if "Apple" in output: if "Apple" in output:
# Note that a native Python interpreter on Apple M1 would return # Note that a native Python interpreter on Apple M1 would return
# "arm64" instead of "aarch64". Here we normalize to the latter. # "arm64" instead of "aarch64". Here we normalize to the latter.
return AARCH64 return "aarch64"
return X86_64 return "x86_64"
@detection(operating_system="Darwin") @info_dict(operating_system="Darwin")
def sysctl_info() -> Microarchitecture: def sysctl_info_dict():
"""Returns a raw info dictionary parsing the output of sysctl.""" """Returns a raw info dictionary parsing the output of sysctl."""
child_environment = _ensure_bin_usrbin_in_path() child_environment = _ensure_bin_usrbin_in_path()
def sysctl(*args: str) -> str: def sysctl(*args):
return _check_output(["sysctl"] + list(args), env=child_environment).strip() return _check_output(["sysctl"] + list(args), env=child_environment).strip()
if _machine() == X86_64: if _machine() == "x86_64":
features = ( flags = (
f'{sysctl("-n", "machdep.cpu.features").lower()} ' sysctl("-n", "machdep.cpu.features").lower()
f'{sysctl("-n", "machdep.cpu.leaf7_features").lower()}' + " "
+ sysctl("-n", "machdep.cpu.leaf7_features").lower()
) )
features = set(features.split()) info = {
"vendor_id": sysctl("-n", "machdep.cpu.vendor"),
"flags": flags,
"model": sysctl("-n", "machdep.cpu.model"),
"model name": sysctl("-n", "machdep.cpu.brand_string"),
}
else:
model = "unknown"
model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
if "m2" in model_str:
model = "m2"
elif "m1" in model_str:
model = "m1"
elif "apple" in model_str:
model = "m1"
# Flags detected on Darwin turned to their linux counterpart info = {
for darwin_flag, linux_flag in TARGETS_JSON["conversions"]["darwin_flags"].items(): "vendor_id": "Apple",
if darwin_flag in features: "flags": [],
features.update(linux_flag.split()) "model": model,
"CPU implementer": "Apple",
return partial_uarch(vendor=sysctl("-n", "machdep.cpu.vendor"), features=features) "model name": sysctl("-n", "machdep.cpu.brand_string"),
}
model = "unknown" return info
model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
if "m2" in model_str:
model = "m2"
elif "m1" in model_str:
model = "m1"
elif "apple" in model_str:
model = "m1"
return partial_uarch(name=model, vendor="Apple")
def _ensure_bin_usrbin_in_path(): def _ensure_bin_usrbin_in_path():
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is usually found there # Make sure that /sbin and /usr/sbin are in PATH as sysctl is
# usually found there
child_environment = dict(os.environ.items()) child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep) search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"): for additional_path in ("/sbin", "/usr/sbin"):
@@ -254,10 +160,22 @@ def _ensure_bin_usrbin_in_path():
return child_environment return child_environment
def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str: def adjust_raw_flags(info):
"""Adjust the vendor field to make it human-readable""" """Adjust the flags detected on the system to homogenize
if "CPU implementer" not in data: slightly different representations.
return "generic" """
# Flags detected on Darwin turned to their linux counterpart
flags = info.get("flags", [])
d2l = TARGETS_JSON["conversions"]["darwin_flags"]
for darwin_flag, linux_flag in d2l.items():
if darwin_flag in flags:
info["flags"] += " " + linux_flag
def adjust_raw_vendor(info):
"""Adjust the vendor field to make it human readable"""
if "CPU implementer" not in info:
return
# Mapping numeric codes to vendor (ARM). This list is a merge from # Mapping numeric codes to vendor (ARM). This list is a merge from
# different sources: # different sources:
@@ -267,37 +185,43 @@ def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str:
# https://github.com/gcc-mirror/gcc/blob/master/gcc/config/aarch64/aarch64-cores.def # https://github.com/gcc-mirror/gcc/blob/master/gcc/config/aarch64/aarch64-cores.def
# https://patchwork.kernel.org/patch/10524949/ # https://patchwork.kernel.org/patch/10524949/
arm_vendors = TARGETS_JSON["conversions"]["arm_vendors"] arm_vendors = TARGETS_JSON["conversions"]["arm_vendors"]
arm_code = data["CPU implementer"] arm_code = info["CPU implementer"]
return arm_vendors.get(arm_code, arm_code) if arm_code in arm_vendors:
info["CPU implementer"] = arm_vendors[arm_code]
def _feature_set(data: Dict[str, str], key: str) -> Set[str]: def raw_info_dictionary():
return set(data.get(key, "").split()) """Returns a dictionary with information on the cpu of the current host.
This function calls all the viable factories one after the other until
def detected_info() -> Microarchitecture: there's one that is able to produce the requested information.
"""Returns a partial Microarchitecture with information on the CPU of the current host.
This function calls all the viable factories one after the other until there's one that is
able to produce the requested information. Falls-back to a generic microarchitecture, if none
of the calls succeed.
""" """
# pylint: disable=broad-except # pylint: disable=broad-except
info = {}
for factory in INFO_FACTORY[platform.system()]: for factory in INFO_FACTORY[platform.system()]:
try: try:
return factory() info = factory()
except Exception as exc: except Exception as exc:
warnings.warn(str(exc)) warnings.warn(str(exc))
return generic_microarchitecture(_machine()) if info:
adjust_raw_flags(info)
adjust_raw_vendor(info)
break
return info
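Both versions of this file follow the same factory-registration pattern: a decorator files each probe under the host operating system, and the dispatcher above tries the factories in order until one succeeds. A minimal sketch with a hypothetical probe, using the names from the variant that defines `detection` and `partial_uarch` (the other variant uses `@info_dict` and returns plain dictionaries):

```python
# Hypothetical probe registered for Linux; a real factory would parse /proc/cpuinfo.
from archspec.cpu.detect import detection, partial_uarch

@detection(operating_system="Linux")
def my_probe():
    return partial_uarch(vendor="GenuineIntel", features={"sse2", "avx", "avx2"})
```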
def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitecture]: def compatible_microarchitectures(info):
"""Returns an unordered list of known micro-architectures that are compatible with the """Returns an unordered list of known micro-architectures that are
partial Microarchitecture passed as input. compatible with the info dictionary passed as argument.
Args:
info (dict): dictionary containing information on the host cpu
""" """
architecture_family = _machine() architecture_family = _machine()
# If a tester is not registered, assume no known target is compatible with the host # If a tester is not registered, be conservative and assume no known
# target is compatible with the host
tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False) tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
return [x for x in TARGETS.values() if tester(info, x)] or [ return [x for x in TARGETS.values() if tester(info, x)] or [
generic_microarchitecture(architecture_family) generic_microarchitecture(architecture_family)
@@ -306,8 +230,8 @@ def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitec
def host(): def host():
"""Detects the host micro-architecture and returns it.""" """Detects the host micro-architecture and returns it."""
# Retrieve information on the host's cpu # Retrieve a dictionary with raw information on the host's cpu
info = detected_info() info = raw_info_dictionary()
# Get a list of possible candidates for this micro-architecture # Get a list of possible candidates for this micro-architecture
candidates = compatible_microarchitectures(info) candidates = compatible_microarchitectures(info)
@@ -334,15 +258,16 @@ def sorting_fn(item):
return max(candidates, key=sorting_fn) return max(candidates, key=sorting_fn)
def compatibility_check(architecture_family: Union[str, Tuple[str, ...]]): def compatibility_check(architecture_family):
"""Decorator to register a function as a proper compatibility check. """Decorator to register a function as a proper compatibility check.
A compatibility check function takes a partial Microarchitecture object as a first argument, A compatibility check function takes the raw info dictionary as a first
and an arbitrary target Microarchitecture as the second argument. It returns True if the argument and an arbitrary target as the second argument. It returns True
target is compatible with first argument, False otherwise. if the target is compatible with the info dictionary, False otherwise.
Args: Args:
architecture_family: architecture family for which this test can be used architecture_family (str or tuple): architecture family for which
this test can be used, e.g. x86_64 or ppc64le etc.
""" """
# Turn the argument into something iterable # Turn the argument into something iterable
if isinstance(architecture_family, str): if isinstance(architecture_family, str):
@@ -355,57 +280,86 @@ def decorator(func):
return decorator return decorator
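A hedged sketch of how a new family could register itself with the decorator documented above ("loongarch64" is only an illustration, not a family archspec ships a check for):

```python
from archspec.cpu.detect import TARGETS, compatibility_check

@compatibility_check(architecture_family="loongarch64")
def compatibility_check_for_loongarch64(info, target):
    # A target is usable if it is the family root or descends from it.
    arch_root = TARGETS["loongarch64"]
    return target == arch_root or arch_root in target.ancestors
```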
@compatibility_check(architecture_family=(PPC64LE, PPC64)) @compatibility_check(architecture_family=("ppc64le", "ppc64"))
def compatibility_check_for_power(info, target): def compatibility_check_for_power(info, target):
"""Compatibility check for PPC64 and PPC64LE architectures.""" """Compatibility check for PPC64 and PPC64LE architectures."""
basename = platform.machine()
generation_match = re.search(r"POWER(\d+)", info.get("cpu", ""))
try:
generation = int(generation_match.group(1))
except AttributeError:
# There might be no match under emulated environments. For instance
# emulating a ppc64le with QEMU and Docker still reports the host
# /proc/cpuinfo and not a Power
generation = 0
# We can use a target if it descends from our machine type and our # We can use a target if it descends from our machine type and our
# generation (9 for POWER9, etc) is at least its generation. # generation (9 for POWER9, etc) is at least its generation.
arch_root = TARGETS[_machine()] arch_root = TARGETS[basename]
return ( return (
target == arch_root or arch_root in target.ancestors target == arch_root or arch_root in target.ancestors
) and target.generation <= info.generation ) and target.generation <= generation
@compatibility_check(architecture_family=X86_64) @compatibility_check(architecture_family="x86_64")
def compatibility_check_for_x86_64(info, target): def compatibility_check_for_x86_64(info, target):
"""Compatibility check for x86_64 architectures.""" """Compatibility check for x86_64 architectures."""
basename = "x86_64"
vendor = info.get("vendor_id", "generic")
features = set(info.get("flags", "").split())
# We can use a target if it descends from our machine type, is from our # We can use a target if it descends from our machine type, is from our
# vendor, and we have all of its features # vendor, and we have all of its features
arch_root = TARGETS[X86_64] arch_root = TARGETS[basename]
return ( return (
(target == arch_root or arch_root in target.ancestors) (target == arch_root or arch_root in target.ancestors)
and target.vendor in (info.vendor, "generic") and target.vendor in (vendor, "generic")
and target.features.issubset(info.features) and target.features.issubset(features)
) )
@compatibility_check(architecture_family=AARCH64) @compatibility_check(architecture_family="aarch64")
def compatibility_check_for_aarch64(info, target): def compatibility_check_for_aarch64(info, target):
"""Compatibility check for AARCH64 architectures.""" """Compatibility check for AARCH64 architectures."""
# At the moment, it's not clear how to detect compatibility with basename = "aarch64"
features = set(info.get("Features", "").split())
vendor = info.get("CPU implementer", "generic")
# At the moment it's not clear how to detect compatibility with
# a specific version of the architecture # a specific version of the architecture
if target.vendor == "generic" and target.name != AARCH64: if target.vendor == "generic" and target.name != "aarch64":
return False return False
arch_root = TARGETS[AARCH64] arch_root = TARGETS[basename]
arch_root_and_vendor = arch_root == target.family and target.vendor in ( arch_root_and_vendor = arch_root == target.family and target.vendor in (
info.vendor, vendor,
"generic", "generic",
) )
# On macOS it seems impossible to get all the CPU features # On macOS it seems impossible to get all the CPU features
# with syctl info, but for ARM we can get the exact model # with syctl info, but for ARM we can get the exact model
if platform.system() == "Darwin": if platform.system() == "Darwin":
model = TARGETS[info.name] model_key = info.get("model", basename)
model = TARGETS[model_key]
return arch_root_and_vendor and (target == model or target in model.ancestors) return arch_root_and_vendor and (target == model or target in model.ancestors)
return arch_root_and_vendor and target.features.issubset(info.features) return arch_root_and_vendor and target.features.issubset(features)
@compatibility_check(architecture_family=RISCV64) @compatibility_check(architecture_family="riscv64")
def compatibility_check_for_riscv64(info, target): def compatibility_check_for_riscv64(info, target):
"""Compatibility check for riscv64 architectures.""" """Compatibility check for riscv64 architectures."""
arch_root = TARGETS[RISCV64] basename = "riscv64"
uarch = info.get("uarch")
# sifive unmatched board
if uarch == "sifive,u74-mc":
uarch = "u74mc"
# catch-all for unknown uarchs
else:
uarch = "riscv64"
arch_root = TARGETS[basename]
return (target == arch_root or arch_root in target.ancestors) and ( return (target == arch_root or arch_root in target.ancestors) and (
target.name == info.name or target.vendor == "generic" target == uarch or target.vendor == "generic"
) )

View File

@@ -13,7 +13,6 @@
import archspec import archspec
import archspec.cpu.alias import archspec.cpu.alias
import archspec.cpu.schema import archspec.cpu.schema
from .alias import FEATURE_ALIASES from .alias import FEATURE_ALIASES
from .schema import LazyDictionary from .schema import LazyDictionary
@@ -48,7 +47,7 @@ class Microarchitecture:
which has "broadwell" as a parent, supports running binaries which has "broadwell" as a parent, supports running binaries
optimized for "broadwell". optimized for "broadwell".
vendor (str): vendor of the micro-architecture vendor (str): vendor of the micro-architecture
features (set of str): supported CPU flags. Note that the semantic features (list of str): supported CPU flags. Note that the semantic
of the flags in this field might vary among architectures, if of the flags in this field might vary among architectures, if
at all present. For instance x86_64 processors will list all at all present. For instance x86_64 processors will list all
the flags supported by a given CPU while Arm processors will the flags supported by a given CPU while Arm processors will
@@ -181,28 +180,24 @@ def generic(self):
generics = [x for x in [self] + self.ancestors if x.vendor == "generic"] generics = [x for x in [self] + self.ancestors if x.vendor == "generic"]
return max(generics, key=lambda x: len(x.ancestors)) return max(generics, key=lambda x: len(x.ancestors))
def to_dict(self): def to_dict(self, return_list_of_items=False):
"""Returns a dictionary representation of this object.""" """Returns a dictionary representation of this object.
return {
"name": str(self.name),
"vendor": str(self.vendor),
"features": sorted(str(x) for x in self.features),
"generation": self.generation,
"parents": [str(x) for x in self.parents],
"compilers": self.compilers,
}
@staticmethod Args:
def from_dict(data) -> "Microarchitecture": return_list_of_items (bool): if True returns an ordered list of
"""Construct a microarchitecture from a dictionary representation.""" items instead of the dictionary
return Microarchitecture( """
name=data["name"], list_of_items = [
parents=[TARGETS[x] for x in data["parents"]], ("name", str(self.name)),
vendor=data["vendor"], ("vendor", str(self.vendor)),
features=set(data["features"]), ("features", sorted(str(x) for x in self.features)),
compilers=data.get("compilers", {}), ("generation", self.generation),
generation=data.get("generation", 0), ("parents", [str(x) for x in self.parents]),
) ]
if return_list_of_items:
return list_of_items
return dict(list_of_items)
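A small round-trip sketch for the serialization shown above; note that only one of the two versions in this diff defines `from_dict`, the other offers `to_dict` alone:

```python
import archspec.cpu

uarch = archspec.cpu.TARGETS["haswell"]
data = uarch.to_dict()
print(data["vendor"])                                        # "GenuineIntel"

restored = archspec.cpu.Microarchitecture.from_dict(data)    # newer variant only
print(restored.name, restored.vendor)                        # "haswell GenuineIntel"
```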
def optimization_flags(self, compiler, version): def optimization_flags(self, compiler, version):
"""Returns a string containing the optimization flags that needs """Returns a string containing the optimization flags that needs
@@ -276,7 +271,9 @@ def tuplify(ver):
flags = flags_fmt.format(**compiler_entry) flags = flags_fmt.format(**compiler_entry)
return flags return flags
msg = "cannot produce optimized binary for micro-architecture '{0}' with {1}@{2}" msg = (
"cannot produce optimized binary for micro-architecture '{0}' with {1}@{2}"
)
if compiler_info: if compiler_info:
versions = [x["versions"] for x in compiler_info] versions = [x["versions"] for x in compiler_info]
msg += f' [supported compiler versions are {", ".join(versions)}]' msg += f' [supported compiler versions are {", ".join(versions)}]'
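For reference, a hedged sketch of the happy path of `optimization_flags` (the exact flag strings depend on the vendored microarchitectures.json):

```python
import archspec.cpu

target = archspec.cpu.TARGETS["broadwell"]
print(target.optimization_flags("gcc", "12.2.0"))   # e.g. "-march=broadwell -mtune=broadwell"
```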
@@ -292,7 +289,9 @@ def generic_microarchitecture(name):
Args: Args:
name (str): name of the micro-architecture name (str): name of the micro-architecture
""" """
return Microarchitecture(name, parents=[], vendor="generic", features=[], compilers={}) return Microarchitecture(
name, parents=[], vendor="generic", features=[], compilers={}
)
def version_components(version): def version_components(version):
@@ -346,7 +345,9 @@ def fill_target_from_dict(name, data, targets):
compilers = values.get("compilers", {}) compilers = values.get("compilers", {})
generation = values.get("generation", 0) generation = values.get("generation", 0)
targets[name] = Microarchitecture(name, parents, vendor, features, compilers, generation) targets[name] = Microarchitecture(
name, parents, vendor, features, compilers, generation
)
known_targets = {} known_targets = {}
data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"] data = archspec.cpu.schema.TARGETS_JSON["microarchitectures"]

View File

@@ -7,9 +7,7 @@
""" """
import collections.abc import collections.abc
import json import json
import os import os.path
import pathlib
from typing import Tuple
class LazyDictionary(collections.abc.MutableMapping): class LazyDictionary(collections.abc.MutableMapping):
@@ -48,65 +46,21 @@ def __len__(self):
return len(self.data) return len(self.data)
#: Environment variable that might point to a directory with a user defined JSON file def _load_json_file(json_file):
DIR_FROM_ENVIRONMENT = "ARCHSPEC_CPU_DIR" json_dir = os.path.join(os.path.dirname(__file__), "..", "json", "cpu")
json_dir = os.path.abspath(json_dir)
#: Environment variable that might point to a directory with extensions to JSON files def _factory():
EXTENSION_DIR_FROM_ENVIRONMENT = "ARCHSPEC_EXTENSION_CPU_DIR" filename = os.path.join(json_dir, json_file)
with open(filename, "r", encoding="utf-8") as file:
return json.load(file)
return _factory
def _json_file(filename: str, allow_custom: bool = False) -> Tuple[pathlib.Path, pathlib.Path]:
"""Given a filename, returns the absolute path for the main JSON file, and an
optional absolute path for an extension JSON file.
Args:
filename: filename for the JSON file
allow_custom: if True, allows overriding the location where the file resides
"""
json_dir = pathlib.Path(__file__).parent / ".." / "json" / "cpu"
if allow_custom and DIR_FROM_ENVIRONMENT in os.environ:
json_dir = pathlib.Path(os.environ[DIR_FROM_ENVIRONMENT])
json_dir = json_dir.absolute()
json_file = json_dir / filename
extension_file = None
if allow_custom and EXTENSION_DIR_FROM_ENVIRONMENT in os.environ:
extension_dir = pathlib.Path(os.environ[EXTENSION_DIR_FROM_ENVIRONMENT])
extension_dir.absolute()
extension_file = extension_dir / filename
return json_file, extension_file
def _load(json_file: pathlib.Path, extension_file: pathlib.Path):
with open(json_file, "r", encoding="utf-8") as file:
data = json.load(file)
if not extension_file or not extension_file.exists():
return data
with open(extension_file, "r", encoding="utf-8") as file:
extension_data = json.load(file)
top_level_sections = list(data.keys())
for key in top_level_sections:
if key not in extension_data:
continue
data[key].update(extension_data[key])
return data
#: In memory representation of the data in microarchitectures.json, #: In memory representation of the data in microarchitectures.json,
#: loaded on first access #: loaded on first access
TARGETS_JSON = LazyDictionary(_load, *_json_file("microarchitectures.json", allow_custom=True)) TARGETS_JSON = LazyDictionary(_load_json_file("microarchitectures.json"))
#: JSON schema for microarchitectures.json, loaded on first access #: JSON schema for microarchitectures.json, loaded on first access
TARGETS_JSON_SCHEMA = LazyDictionary(_load, *_json_file("microarchitectures_schema.json")) SCHEMA = LazyDictionary(_load_json_file("microarchitectures_schema.json"))
#: Information on how to call 'cpuid' to get information on the HOST CPU
CPUID_JSON = LazyDictionary(_load, *_json_file("cpuid.json", allow_custom=True))
#: JSON schema for cpuid.json, loaded on first access
CPUID_JSON_SCHEMA = LazyDictionary(_load, *_json_file("cpuid_schema.json"))
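A hedged sketch of the override hooks defined above (present only in the version that defines `DIR_FROM_ENVIRONMENT`; the directory paths are illustrative):

```python
import os

# Must be set before TARGETS_JSON / CPUID_JSON are first accessed.
os.environ["ARCHSPEC_CPU_DIR"] = "/opt/custom-archspec-json"        # replaces the bundled files
os.environ["ARCHSPEC_EXTENSION_CPU_DIR"] = "/opt/archspec-extras"   # merged on top, per top-level key

import archspec.cpu.schema
print(list(archspec.cpu.schema.TARGETS_JSON)[:3])
```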

View File

@@ -9,11 +9,11 @@ language specific APIs.
Currently the repository contains the following JSON files: Currently the repository contains the following JSON files:
```console ```console
cpu/ .
├── cpuid.json # Contains information on CPUID calls to retrieve vendor and features on x86_64 ├── COPYRIGHT
── cpuid_schema.json # Schema for the file above ── cpu
├── microarchitectures.json # Contains information on CPU microarchitectures    ├── microarchitectures.json # Contains information on CPU microarchitectures
└── microarchitectures_schema.json # Schema for the file above    └── microarchitectures_schema.json # Schema for the file above
``` ```

File diff suppressed because it is too large

View File

@@ -1,134 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Schema for microarchitecture definitions and feature aliases",
"type": "object",
"additionalProperties": false,
"properties": {
"vendor": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
}
}
},
"highest_extension_support": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
}
}
},
"flags": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
},
"bits": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"register": {
"type": "string"
},
"bit": {
"type": "integer"
}
}
}
}
}
}
},
"extension-flags": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"description": {
"type": "string"
},
"input": {
"type": "object",
"additionalProperties": false,
"properties": {
"eax": {
"type": "integer"
},
"ecx": {
"type": "integer"
}
}
},
"bits": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"register": {
"type": "string"
},
"bit": {
"type": "integer"
}
}
}
}
}
}
}
}
}

View File

@@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Anders Høst
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,76 +0,0 @@
cpuid.py
========
Now, this is silly!
Pure Python library for accessing information about x86 processors
by querying the [CPUID](http://en.wikipedia.org/wiki/CPUID)
instruction. Well, not exactly pure Python...
It works by allocating a small piece of virtual memory, copying
a raw x86 function to that memory, giving the memory execute
permissions and then calling the memory as a function. The injected
function executes the CPUID instruction and copies the result back
to a ctypes.Structure where it can be read by Python.
It should work fine on both 32 and 64 bit versions of Windows and Linux
running x86 processors. Apple OS X and other BSD systems should also work,
not tested though...
Why?
----
For poops and giggles. Plus, having access to a low-level feature
without having to compile a C wrapper is pretty neat.
Examples
--------
Getting info with eax=0:
import cpuid
q = cpuid.CPUID()
eax, ebx, ecx, edx = q(0)
Running the files:
$ python example.py
Vendor ID : GenuineIntel
CPU name : Intel(R) Xeon(R) CPU W3550 @ 3.07GHz
Vector instructions supported:
SSE : Yes
SSE2 : Yes
SSE3 : Yes
SSSE3 : Yes
SSE4.1 : Yes
SSE4.2 : Yes
SSE4a : --
AVX : --
AVX2 : --
$ python cpuid.py
CPUID A B C D
00000000 0000000b 756e6547 6c65746e 49656e69
00000001 000106a5 00100800 009ce3bd bfebfbff
00000002 55035a01 00f0b2e4 00000000 09ca212c
00000003 00000000 00000000 00000000 00000000
00000004 00000000 00000000 00000000 00000000
00000005 00000040 00000040 00000003 00001120
00000006 00000003 00000002 00000001 00000000
00000007 00000000 00000000 00000000 00000000
00000008 00000000 00000000 00000000 00000000
00000009 00000000 00000000 00000000 00000000
0000000a 07300403 00000044 00000000 00000603
0000000b 00000000 00000000 00000095 00000000
80000000 80000008 00000000 00000000 00000000
80000001 00000000 00000000 00000001 28100800
80000002 65746e49 2952286c 6f655820 2952286e
80000003 55504320 20202020 20202020 57202020
80000004 30353533 20402020 37302e33 007a4847
80000005 00000000 00000000 00000000 00000000
80000006 00000000 00000000 01006040 00000000
80000007 00000000 00000000 00000000 00000100
80000008 00003024 00000000 00000000 00000000

View File

@@ -1,172 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024 Anders Høst
#
from __future__ import print_function
import platform
import os
import ctypes
from ctypes import c_uint32, c_long, c_ulong, c_size_t, c_void_p, POINTER, CFUNCTYPE
# Posix x86_64:
# Three first call registers : RDI, RSI, RDX
# Volatile registers : RAX, RCX, RDX, RSI, RDI, R8-11
# Windows x86_64:
# Three first call registers : RCX, RDX, R8
# Volatile registers : RAX, RCX, RDX, R8-11
# cdecl 32 bit:
# Three first call registers : Stack (%esp)
# Volatile registers : EAX, ECX, EDX
_POSIX_64_OPC = [
0x53, # push %rbx
0x89, 0xf0, # mov %esi,%eax
0x89, 0xd1, # mov %edx,%ecx
0x0f, 0xa2, # cpuid
0x89, 0x07, # mov %eax,(%rdi)
0x89, 0x5f, 0x04, # mov %ebx,0x4(%rdi)
0x89, 0x4f, 0x08, # mov %ecx,0x8(%rdi)
0x89, 0x57, 0x0c, # mov %edx,0xc(%rdi)
0x5b, # pop %rbx
0xc3 # retq
]
_WINDOWS_64_OPC = [
0x53, # push %rbx
0x89, 0xd0, # mov %edx,%eax
0x49, 0x89, 0xc9, # mov %rcx,%r9
0x44, 0x89, 0xc1, # mov %r8d,%ecx
0x0f, 0xa2, # cpuid
0x41, 0x89, 0x01, # mov %eax,(%r9)
0x41, 0x89, 0x59, 0x04, # mov %ebx,0x4(%r9)
0x41, 0x89, 0x49, 0x08, # mov %ecx,0x8(%r9)
0x41, 0x89, 0x51, 0x0c, # mov %edx,0xc(%r9)
0x5b, # pop %rbx
0xc3 # retq
]
_CDECL_32_OPC = [
0x53, # push %ebx
0x57, # push %edi
0x8b, 0x7c, 0x24, 0x0c, # mov 0xc(%esp),%edi
0x8b, 0x44, 0x24, 0x10, # mov 0x10(%esp),%eax
0x8b, 0x4c, 0x24, 0x14, # mov 0x14(%esp),%ecx
0x0f, 0xa2, # cpuid
0x89, 0x07, # mov %eax,(%edi)
0x89, 0x5f, 0x04, # mov %ebx,0x4(%edi)
0x89, 0x4f, 0x08, # mov %ecx,0x8(%edi)
0x89, 0x57, 0x0c, # mov %edx,0xc(%edi)
0x5f, # pop %edi
0x5b, # pop %ebx
0xc3 # ret
]
is_windows = os.name == "nt"
is_64bit = ctypes.sizeof(ctypes.c_voidp) == 8
class CPUID_struct(ctypes.Structure):
_register_names = ("eax", "ebx", "ecx", "edx")
_fields_ = [(r, c_uint32) for r in _register_names]
def __getitem__(self, item):
if item not in self._register_names:
raise KeyError(item)
return getattr(self, item)
def __repr__(self):
return "eax=0x{:x}, ebx=0x{:x}, ecx=0x{:x}, edx=0x{:x}".format(self.eax, self.ebx, self.ecx, self.edx)
class CPUID(object):
def __init__(self):
if platform.machine() not in ("AMD64", "x86_64", "x86", "i686"):
raise SystemError("Only available for x86")
if is_windows:
if is_64bit:
# VirtualAlloc seems to fail under some weird
# circumstances when ctypes.windll.kernel32 is
# used under 64 bit Python. CDLL fixes this.
self.win = ctypes.CDLL("kernel32.dll")
opc = _WINDOWS_64_OPC
else:
# Here ctypes.windll.kernel32 is needed to get the
# right DLL. Otherwise it will fail when running
# 32 bit Python on 64 bit Windows.
self.win = ctypes.windll.kernel32
opc = _CDECL_32_OPC
else:
opc = _POSIX_64_OPC if is_64bit else _CDECL_32_OPC
size = len(opc)
code = (ctypes.c_ubyte * size)(*opc)
if is_windows:
self.win.VirtualAlloc.restype = c_void_p
self.win.VirtualAlloc.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_ulong, ctypes.c_ulong]
self.addr = self.win.VirtualAlloc(None, size, 0x1000, 0x40)
if not self.addr:
raise MemoryError("Could not allocate RWX memory")
ctypes.memmove(self.addr, code, size)
else:
from mmap import (
mmap,
MAP_PRIVATE,
MAP_ANONYMOUS,
PROT_WRITE,
PROT_READ,
PROT_EXEC,
)
self.mm = mmap(
-1,
size,
flags=MAP_PRIVATE | MAP_ANONYMOUS,
prot=PROT_WRITE | PROT_READ | PROT_EXEC,
)
self.mm.write(code)
self.addr = ctypes.addressof(ctypes.c_int.from_buffer(self.mm))
func_type = CFUNCTYPE(None, POINTER(CPUID_struct), c_uint32, c_uint32)
self.func_ptr = func_type(self.addr)
def __call__(self, eax, ecx=0):
struct = self.registers_for(eax=eax, ecx=ecx)
return struct.eax, struct.ebx, struct.ecx, struct.edx
def registers_for(self, eax, ecx=0):
"""Calls cpuid with eax and ecx set as the input arguments, and returns a structure
containing eax, ebx, ecx, and edx.
"""
struct = CPUID_struct()
self.func_ptr(struct, eax, ecx)
return struct
def __del__(self):
if is_windows:
self.win.VirtualFree.restype = c_long
self.win.VirtualFree.argtypes = [c_void_p, c_size_t, c_ulong]
self.win.VirtualFree(self.addr, 0, 0x8000)
else:
self.mm.close()
if __name__ == "__main__":
def valid_inputs():
cpuid = CPUID()
for eax in (0x0, 0x80000000):
highest, _, _, _ = cpuid(eax)
while eax <= highest:
regs = cpuid(eax)
yield (eax, regs)
eax += 1
print(" ".join(x.ljust(8) for x in ("CPUID", "A", "B", "C", "D")).strip())
for eax, regs in valid_inputs():
print("%08x" % eax, " ".join("%08x" % reg for reg in regs))

View File

@@ -1,62 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024 Anders Høst
#
from __future__ import print_function
import struct
import cpuid
def cpu_vendor(cpu):
_, b, c, d = cpu(0)
return struct.pack("III", b, d, c).decode("utf-8")
def cpu_name(cpu):
name = "".join((struct.pack("IIII", *cpu(0x80000000 + i)).decode("utf-8")
for i in range(2, 5)))
return name.split('\x00', 1)[0]
def is_set(cpu, leaf, subleaf, reg_idx, bit):
"""
@param {leaf} %eax
@param {subleaf} %ecx, 0 in most cases
@param {reg_idx} idx of [%eax, %ebx, %ecx, %edx], 0-based
@param {bit} bit of reg selected by {reg_idx}, 0-based
"""
regs = cpu(leaf, subleaf)
if (1 << bit) & regs[reg_idx]:
return "Yes"
else:
return "--"
if __name__ == "__main__":
cpu = cpuid.CPUID()
print("Vendor ID : %s" % cpu_vendor(cpu))
print("CPU name : %s" % cpu_name(cpu))
print()
print("Vector instructions supported:")
print("SSE : %s" % is_set(cpu, 1, 0, 3, 25))
print("SSE2 : %s" % is_set(cpu, 1, 0, 3, 26))
print("SSE3 : %s" % is_set(cpu, 1, 0, 2, 0))
print("SSSE3 : %s" % is_set(cpu, 1, 0, 2, 9))
print("SSE4.1 : %s" % is_set(cpu, 1, 0, 2, 19))
print("SSE4.2 : %s" % is_set(cpu, 1, 0, 2, 20))
print("SSE4a : %s" % is_set(cpu, 0x80000001, 0, 2, 6))
print("AVX : %s" % is_set(cpu, 1, 0, 2, 28))
print("AVX2 : %s" % is_set(cpu, 7, 0, 1, 5))
print("BMI1 : %s" % is_set(cpu, 7, 0, 1, 3))
print("BMI2 : %s" % is_set(cpu, 7, 0, 1, 8))
# Intel RDT CMT/MBM
print("L3 Monitoring : %s" % is_set(cpu, 0xf, 0, 3, 1))
print("L3 Occupancy : %s" % is_set(cpu, 0xf, 1, 3, 0))
print("L3 Total BW : %s" % is_set(cpu, 0xf, 1, 3, 1))
print("L3 Local BW : %s" % is_set(cpu, 0xf, 1, 3, 2))

View File

@@ -42,6 +42,11 @@ def convert_to_posix_path(path: str) -> str:
return format_os_path(path, mode=Path.unix) return format_os_path(path, mode=Path.unix)
def convert_to_windows_path(path: str) -> str:
"""Converts the input path to Windows style."""
return format_os_path(path, mode=Path.windows)
def convert_to_platform_path(path: str) -> str: def convert_to_platform_path(path: str) -> str:
"""Converts the input path to the current platform's native style.""" """Converts the input path to the current platform's native style."""
return format_os_path(path, mode=Path.platform_path) return format_os_path(path, mode=Path.platform_path)

View File

@@ -12,7 +12,7 @@
# Archive extensions allowed in Spack # Archive extensions allowed in Spack
PREFIX_EXTENSIONS = ("tar", "TAR") PREFIX_EXTENSIONS = ("tar", "TAR")
EXTENSIONS = ("gz", "bz2", "xz", "Z") EXTENSIONS = ("gz", "bz2", "xz", "Z")
NO_TAR_EXTENSIONS = ("zip", "tgz", "tbz2", "tbz", "txz", "whl") NO_TAR_EXTENSIONS = ("zip", "tgz", "tbz2", "tbz", "txz")
# Add PREFIX_EXTENSIONS and EXTENSIONS last so that .tar.gz is matched *before* .tar or .gz # Add PREFIX_EXTENSIONS and EXTENSIONS last so that .tar.gz is matched *before* .tar or .gz
ALLOWED_ARCHIVE_TYPES = ( ALLOWED_ARCHIVE_TYPES = (
@@ -357,8 +357,10 @@ def strip_version_suffixes(path_or_url: str) -> str:
r"i[36]86", r"i[36]86",
r"ppc64(le)?", r"ppc64(le)?",
r"armv?(7l|6l|64)?", r"armv?(7l|6l|64)?",
# PyPI wheels # PyPI
r"-(?:py|cp)[23].*", r"[._-]py[23].*\.whl",
r"[._-]cp[23].*\.whl",
r"[._-]win.*\.exe",
] ]
for regex in suffix_regexes: for regex in suffix_regexes:
@@ -401,7 +403,7 @@ def expand_contracted_extension_in_path(
def compression_ext_from_compressed_archive(extension: str) -> Optional[str]: def compression_ext_from_compressed_archive(extension: str) -> Optional[str]:
"""Returns compression extension for a compressed archive""" """Returns compression extension for a compressed archive"""
extension = expand_contracted_extension(extension) extension = expand_contracted_extension(extension)
for ext in EXTENSIONS: for ext in [*EXTENSIONS]:
if ext in extension: if ext in extension:
return ext return ext
return None return None

View File

@@ -198,32 +198,15 @@ def getuid():
return os.getuid() return os.getuid()
def _win_rename(src, dst):
# os.replace will still fail if on Windows (but not POSIX) if the dst
# is a symlink to a directory (all other cases have parity Windows <-> Posix)
if os.path.islink(dst) and os.path.isdir(os.path.realpath(dst)):
if os.path.samefile(src, dst):
# src and dst are the same
# do nothing and exit early
return
# If dst exists and is a symlink to a directory
# we need to remove dst and then perform rename/replace
# this is safe to do as there's no chance src == dst now
os.remove(dst)
os.replace(src, dst)
@system_path_filter @system_path_filter
def rename(src, dst): def rename(src, dst):
# On Windows, os.rename will fail if the destination file already exists # On Windows, os.rename will fail if the destination file already exists
# os.replace is the same as os.rename on POSIX and is MoveFileExW w/
# the MOVEFILE_REPLACE_EXISTING flag on Windows
# Windows invocation is abstracted behind additonal logic handling
# remaining cases of divergent behavior accross platforms
if sys.platform == "win32": if sys.platform == "win32":
_win_rename(src, dst) # Windows path existence checks will sometimes fail on junctions/links/symlinks
else: # so check for that case
os.replace(src, dst) if os.path.exists(dst) or islink(dst):
os.remove(dst)
os.rename(src, dst)
@system_path_filter @system_path_filter
@@ -254,6 +237,16 @@ def _get_mime_type():
return file_command("-b", "-h", "--mime-type") return file_command("-b", "-h", "--mime-type")
@memoized
def _get_mime_type_compressed():
"""Same as _get_mime_type but attempts to check for
compression first
"""
mime_uncompressed = _get_mime_type()
mime_uncompressed.add_default_arg("-Z")
return mime_uncompressed
def mime_type(filename): def mime_type(filename):
"""Returns the mime type and subtype of a file. """Returns the mime type and subtype of a file.
@@ -269,6 +262,21 @@ def mime_type(filename):
return type, subtype return type, subtype
def compressed_mime_type(filename):
"""Same as mime_type but checks for type that has been compressed
Args:
filename (str): file to be analyzed
Returns:
Tuple containing the MIME type and subtype
"""
output = _get_mime_type_compressed()(filename, output=str, error=str).strip()
tty.debug("==> " + output)
type, _, subtype = output.partition("/")
return type, subtype
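A hedged usage sketch for these helpers; the output depends on the system `file` utility, and `compressed_mime_type` exists only in the version of the file that adds it:

```python
from llnl.util.filesystem import mime_type

print(mime_type("/tmp/archive.tar.gz"))              # e.g. ("application", "gzip")

# compressed_mime_type peeks through the compression layer (file -Z):
# print(compressed_mime_type("/tmp/archive.tar.gz")) # e.g. ("application", "x-tar")
```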
#: This generates the library filenames that may appear on any OS. #: This generates the library filenames that may appear on any OS.
library_extensions = ["a", "la", "so", "tbd", "dylib"] library_extensions = ["a", "la", "so", "tbd", "dylib"]
@@ -300,6 +308,13 @@ def paths_containing_libs(paths, library_names):
return rpaths_to_include return rpaths_to_include
@system_path_filter
def same_path(path1, path2):
norm1 = os.path.abspath(path1).rstrip(os.path.sep)
norm2 = os.path.abspath(path2).rstrip(os.path.sep)
return norm1 == norm2
def filter_file( def filter_file(
regex: str, regex: str,
repl: Union[str, Callable[[Match], str]], repl: Union[str, Callable[[Match], str]],
@@ -894,6 +909,17 @@ def is_exe(path):
return os.path.isfile(path) and os.access(path, os.X_OK) return os.path.isfile(path) and os.access(path, os.X_OK)
@system_path_filter
def get_filetype(path_name):
"""
Return the output of file path_name as a string to identify file type.
"""
file = Executable("file")
file.add_default_env("LC_ALL", "C")
output = file("-b", "-h", "%s" % path_name, output=str, error=str)
return output.strip()
def has_shebang(path): def has_shebang(path):
"""Returns whether a path has a shebang line. Returns False if the file cannot be opened.""" """Returns whether a path has a shebang line. Returns False if the file cannot be opened."""
try: try:
@@ -1143,6 +1169,20 @@ def write_tmp_and_move(filename):
shutil.move(tmp, filename) shutil.move(tmp, filename)
@contextmanager
@system_path_filter
def open_if_filename(str_or_file, mode="r"):
"""Takes either a path or a file object, and opens it if it is a path.
If it's a file object, just yields the file object.
"""
if isinstance(str_or_file, str):
with open(str_or_file, mode) as f:
yield f
else:
yield str_or_file
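A usage sketch for `open_if_filename` (the path is illustrative):

```python
import io
from llnl.util.filesystem import open_if_filename

with open_if_filename("/tmp/example.txt", "w") as f:   # a path gets opened (and closed) for us
    f.write("hello\n")

buf = io.StringIO()
with open_if_filename(buf, "w") as f:                  # file-like objects pass straight through
    f.write("hello\n")
```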
@system_path_filter @system_path_filter
def touch(path): def touch(path):
"""Creates an empty file at the specified path.""" """Creates an empty file at the specified path."""
@@ -1200,47 +1240,6 @@ def get_single_file(directory):
return fnames[0] return fnames[0]
@system_path_filter
def windows_sfn(path: os.PathLike):
"""Returns 8.3 Filename (SFN) representation of
path
8.3 Filenames (SFN or short filename) is a file
naming convention used prior to Win95 that Windows
still (and will continue to) support. This convention
caps filenames at 8 characters, and most importantly
does not allow for spaces in addition to other specifications.
The scheme is generally the same as a normal Windows
file scheme, but all spaces are removed and the filename
is capped at 6 characters. The remaining characters are
replaced with ~N where N is the number file in a directory
that a given file represents i.e. Program Files and Program Files (x86)
would be PROGRA~1 and PROGRA~2 respectively.
Further, all file/directory names are all caps (although modern Windows
is case insensitive in practice).
Conversion is accomplished by fileapi.h GetShortPathNameW
Returns paths in 8.3 Filename form
Note: this method is a no-op on Linux
Args:
path: Path to be transformed into SFN (8.3 filename) format
"""
# This should not be run-able on linux/macos
if sys.platform != "win32":
return path
path = str(path)
import ctypes
k32 = ctypes.WinDLL("kernel32", use_last_error=True)
# stub Windows types TCHAR[LENGTH]
TCHAR_arr = ctypes.c_wchar * len(path)
ret_str = TCHAR_arr()
k32.GetShortPathNameW(path, ret_str, len(path))
return ret_str.value
@contextmanager @contextmanager
def temp_cwd(): def temp_cwd():
tmp_dir = tempfile.mkdtemp() tmp_dir = tempfile.mkdtemp()
@@ -1255,6 +1254,19 @@ def temp_cwd():
shutil.rmtree(tmp_dir, **kwargs) shutil.rmtree(tmp_dir, **kwargs)
@contextmanager
@system_path_filter
def temp_rename(orig_path, temp_path):
same_path = os.path.realpath(orig_path) == os.path.realpath(temp_path)
if not same_path:
shutil.move(orig_path, temp_path)
try:
yield
finally:
if not same_path:
shutil.move(temp_path, orig_path)
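A usage sketch for `temp_rename` (file names are illustrative): the original is moved aside for the duration of the block and restored afterwards, even if the block raises.

```python
from llnl.util.filesystem import temp_rename

with temp_rename("config.yaml", "config.yaml.bak"):
    # "config.yaml" is out of the way here; exercise the code path that regenerates it
    ...
# "config.yaml" has been moved back into place
```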
@system_path_filter @system_path_filter
def can_access(file_name): def can_access(file_name):
"""True if we have read/write access to the file.""" """True if we have read/write access to the file."""

View File

@@ -98,6 +98,36 @@ def caller_locals():
del stack del stack
def get_calling_module_name():
"""Make sure that the caller is a class definition, and return the
enclosing module's name.
"""
# Passing zero here skips line context for speed.
stack = inspect.stack(0)
try:
# Make sure locals contain __module__
caller_locals = stack[2][0].f_locals
finally:
del stack
if "__module__" not in caller_locals:
raise RuntimeError(
"Must invoke get_calling_module_name() " "from inside a class definition!"
)
module_name = caller_locals["__module__"]
base_name = module_name.split(".")[-1]
return base_name
def attr_required(obj, attr_name):
"""Ensure that a class has a required attribute."""
if not hasattr(obj, attr_name):
raise RequiredAttributeError(
"No required attribute '%s' in class '%s'" % (attr_name, obj.__class__.__name__)
)
def attr_setdefault(obj, name, value): def attr_setdefault(obj, name, value):
"""Like dict.setdefault, but for objects.""" """Like dict.setdefault, but for objects."""
if not hasattr(obj, name): if not hasattr(obj, name):
@@ -483,6 +513,42 @@ def copy(self):
return clone return clone
def in_function(function_name):
"""True if the caller was called from some function with
the supplied Name, False otherwise."""
stack = inspect.stack()
try:
for elt in stack[2:]:
if elt[3] == function_name:
return True
return False
finally:
del stack
def check_kwargs(kwargs, fun):
"""Helper for making functions with kwargs. Checks whether the kwargs
are empty after all of them have been popped off. If they're
not, raises an error describing which kwargs are invalid.
Example::
def foo(self, **kwargs):
x = kwargs.pop('x', None)
y = kwargs.pop('y', None)
z = kwargs.pop('z', None)
check_kwargs(kwargs, self.foo)
# This raises a TypeError:
foo(w='bad kwarg')
"""
if kwargs:
raise TypeError(
"'%s' is an invalid keyword argument for function %s()."
% (next(iter(kwargs)), fun.__name__)
)
def match_predicate(*args): def match_predicate(*args):
"""Utility function for making string matching predicates. """Utility function for making string matching predicates.
@@ -698,6 +764,11 @@ def pretty_seconds(seconds):
return pretty_seconds_formatter(seconds)(seconds) return pretty_seconds_formatter(seconds)(seconds)
class RequiredAttributeError(ValueError):
def __init__(self, message):
super().__init__(message)
class ObjectWrapper: class ObjectWrapper:
"""Base class that wraps an object. Derived classes can add new behavior """Base class that wraps an object. Derived classes can add new behavior
while staying undercover. while staying undercover.
@@ -772,30 +843,6 @@ def __repr__(self):
return repr(self.instance) return repr(self.instance)
def get_entry_points(*, group: str):
"""Wrapper for ``importlib.metadata.entry_points``
Args:
group: entry points to select
Returns:
EntryPoints for ``group`` or empty list if unsupported
"""
try:
import importlib.metadata # type: ignore # novermin
except ImportError:
return []
try:
return importlib.metadata.entry_points(group=group)
except TypeError:
# Prior to Python 3.10, entry_points accepted no parameters and always
# returned a dictionary of entry points, keyed by group. See
# https://docs.python.org/3/library/importlib.metadata.html#entry-points
return importlib.metadata.entry_points().get(group, [])
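A usage sketch for `get_entry_points` (the group name is illustrative; on interpreters without `importlib.metadata` the helper simply yields nothing):

```python
from llnl.util.lang import get_entry_points

for entry_point in get_entry_points(group="spack.extensions"):
    print(entry_point.name, entry_point.value)
```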
def load_module_from_file(module_name, module_path): def load_module_from_file(module_name, module_path):
"""Loads a python module from the path of the corresponding file. """Loads a python module from the path of the corresponding file.
@@ -864,6 +911,25 @@ def uniq(sequence):
return uniq_list return uniq_list
def star(func):
"""Unpacks arguments for use with Multiprocessing mapping functions"""
def _wrapper(args):
return func(*args)
return _wrapper
class Devnull:
"""Null stream with less overhead than ``os.devnull``.
See https://stackoverflow.com/a/2929954.
"""
def write(self, *_):
pass
def elide_list(line_list, max_num=10): def elide_list(line_list, max_num=10):
"""Takes a long list and limits it to a smaller number of elements, """Takes a long list and limits it to a smaller number of elements,
replacing intervening elements with '...'. For example:: replacing intervening elements with '...'. For example::

View File

@@ -815,6 +815,10 @@ def __init__(self, path):
super().__init__(msg) super().__init__(msg)
class LockLimitError(LockError):
"""Raised when exceed maximum attempts to acquire a lock."""
class LockTimeoutError(LockError): class LockTimeoutError(LockError):
"""Raised when an attempt to acquire a lock times out.""" """Raised when an attempt to acquire a lock times out."""

View File

@@ -189,7 +189,6 @@ def _windows_can_symlink() -> bool:
import llnl.util.filesystem as fs import llnl.util.filesystem as fs
fs.touchp(fpath) fs.touchp(fpath)
fs.mkdirp(dpath)
try: try:
os.symlink(dpath, dlink) os.symlink(dpath, dlink)

View File

@@ -44,6 +44,10 @@ def is_debug(level=1):
return _debug >= level return _debug >= level
def is_stacktrace():
return _stacktrace
def set_debug(level=0): def set_debug(level=0):
global _debug global _debug
assert level >= 0, "Debug level must be a positive value" assert level >= 0, "Debug level must be a positive value"
@@ -248,6 +252,37 @@ def die(message, *args, **kwargs) -> NoReturn:
sys.exit(1) sys.exit(1)
def get_number(prompt, **kwargs):
default = kwargs.get("default", None)
abort = kwargs.get("abort", None)
if default is not None and abort is not None:
prompt += " (default is %s, %s to abort) " % (default, abort)
elif default is not None:
prompt += " (default is %s) " % default
elif abort is not None:
prompt += " (%s to abort) " % abort
number = None
while number is None:
msg(prompt, newline=False)
ans = input()
if ans == str(abort):
return None
if ans:
try:
number = int(ans)
if number < 1:
msg("Please enter a valid number.")
number = None
except ValueError:
msg("Please enter a valid number.")
elif default is not None:
number = default
return number
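A usage sketch for the interactive `get_number` helper (prompt text is illustrative):

```python
from llnl.util.tty import get_number

jobs = get_number("How many build jobs?", default=4, abort="q")
if jobs is None:
    print("aborted")
```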
def get_yes_or_no(prompt, **kwargs): def get_yes_or_no(prompt, **kwargs):
default_value = kwargs.get("default", None) default_value = kwargs.get("default", None)

View File

@@ -17,6 +17,7 @@
import tarfile import tarfile
import tempfile import tempfile
import time import time
import traceback
import urllib.error import urllib.error
import urllib.parse import urllib.parse
import urllib.request import urllib.request
@@ -110,6 +111,10 @@ def __init__(self, errors):
super().__init__(self.message) super().__init__(self.message)
class ListMirrorSpecsError(spack.error.SpackError):
"""Raised when unable to retrieve list of specs from the mirror"""
class BinaryCacheIndex: class BinaryCacheIndex:
""" """
The BinaryCacheIndex tracks what specs are available on (usually remote) The BinaryCacheIndex tracks what specs are available on (usually remote)
@@ -536,6 +541,83 @@ def binary_index_location():
BINARY_INDEX: BinaryCacheIndex = llnl.util.lang.Singleton(BinaryCacheIndex) # type: ignore BINARY_INDEX: BinaryCacheIndex = llnl.util.lang.Singleton(BinaryCacheIndex) # type: ignore
class NoOverwriteException(spack.error.SpackError):
"""Raised when a file would be overwritten"""
def __init__(self, file_path):
super().__init__(f"Refusing to overwrite the following file: {file_path}")
class NoGpgException(spack.error.SpackError):
"""
Raised when gpg2 is not in PATH
"""
def __init__(self, msg):
super().__init__(msg)
class NoKeyException(spack.error.SpackError):
"""
Raised when gpg has no default key added.
"""
def __init__(self, msg):
super().__init__(msg)
class PickKeyException(spack.error.SpackError):
"""
Raised when multiple keys can be used to sign.
"""
def __init__(self, keys):
err_msg = "Multiple keys available for signing\n%s\n" % keys
err_msg += "Use spack buildcache create -k <key hash> to pick a key."
super().__init__(err_msg)
class NoVerifyException(spack.error.SpackError):
"""
Raised if file fails signature verification.
"""
pass
class NoChecksumException(spack.error.SpackError):
"""
Raised if file fails checksum verification.
"""
def __init__(self, path, size, contents, algorithm, expected, computed):
super().__init__(
f"{algorithm} checksum failed for {path}",
f"Expected {expected} but got {computed}. "
f"File size = {size} bytes. Contents = {contents!r}",
)
class NewLayoutException(spack.error.SpackError):
"""
Raised if directory layout is different from buildcache.
"""
def __init__(self, msg):
super().__init__(msg)
class InvalidMetadataFile(spack.error.SpackError):
pass
class UnsignedPackageException(spack.error.SpackError):
"""
Raised if installation of unsigned package is attempted without
the use of ``--no-check-signature``.
"""
def compute_hash(data): def compute_hash(data):
if isinstance(data, str): if isinstance(data, str):
data = data.encode("utf-8") data = data.encode("utf-8")
@@ -910,10 +992,15 @@ def url_read_method(url):
if entry.endswith("spec.json") or entry.endswith("spec.json.sig") if entry.endswith("spec.json") or entry.endswith("spec.json.sig")
] ]
read_fn = url_read_method read_fn = url_read_method
except KeyError as inst:
msg = "No packages at {0}: {1}".format(cache_prefix, inst)
tty.warn(msg)
except Exception as err: except Exception as err:
# If we got some kind of S3 (access denied or other connection error), the first non # If we got some kind of S3 (access denied or other connection
# boto-specific class in the exception is Exception. Just print a warning and return # error), the first non boto-specific class in the exception
tty.warn(f"Encountered problem listing packages at {cache_prefix}: {err}") # hierarchy is Exception. Just print a warning and return
msg = "Encountered problem listing packages at {0}: {1}".format(cache_prefix, err)
tty.warn(msg)
return file_list, read_fn return file_list, read_fn
@@ -960,10 +1047,11 @@ def generate_package_index(cache_prefix, concurrency=32):
""" """
try: try:
file_list, read_fn = _spec_files_from_cache(cache_prefix) file_list, read_fn = _spec_files_from_cache(cache_prefix)
except ListMirrorSpecsError as e: except ListMirrorSpecsError as err:
raise GenerateIndexError(f"Unable to generate package index: {e}") from e tty.error("Unable to generate package index, {0}".format(err))
return
tty.debug(f"Retrieving spec descriptor files from {cache_prefix} to build index") tty.debug("Retrieving spec descriptor files from {0} to build index".format(cache_prefix))
tmpdir = tempfile.mkdtemp() tmpdir = tempfile.mkdtemp()
@@ -973,22 +1061,27 @@ def generate_package_index(cache_prefix, concurrency=32):
try: try:
_read_specs_and_push_index(file_list, read_fn, cache_prefix, db, db_root_dir, concurrency) _read_specs_and_push_index(file_list, read_fn, cache_prefix, db, db_root_dir, concurrency)
except Exception as e: except Exception as err:
raise GenerateIndexError( msg = "Encountered problem pushing package index to {0}: {1}".format(cache_prefix, err)
f"Encountered problem pushing package index to {cache_prefix}: {e}" tty.warn(msg)
) from e tty.debug("\n" + traceback.format_exc())
finally: finally:
shutil.rmtree(tmpdir, ignore_errors=True) shutil.rmtree(tmpdir)
def generate_key_index(key_prefix, tmpdir=None): def generate_key_index(key_prefix, tmpdir=None):
"""Create the key index page. """Create the key index page.
Creates (or replaces) the "index.json" page at the location given in key_prefix. This page Creates (or replaces) the "index.json" page at the location given in
contains an entry for each key (.pub) under key_prefix. key_prefix. This page contains an entry for each key (.pub) under
key_prefix.
""" """
tty.debug(f"Retrieving key.pub files from {url_util.format(key_prefix)} to build key index") tty.debug(
" ".join(
("Retrieving key.pub files from", url_util.format(key_prefix), "to build key index")
)
)
try: try:
fingerprints = ( fingerprints = (
@@ -996,8 +1089,17 @@ def generate_key_index(key_prefix, tmpdir=None):
for entry in web_util.list_url(key_prefix, recursive=False) for entry in web_util.list_url(key_prefix, recursive=False)
if entry.endswith(".pub") if entry.endswith(".pub")
) )
except Exception as e: except KeyError as inst:
raise CannotListKeys(f"Encountered problem listing keys at {key_prefix}: {e}") from e msg = "No keys at {0}: {1}".format(key_prefix, inst)
tty.warn(msg)
return
except Exception as err:
# If we got some kind of S3 (access denied or other connection
# error), the first non boto-specific class in the exception
# hierarchy is Exception. Just print a warning and return
msg = "Encountered problem listing keys at {0}: {1}".format(key_prefix, err)
tty.warn(msg)
return
remove_tmpdir = False remove_tmpdir = False
@@ -1022,13 +1124,12 @@ def generate_key_index(key_prefix, tmpdir=None):
keep_original=False, keep_original=False,
extra_args={"ContentType": "application/json"}, extra_args={"ContentType": "application/json"},
) )
except Exception as e: except Exception as err:
raise GenerateIndexError( msg = "Encountered problem pushing key index to {0}: {1}".format(key_prefix, err)
f"Encountered problem pushing key index to {key_prefix}: {e}" tty.warn(msg)
) from e
finally: finally:
if remove_tmpdir: if remove_tmpdir:
shutil.rmtree(tmpdir, ignore_errors=True) shutil.rmtree(tmpdir)
def tarfile_of_spec_prefix(tar: tarfile.TarFile, prefix: str) -> None: def tarfile_of_spec_prefix(tar: tarfile.TarFile, prefix: str) -> None:
@@ -1099,8 +1200,7 @@ def push_or_raise(spec: Spec, out_url: str, options: PushOptions):
used at the mirror (following <tarball_directory_name>). used at the mirror (following <tarball_directory_name>).
This method raises :py:class:`NoOverwriteException` when ``force=False`` and the tarball or This method raises :py:class:`NoOverwriteException` when ``force=False`` and the tarball or
spec.json file already exist in the buildcache. It raises :py:class:`PushToBuildCacheError` spec.json file already exist in the buildcache.
when the tarball or spec.json file cannot be pushed to the buildcache.
""" """
if not spec.concrete: if not spec.concrete:
raise ValueError("spec must be concrete to build tarball") raise ValueError("spec must be concrete to build tarball")
@@ -1178,18 +1278,13 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
key = select_signing_key(options.key) key = select_signing_key(options.key)
sign_specfile(key, options.force, specfile_path) sign_specfile(key, options.force, specfile_path)
try: # push tarball and signed spec json to remote mirror
# push tarball and signed spec json to remote mirror web_util.push_to_url(spackfile_path, remote_spackfile_path, keep_original=False)
web_util.push_to_url(spackfile_path, remote_spackfile_path, keep_original=False) web_util.push_to_url(
web_util.push_to_url( signed_specfile_path if not options.unsigned else specfile_path,
signed_specfile_path if not options.unsigned else specfile_path, remote_signed_specfile_path if not options.unsigned else remote_specfile_path,
remote_signed_specfile_path if not options.unsigned else remote_specfile_path, keep_original=False,
keep_original=False, )
)
except Exception as e:
raise PushToBuildCacheError(
f"Encountered problem pushing binary {remote_spackfile_path}: {e}"
) from e
# push the key to the build cache's _pgp directory so it can be # push the key to the build cache's _pgp directory so it can be
# imported # imported
@@ -1201,6 +1296,8 @@ def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, option
if options.regenerate_index: if options.regenerate_index:
generate_package_index(url_util.join(out_url, os.path.relpath(cache_prefix, stage_dir))) generate_package_index(url_util.join(out_url, os.path.relpath(cache_prefix, stage_dir)))
return None
class NotInstalledError(spack.error.SpackError): class NotInstalledError(spack.error.SpackError):
"""Raised when a spec is not installed but picked to be packaged.""" """Raised when a spec is not installed but picked to be packaged."""
@@ -1255,6 +1352,28 @@ def specs_to_be_packaged(
return [s for s in itertools.chain(roots, deps) if not s.external] return [s for s in itertools.chain(roots, deps) if not s.external]
def push(spec: Spec, mirror_url: str, options: PushOptions):
"""Create and push binary package for a single spec to the specified
mirror url.
Args:
spec: Spec to package and push
mirror_url: Desired destination url for binary package
options:
Returns:
True if package was pushed, False otherwise.
"""
try:
push_or_raise(spec, mirror_url, options)
except NoOverwriteException as e:
warnings.warn(str(e))
return False
return True
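A rough usage sketch of the wrapper above (the mirror URL handling and the unsigned flag are placeholders): push() absorbs only the already-exists case, while other failures keep propagating from push_or_raise.

    import spack.binary_distribution as bindist

    def upload(spec, mirror_url: str, force: bool = False) -> bool:
        options = bindist.PushOptions(force=force, unsigned=True)
        # False means the tarball or spec.json was already present and
        # force=False; anything else (permissions, network) still raises.
        return bindist.push(spec, mirror_url, options)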
def try_verify(specfile_path): def try_verify(specfile_path):
"""Utility function to attempt to verify a local file. Assumes the """Utility function to attempt to verify a local file. Assumes the
file is a clearsigned signature file. file is a clearsigned signature file.
@@ -1422,7 +1541,7 @@ def fetch_url_to_mirror(url):
response = spack.oci.opener.urlopen( response = spack.oci.opener.urlopen(
urllib.request.Request( urllib.request.Request(
url=ref.manifest_url(), url=ref.manifest_url(),
headers={"Accept": ", ".join(spack.oci.oci.manifest_content_type)}, headers={"Accept": "application/vnd.oci.image.manifest.v1+json"},
) )
) )
except Exception: except Exception:
@@ -2587,96 +2706,3 @@ def conditional_fetch(self) -> FetchIndexResult:
raise FetchIndexError(f"Remote index {url_manifest} is invalid") raise FetchIndexError(f"Remote index {url_manifest} is invalid")
return FetchIndexResult(etag=None, hash=index_digest.digest, data=result, fresh=False) return FetchIndexResult(etag=None, hash=index_digest.digest, data=result, fresh=False)
class NoOverwriteException(spack.error.SpackError):
"""Raised when a file would be overwritten"""
def __init__(self, file_path):
super().__init__(f"Refusing to overwrite the following file: {file_path}")
class NoGpgException(spack.error.SpackError):
"""
Raised when gpg2 is not in PATH
"""
def __init__(self, msg):
super().__init__(msg)
class NoKeyException(spack.error.SpackError):
"""
Raised when gpg has no default key added.
"""
def __init__(self, msg):
super().__init__(msg)
class PickKeyException(spack.error.SpackError):
"""
Raised when multiple keys can be used to sign.
"""
def __init__(self, keys):
err_msg = "Multiple keys available for signing\n%s\n" % keys
err_msg += "Use spack buildcache create -k <key hash> to pick a key."
super().__init__(err_msg)
class NoVerifyException(spack.error.SpackError):
"""
Raised if file fails signature verification.
"""
pass
class NoChecksumException(spack.error.SpackError):
"""
Raised if file fails checksum verification.
"""
def __init__(self, path, size, contents, algorithm, expected, computed):
super().__init__(
f"{algorithm} checksum failed for {path}",
f"Expected {expected} but got {computed}. "
f"File size = {size} bytes. Contents = {contents!r}",
)
class NewLayoutException(spack.error.SpackError):
"""
Raised if directory layout is different from buildcache.
"""
def __init__(self, msg):
super().__init__(msg)
class InvalidMetadataFile(spack.error.SpackError):
pass
class UnsignedPackageException(spack.error.SpackError):
"""
Raised if installation of unsigned package is attempted without
the use of ``--no-check-signature``.
"""
class ListMirrorSpecsError(spack.error.SpackError):
"""Raised when unable to retrieve list of specs from the mirror"""
class GenerateIndexError(spack.error.SpackError):
"""Raised when unable to generate key or package index for mirror"""
class CannotListKeys(GenerateIndexError):
"""Raised when unable to list keys when generating key index"""
class PushToBuildCacheError(spack.error.SpackError):
"""Raised when unable to push objects to binary mirror"""

View File

@@ -213,6 +213,9 @@ def _root_spec(spec_str: str) -> str:
platform = str(spack.platforms.host()) platform = str(spack.platforms.host())
if platform == "darwin": if platform == "darwin":
spec_str += " %apple-clang" spec_str += " %apple-clang"
elif platform == "windows":
# TODO (johnwparent): Remove version constraint when clingo patch is up
spec_str += " %msvc@:19.37"
elif platform == "linux": elif platform == "linux":
spec_str += " %gcc" spec_str += " %gcc"
elif platform == "freebsd": elif platform == "freebsd":
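The branch added above effectively maps the host platform to a compiler suffix for the bootstrap root spec. A standalone sketch of that mapping (the freebsd branch is truncated in the hunk, so it is left out here; the msvc pin mirrors the TODO above and may be lifted later):

    import spack.platforms

    def default_compiler_suffix() -> str:
        platform = str(spack.platforms.host())
        # One toolchain hint per platform, appended to the root spec string.
        suffixes = {
            "darwin": " %apple-clang",
            "windows": " %msvc@:19.37",
            "linux": " %gcc",
        }
        return suffixes.get(platform, "")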

View File

@@ -147,7 +147,7 @@ def _add_compilers_if_missing() -> None:
mixed_toolchain=sys.platform == "darwin" mixed_toolchain=sys.platform == "darwin"
) )
if new_compilers: if new_compilers:
spack.compilers.add_compilers_to_config(new_compilers) spack.compilers.add_compilers_to_config(new_compilers, init_config=False)
@contextlib.contextmanager @contextlib.contextmanager

View File

@@ -57,10 +57,8 @@
import spack.build_systems.meson import spack.build_systems.meson
import spack.build_systems.python import spack.build_systems.python
import spack.builder import spack.builder
import spack.compilers
import spack.config import spack.config
import spack.deptypes as dt import spack.deptypes as dt
import spack.error
import spack.main import spack.main
import spack.package_base import spack.package_base
import spack.paths import spack.paths
@@ -585,22 +583,10 @@ def set_package_py_globals(pkg, context: Context = Context.BUILD):
# Put spack compiler paths in module scope. (Some packages use it # Put spack compiler paths in module scope. (Some packages use it
# in setup_run_environment etc, so don't put it context == build) # in setup_run_environment etc, so don't put it context == build)
link_dir = spack.paths.build_env_path link_dir = spack.paths.build_env_path
pkg_compiler = None module.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths["cc"])
try: module.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths["cxx"])
pkg_compiler = pkg.compiler module.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths["f77"])
except spack.compilers.NoCompilerForSpecError as e: module.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths["fc"])
tty.debug(f"cannot set 'spack_cc': {str(e)}")
if pkg_compiler is not None:
module.spack_cc = os.path.join(link_dir, pkg_compiler.link_paths["cc"])
module.spack_cxx = os.path.join(link_dir, pkg_compiler.link_paths["cxx"])
module.spack_f77 = os.path.join(link_dir, pkg_compiler.link_paths["f77"])
module.spack_fc = os.path.join(link_dir, pkg_compiler.link_paths["fc"])
else:
module.spack_cc = None
module.spack_cxx = None
module.spack_f77 = None
module.spack_fc = None
# Useful directories within the prefix are encapsulated in # Useful directories within the prefix are encapsulated in
# a Prefix object. # a Prefix object.
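A condensed sketch of the pattern on the left-hand side, assuming pkg is any package object whose compiler property may raise (e.g. compiler-less runtime packages): the wrapper variables degrade to None rather than aborting build environment setup.

    import os

    import llnl.util.tty as tty
    import spack.compilers
    import spack.paths

    def compiler_wrapper_paths(pkg) -> dict:
        link_dir = spack.paths.build_env_path
        try:
            compiler = pkg.compiler
        except spack.compilers.NoCompilerForSpecError as e:
            tty.debug(f"cannot set 'spack_cc': {e}")
            return {name: None for name in ("cc", "cxx", "f77", "fc")}
        return {
            name: os.path.join(link_dir, compiler.link_paths[name])
            for name in ("cc", "cxx", "f77", "fc")
        }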
@@ -803,7 +789,7 @@ def setup_package(pkg, dirty, context: Context = Context.BUILD):
for mod in ["cray-mpich", "cray-libsci"]: for mod in ["cray-mpich", "cray-libsci"]:
module("unload", mod) module("unload", mod)
if target and target.module_name: if target.module_name:
load_module(target.module_name) load_module(target.module_name)
load_external_modules(pkg) load_external_modules(pkg)

View File

@@ -434,6 +434,11 @@ def _do_patch_libtool(self):
r"crtendS\.o", r"crtendS\.o",
]: ]:
x.filter(regex=(rehead + o), repl="") x.filter(regex=(rehead + o), repl="")
elif self.pkg.compiler.name == "dpcpp":
# Hack to filter out spurious predep_objects when building with Intel dpcpp
# (see https://github.com/spack/spack/issues/32863):
x.filter(regex=r"^(predep_objects=.*)/tmp/conftest-[0-9A-Fa-f]+\.o", repl=r"\1")
x.filter(regex=r"^(predep_objects=.*)/tmp/a-[0-9A-Fa-f]+\.o", repl=r"\1")
elif self.pkg.compiler.name == "nag": elif self.pkg.compiler.name == "nag":
for tag in ["fc", "f77"]: for tag in ["fc", "f77"]:
marker = markers[tag] marker = markers[tag]
@@ -536,7 +541,7 @@ def autoreconf(self, pkg, spec, prefix):
if os.path.exists(self.configure_abs_path): if os.path.exists(self.configure_abs_path):
return return
# Else try to regenerate it, which requires a few build dependencies # Else try to regenerate it, which reuquires a few build dependencies
ensure_build_dependencies_or_raise( ensure_build_dependencies_or_raise(
spec=spec, spec=spec,
dependencies=["autoconf", "automake", "libtool"], dependencies=["autoconf", "automake", "libtool"],

View File

@@ -4,7 +4,6 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT) # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections.abc import collections.abc
import os import os
import re
from typing import Tuple from typing import Tuple
import llnl.util.filesystem as fs import llnl.util.filesystem as fs
@@ -16,12 +15,6 @@
from .cmake import CMakeBuilder, CMakePackage from .cmake import CMakeBuilder, CMakePackage
def spec_uses_toolchain(spec):
gcc_toolchain_regex = re.compile(".*gcc-toolchain.*")
using_toolchain = list(filter(gcc_toolchain_regex.match, spec.compiler_flags["cxxflags"]))
return using_toolchain
def cmake_cache_path(name, value, comment="", force=False): def cmake_cache_path(name, value, comment="", force=False):
"""Generate a string for a cmake cache variable""" """Generate a string for a cmake cache variable"""
force_str = " FORCE" if force else "" force_str = " FORCE" if force else ""
@@ -220,7 +213,7 @@ def initconfig_mpi_entries(self):
else: else:
# starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE # starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
# vs the older versions which expect MPIEXEC # vs the older versions which expect MPIEXEC
if spec["cmake"].satisfies("@3.10:"): if self.pkg.spec["cmake"].satisfies("@3.10:"):
entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE", mpiexec)) entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE", mpiexec))
else: else:
entries.append(cmake_cache_path("MPIEXEC", mpiexec)) entries.append(cmake_cache_path("MPIEXEC", mpiexec))
@@ -255,17 +248,12 @@ def initconfig_hardware_entries(self):
# Include the deprecated CUDA_TOOLKIT_ROOT_DIR for supporting BLT packages # Include the deprecated CUDA_TOOLKIT_ROOT_DIR for supporting BLT packages
entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR", cudatoolkitdir)) entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR", cudatoolkitdir))
# CUDA_FLAGS archs = spec.variants["cuda_arch"].value
cuda_flags = [] if archs[0] != "none":
arch_str = ";".join(archs)
if not spec.satisfies("cuda_arch=none"): entries.append(
cuda_archs = ";".join(spec.variants["cuda_arch"].value) cmake_cache_string("CMAKE_CUDA_ARCHITECTURES", "{0}".format(arch_str))
entries.append(cmake_cache_string("CMAKE_CUDA_ARCHITECTURES", cuda_archs)) )
if spec_uses_toolchain(spec):
cuda_flags.append("-Xcompiler {}".format(spec_uses_toolchain(spec)[0]))
entries.append(cmake_cache_string("CMAKE_CUDA_FLAGS", " ".join(cuda_flags)))
if "+rocm" in spec: if "+rocm" in spec:
entries.append("#------------------{0}".format("-" * 30)) entries.append("#------------------{0}".format("-" * 30))
@@ -274,6 +262,9 @@ def initconfig_hardware_entries(self):
# Explicitly setting HIP_ROOT_DIR may be a patch that is no longer necessary # Explicitly setting HIP_ROOT_DIR may be a patch that is no longer necessary
entries.append(cmake_cache_path("HIP_ROOT_DIR", "{0}".format(spec["hip"].prefix))) entries.append(cmake_cache_path("HIP_ROOT_DIR", "{0}".format(spec["hip"].prefix)))
entries.append(
cmake_cache_path("HIP_CXX_COMPILER", "{0}".format(self.spec["hip"].hipcc))
)
llvm_bin = spec["llvm-amdgpu"].prefix.bin llvm_bin = spec["llvm-amdgpu"].prefix.bin
llvm_prefix = spec["llvm-amdgpu"].prefix llvm_prefix = spec["llvm-amdgpu"].prefix
# Some ROCm systems seem to point to /<path>/rocm-<ver>/ and # Some ROCm systems seem to point to /<path>/rocm-<ver>/ and
@@ -286,9 +277,11 @@ def initconfig_hardware_entries(self):
archs = self.spec.variants["amdgpu_target"].value archs = self.spec.variants["amdgpu_target"].value
if archs[0] != "none": if archs[0] != "none":
arch_str = ";".join(archs) arch_str = ";".join(archs)
entries.append(cmake_cache_string("CMAKE_HIP_ARCHITECTURES", arch_str)) entries.append(
entries.append(cmake_cache_string("AMDGPU_TARGETS", arch_str)) cmake_cache_string("CMAKE_HIP_ARCHITECTURES", "{0}".format(arch_str))
entries.append(cmake_cache_string("GPU_TARGETS", arch_str)) )
entries.append(cmake_cache_string("AMDGPU_TARGETS", "{0}".format(arch_str)))
entries.append(cmake_cache_string("GPU_TARGETS", "{0}".format(arch_str)))
return entries return entries
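To make the arch handling above concrete, a self-contained sketch of how one variant value fans out into several cache entries (gpu_arch_entries and the lambda are illustrative stand-ins, not Spack API):

    def gpu_arch_entries(amdgpu_targets, cache_string):
        # One semicolon-separated list feeds several cache variables so CMake,
        # HIP and downstream BLT projects all see the same architectures.
        entries = []
        if amdgpu_targets and amdgpu_targets[0] != "none":
            arch_str = ";".join(amdgpu_targets)
            for var in ("CMAKE_HIP_ARCHITECTURES", "AMDGPU_TARGETS", "GPU_TARGETS"):
                entries.append(cache_string(var, arch_str))
        return entries

    print(gpu_arch_entries(["gfx90a", "gfx942"], lambda n, v: f"{n}={v}"))
    # ['CMAKE_HIP_ARCHITECTURES=gfx90a;gfx942', 'AMDGPU_TARGETS=gfx90a;gfx942', 'GPU_TARGETS=gfx90a;gfx942']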

View File

@@ -69,7 +69,7 @@ class MSBuildBuilder(BaseBuilder):
@property @property
def build_directory(self): def build_directory(self):
"""Return the directory containing the MSBuild solution or vcxproj.""" """Return the directory containing the MSBuild solution or vcxproj."""
return fs.windows_sfn(self.pkg.stage.source_path) return self.pkg.stage.source_path
@property @property
def toolchain_version(self): def toolchain_version(self):

View File

@@ -77,11 +77,7 @@ def ignore_quotes(self):
@property @property
def build_directory(self): def build_directory(self):
"""Return the directory containing the makefile.""" """Return the directory containing the makefile."""
return ( return self.pkg.stage.source_path if not self.makefile_root else self.makefile_root
fs.windows_sfn(self.pkg.stage.source_path)
if not self.makefile_root
else fs.windows_sfn(self.makefile_root)
)
@property @property
def std_nmake_args(self): def std_nmake_args(self):
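Both Windows builders above route their build directory through fs.windows_sfn (8.3 short names), which sidesteps long or space-containing paths for NMake and MSBuild. A hedged sketch of that idea, guarded by platform since the rewrite only matters on Windows:

    import sys

    import llnl.util.filesystem as fs

    def shortened(path: str) -> str:
        # Only rewrite the path on Windows; elsewhere return it untouched.
        return fs.windows_sfn(path) if sys.platform == "win32" else path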

View File

@@ -14,7 +14,7 @@
from llnl.util.link_tree import LinkTree from llnl.util.link_tree import LinkTree
from spack.build_environment import dso_suffix from spack.build_environment import dso_suffix
from spack.directives import conflicts, license, variant from spack.directives import conflicts, variant
from spack.package_base import InstallError from spack.package_base import InstallError
from spack.util.environment import EnvironmentModifications from spack.util.environment import EnvironmentModifications
from spack.util.executable import Executable from spack.util.executable import Executable
@@ -26,7 +26,6 @@ class IntelOneApiPackage(Package):
"""Base class for Intel oneAPI packages.""" """Base class for Intel oneAPI packages."""
homepage = "https://software.intel.com/oneapi" homepage = "https://software.intel.com/oneapi"
license("https://intel.ly/393CijO")
# oneAPI license does not allow mirroring outside of the # oneAPI license does not allow mirroring outside of the
# organization (e.g. University/Company). # organization (e.g. University/Company).

View File

@@ -4,15 +4,12 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT) # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect import inspect
import os import os
from typing import Iterable
from llnl.util.filesystem import filter_file, find from llnl.util.filesystem import filter_file
from llnl.util.lang import memoized
import spack.builder import spack.builder
import spack.package_base import spack.package_base
from spack.directives import build_system, extends from spack.directives import build_system, extends
from spack.install_test import SkipTest, test_part
from spack.util.executable import Executable from spack.util.executable import Executable
from ._checks import BaseBuilder, execute_build_time_tests from ._checks import BaseBuilder, execute_build_time_tests
@@ -31,58 +28,6 @@ class PerlPackage(spack.package_base.PackageBase):
extends("perl", when="build_system=perl") extends("perl", when="build_system=perl")
@property
@memoized
def _platform_dir(self):
"""Name of platform-specific module subdirectory."""
perl = self.spec["perl"].command
options = "-E", "use Config; say $Config{archname}"
out = perl(*options, output=str.split, error=str.split)
return out.strip()
@property
def use_modules(self) -> Iterable[str]:
"""Names of the package's perl modules."""
module_files = find(self.prefix.lib, ["*.pm"], recursive=True)
# Drop the platform directory, if present
if self._platform_dir:
platform_dir = self._platform_dir + os.sep
module_files = [m.replace(platform_dir, "") for m in module_files]
# Drop the extension and library path
prefix = self.prefix.lib + os.sep
modules = [os.path.splitext(m)[0].replace(prefix, "") for m in module_files]
# Drop the perl subdirectory as well
return ["::".join(m.split(os.sep)[1:]) for m in modules]
@property
def skip_modules(self) -> Iterable[str]:
"""Names of modules that should be skipped when running tests.
These are a subset of use_modules.
Returns:
List of strings of module names.
"""
return []
def test_use(self):
"""Test 'use module'"""
if not self.use_modules:
raise SkipTest("Test requires use_modules package property.")
perl = self.spec["perl"].command
for module in self.use_modules:
if module in self.skip_modules:
continue
with test_part(self, f"test_use-{module}", purpose=f"checking use of {module}"):
options = ["-we", f'use strict; use {module}; print("OK\n")']
out = perl(*options, output=str.split, error=str.split)
assert "OK" in out
@spack.builder.builder("perl") @spack.builder.builder("perl")
class PerlBuilder(BaseBuilder): class PerlBuilder(BaseBuilder):
@@ -107,7 +52,7 @@ class PerlBuilder(BaseBuilder):
phases = ("configure", "build", "install") phases = ("configure", "build", "install")
#: Names associated with package methods in the old build-system format #: Names associated with package methods in the old build-system format
legacy_methods = ("configure_args", "check", "test_use") legacy_methods = ("configure_args", "check")
#: Names associated with package attributes in the old build-system format #: Names associated with package attributes in the old build-system format
legacy_attributes = () legacy_attributes = ()
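The removed use_modules property boils down to a path-to-module-name transformation; a standalone sketch of that logic (perl_module_names is a hypothetical helper, fed with the paths that find() would normally return):

    import os

    def perl_module_names(prefix_lib, module_files, platform_dir=None):
        names = []
        for path in module_files:
            # Drop the optional arch-specific subdirectory, the lib prefix and
            # the .pm extension, then the leading perl version directory.
            if platform_dir:
                path = path.replace(platform_dir + os.sep, "")
            rel = os.path.splitext(path)[0].replace(prefix_lib + os.sep, "")
            names.append("::".join(rel.split(os.sep)[1:]))
        return names

    print(perl_module_names("/opt/perl/lib", ["/opt/perl/lib/5.38.0/Foo/Bar.pm"]))
    # ['Foo::Bar']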

View File

@@ -2,10 +2,7 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details. # Spack Project Developers. See the top-level COPYRIGHT file for details.
# #
# SPDX-License-Identifier: (Apache-2.0 OR MIT) # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import functools
import inspect import inspect
import operator
import os import os
import re import re
import shutil import shutil
@@ -27,7 +24,7 @@
import spack.package_base import spack.package_base
import spack.spec import spack.spec
import spack.store import spack.store
from spack.directives import build_system, depends_on, extends from spack.directives import build_system, depends_on, extends, maintainers
from spack.error import NoHeadersError, NoLibrariesError from spack.error import NoHeadersError, NoLibrariesError
from spack.install_test import test_part from spack.install_test import test_part
from spack.spec import Spec from spack.spec import Spec
@@ -56,6 +53,8 @@ def _flatten_dict(dictionary: Mapping[str, object]) -> Iterable[str]:
class PythonExtension(spack.package_base.PackageBase): class PythonExtension(spack.package_base.PackageBase):
maintainers("adamjstewart")
@property @property
def import_modules(self) -> Iterable[str]: def import_modules(self) -> Iterable[str]:
"""Names of modules that the Python package provides. """Names of modules that the Python package provides.
@@ -181,7 +180,7 @@ def add_files_to_view(self, view, merge_map, skip_if_exists=True):
except (OSError, KeyError): except (OSError, KeyError):
target = None target = None
if target: if target:
os.symlink(os.path.relpath(target, os.path.dirname(dst)), dst) os.symlink(target, dst)
else: else:
view.link(src, dst, spec=self.spec) view.link(src, dst, spec=self.spec)
@@ -369,19 +368,16 @@ def headers(self) -> HeaderList:
# Remove py- prefix in package name # Remove py- prefix in package name
name = self.spec.name[3:] name = self.spec.name[3:]
# Headers should only be in include or platlib, but no harm in checking purelib too # Headers may be in either location
include = self.prefix.join(self.spec["python"].package.include).join(name) include = self.prefix.join(self.spec["python"].package.include).join(name)
platlib = self.prefix.join(self.spec["python"].package.platlib).join(name) platlib = self.prefix.join(self.spec["python"].package.platlib).join(name)
purelib = self.prefix.join(self.spec["python"].package.purelib).join(name) headers = fs.find_all_headers(include) + fs.find_all_headers(platlib)
headers_list = map(fs.find_all_headers, [include, platlib, purelib])
headers = functools.reduce(operator.add, headers_list)
if headers: if headers:
return headers return headers
msg = "Unable to locate {} headers in {}, {}, or {}" msg = "Unable to locate {} headers in {} or {}"
raise NoHeadersError(msg.format(self.spec.name, include, platlib, purelib)) raise NoHeadersError(msg.format(self.spec.name, include, platlib))
@property @property
def libs(self) -> LibraryList: def libs(self) -> LibraryList:
@@ -390,19 +386,15 @@ def libs(self) -> LibraryList:
# Remove py- prefix in package name # Remove py- prefix in package name
name = self.spec.name[3:] name = self.spec.name[3:]
# Libraries should only be in platlib, but no harm in checking purelib too root = self.prefix.join(self.spec["python"].package.platlib).join(name)
platlib = self.prefix.join(self.spec["python"].package.platlib).join(name)
purelib = self.prefix.join(self.spec["python"].package.purelib).join(name)
find_all_libraries = functools.partial(fs.find_all_libraries, recursive=True) libs = fs.find_all_libraries(root, recursive=True)
libs_list = map(find_all_libraries, [platlib, purelib])
libs = functools.reduce(operator.add, libs_list)
if libs: if libs:
return libs return libs
msg = "Unable to recursively locate {} libraries in {} or {}" msg = "Unable to recursively locate {} libraries in {}"
raise NoLibrariesError(msg.format(self.spec.name, platlib, purelib)) raise NoLibrariesError(msg.format(self.spec.name, root))
@spack.builder.builder("python_pip") @spack.builder.builder("python_pip")
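The headers/libs changes above concatenate search results from several prefixes; reducing with operator.add keeps the list type the filesystem helpers return. A minimal sketch (the directories passed in are placeholders):

    import functools
    import operator

    import llnl.util.filesystem as fs

    def headers_from(prefixes):
        # Search every candidate directory and merge the results with +,
        # mirroring the include/platlib/purelib handling above.
        return functools.reduce(operator.add, map(fs.find_all_headers, prefixes))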

View File

@@ -75,8 +75,6 @@
# does not like its directory structure. # does not like its directory structure.
# #
import os
import spack.variant import spack.variant
from spack.directives import conflicts, depends_on, variant from spack.directives import conflicts, depends_on, variant
from spack.package_base import PackageBase from spack.package_base import PackageBase
@@ -156,32 +154,6 @@ def hip_flags(amdgpu_target):
archs = ",".join(amdgpu_target) archs = ",".join(amdgpu_target)
return "--amdgpu-target={0}".format(archs) return "--amdgpu-target={0}".format(archs)
# ASAN
@staticmethod
def asan_on(env, llvm_path):
env.set("CC", llvm_path + "/bin/clang")
env.set("CXX", llvm_path + "/bin/clang++")
env.set("ASAN_OPTIONS", "detect_leaks=0")
for root, dirs, files in os.walk(llvm_path):
if "libclang_rt.asan-x86_64.so" in files:
asan_lib_path = root
env.prepend_path("LD_LIBRARY_PATH", asan_lib_path)
SET_DWARF_VERSION_4 = ""
try:
# This will throw an error if imported on a non-Linux platform.
import distro
distname = distro.id()
except ImportError:
distname = "unknown"
if "rhel" in distname or "sles" in distname:
SET_DWARF_VERSION_4 = "-gdwarf-5"
env.set("CFLAGS", "-fsanitize=address -shared-libasan -g " + SET_DWARF_VERSION_4)
env.set("CXXFLAGS", "-fsanitize=address -shared-libasan -g " + SET_DWARF_VERSION_4)
env.set("LDFLAGS", "-Wl,--enable-new-dtags -fuse-ld=lld -fsanitize=address -g -Wl,")
# HIP version vs Architecture # HIP version vs Architecture
# TODO: add a bunch of lines like: # TODO: add a bunch of lines like:
@@ -190,9 +162,23 @@ def asan_on(env, llvm_path):
# Add compiler minimum versions based on the first release where the # Add compiler minimum versions based on the first release where the
# processor is included in llvm/lib/Support/TargetParser.cpp # processor is included in llvm/lib/Support/TargetParser.cpp
depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx900:xnack-")
depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx906:xnack-")
depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx908:xnack-")
depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx90c")
depends_on("llvm-amdgpu@4.3.0:", when="amdgpu_target=gfx90a")
depends_on("llvm-amdgpu@4.3.0:", when="amdgpu_target=gfx90a:xnack-")
depends_on("llvm-amdgpu@4.3.0:", when="amdgpu_target=gfx90a:xnack+")
depends_on("llvm-amdgpu@5.2.0:", when="amdgpu_target=gfx940") depends_on("llvm-amdgpu@5.2.0:", when="amdgpu_target=gfx940")
depends_on("llvm-amdgpu@5.7.0:", when="amdgpu_target=gfx941") depends_on("llvm-amdgpu@5.7.0:", when="amdgpu_target=gfx941")
depends_on("llvm-amdgpu@5.7.0:", when="amdgpu_target=gfx942") depends_on("llvm-amdgpu@5.7.0:", when="amdgpu_target=gfx942")
depends_on("llvm-amdgpu@4.5.0:", when="amdgpu_target=gfx1013")
depends_on("llvm-amdgpu@3.8.0:", when="amdgpu_target=gfx1030")
depends_on("llvm-amdgpu@3.9.0:", when="amdgpu_target=gfx1031")
depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx1032")
depends_on("llvm-amdgpu@4.1.0:", when="amdgpu_target=gfx1033")
depends_on("llvm-amdgpu@4.3.0:", when="amdgpu_target=gfx1034")
depends_on("llvm-amdgpu@4.5.0:", when="amdgpu_target=gfx1035")
depends_on("llvm-amdgpu@5.2.0:", when="amdgpu_target=gfx1036") depends_on("llvm-amdgpu@5.2.0:", when="amdgpu_target=gfx1036")
depends_on("llvm-amdgpu@5.3.0:", when="amdgpu_target=gfx1100") depends_on("llvm-amdgpu@5.3.0:", when="amdgpu_target=gfx1100")
depends_on("llvm-amdgpu@5.3.0:", when="amdgpu_target=gfx1101") depends_on("llvm-amdgpu@5.3.0:", when="amdgpu_target=gfx1101")

View File

@@ -9,8 +9,6 @@
import inspect import inspect
from typing import List, Optional, Tuple from typing import List, Optional, Tuple
from llnl.util import lang
import spack.build_environment import spack.build_environment
#: Builder classes, as registered by the "builder" decorator #: Builder classes, as registered by the "builder" decorator
@@ -233,27 +231,24 @@ def __new__(mcs, name, bases, attr_dict):
for temporary_stage in (_RUN_BEFORE, _RUN_AFTER): for temporary_stage in (_RUN_BEFORE, _RUN_AFTER):
staged_callbacks = temporary_stage.callbacks staged_callbacks = temporary_stage.callbacks
# Here we have an adapter from an old-style package. This means there is no # We don't have callbacks in this class, move on
# hierarchy of builders, and every callback that had to be combined between if not staged_callbacks:
# *Package and *Builder has been combined already by _PackageAdapterMeta
if name == "Adapter":
continue continue
# If we are here we have callbacks. To get a complete list, we accumulate all the # If we are here we have callbacks. To get a complete list, get first what
# callbacks from base classes, we deduplicate them, then prepend what we have # was attached to parent classes, then prepend what we have registered here.
# registered here.
# #
# The order should be: # The order should be:
# 1. Callbacks are registered in order within the same class # 1. Callbacks are registered in order within the same class
# 2. Callbacks defined in derived classes precede those defined in base # 2. Callbacks defined in derived classes precede those defined in base
# classes # classes
callbacks_from_base = []
for base in bases: for base in bases:
current_callbacks = getattr(base, temporary_stage.attribute_name, None) callbacks_from_base = getattr(base, temporary_stage.attribute_name, None)
if not current_callbacks: if callbacks_from_base:
continue break
callbacks_from_base.extend(current_callbacks) else:
callbacks_from_base = list(lang.dedupe(callbacks_from_base)) callbacks_from_base = []
# Set the callbacks in this class and flush the temporary stage # Set the callbacks in this class and flush the temporary stage
attr_dict[temporary_stage.attribute_name] = staged_callbacks[:] + callbacks_from_base attr_dict[temporary_stage.attribute_name] = staged_callbacks[:] + callbacks_from_base
del temporary_stage.callbacks[:] del temporary_stage.callbacks[:]
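The metaclass change above accumulates callbacks from every base class instead of stopping at the first one that defines them. A self-contained sketch of that accumulation, with order-preserving de-duplication and locally registered callbacks kept first:

    def accumulate_callbacks(local_callbacks, bases, attr_name):
        seen = []
        for base in bases:
            for cb in getattr(base, attr_name, None) or []:
                if cb not in seen:
                    seen.append(cb)
        return list(local_callbacks) + seen

    class A: _run_after = ["a"]
    class B: _run_after = ["b", "a"]
    print(accumulate_callbacks(["local"], (A, B), "_run_after"))
    # ['local', 'a', 'b']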

View File

@@ -70,7 +70,7 @@
JOB_NAME_FORMAT = ( JOB_NAME_FORMAT = (
"{name}{@version} {/hash:7} {%compiler.name}{@compiler.version}{arch=architecture}" "{name}{@version} {/hash:7} {%compiler.name}{@compiler.version}{arch=architecture}"
) )
IS_WINDOWS = sys.platform == "win32"
spack_gpg = spack.main.SpackCommand("gpg") spack_gpg = spack.main.SpackCommand("gpg")
spack_compiler = spack.main.SpackCommand("compiler") spack_compiler = spack.main.SpackCommand("compiler")
@@ -103,7 +103,7 @@ def get_job_name(spec: spack.spec.Spec, build_group: str = ""):
job_name = spec.format(JOB_NAME_FORMAT) job_name = spec.format(JOB_NAME_FORMAT)
if build_group: if build_group:
job_name = f"{job_name} {build_group}" job_name = "{0} {1}".format(job_name, build_group)
return job_name[:255] return job_name[:255]
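Putting the format string and the truncation together, a small sketch of the job-name construction (spec is assumed to be a concrete spack.spec.Spec):

    def gitlab_job_name(spec, build_group: str = "") -> str:
        name = spec.format(
            "{name}{@version} {/hash:7} {%compiler.name}{@compiler.version}{arch=architecture}"
        )
        if build_group:
            name = f"{name} {build_group}"
        # GitLab caps job names at 255 characters.
        return name[:255]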
@@ -114,7 +114,7 @@ def _remove_reserved_tags(tags):
def _spec_deps_key(s): def _spec_deps_key(s):
return f"{s.name}/{s.dag_hash(7)}" return "{0}/{1}".format(s.name, s.dag_hash(7))
def _add_dependency(spec_label, dep_label, deps): def _add_dependency(spec_label, dep_label, deps):
@@ -213,7 +213,7 @@ def _print_staging_summary(spec_labels, stages, mirrors_to_check, rebuild_decisi
mirrors = spack.mirror.MirrorCollection(mirrors=mirrors_to_check, binary=True) mirrors = spack.mirror.MirrorCollection(mirrors=mirrors_to_check, binary=True)
tty.msg("Checked the following mirrors for binaries:") tty.msg("Checked the following mirrors for binaries:")
for m in mirrors.values(): for m in mirrors.values():
tty.msg(f" {m.fetch_url}") tty.msg(" {0}".format(m.fetch_url))
tty.msg("Staging summary ([x] means a job needs rebuilding):") tty.msg("Staging summary ([x] means a job needs rebuilding):")
for stage_index, stage in enumerate(stages): for stage_index, stage in enumerate(stages):
@@ -296,7 +296,7 @@ def append_dep(s, d):
for spec in spec_list: for spec in spec_list:
for s in spec.traverse(deptype="all"): for s in spec.traverse(deptype="all"):
if s.external: if s.external:
tty.msg(f"Will not stage external pkg: {s}") tty.msg("Will not stage external pkg: {0}".format(s))
continue continue
skey = _spec_deps_key(s) skey = _spec_deps_key(s)
@@ -305,7 +305,7 @@ def append_dep(s, d):
for d in s.dependencies(deptype="all"): for d in s.dependencies(deptype="all"):
dkey = _spec_deps_key(d) dkey = _spec_deps_key(d)
if d.external: if d.external:
tty.msg(f"Will not stage external dep: {d}") tty.msg("Will not stage external dep: {0}".format(d))
continue continue
append_dep(skey, dkey) append_dep(skey, dkey)
@@ -374,8 +374,8 @@ def get_stack_changed(env_path, rev1="HEAD^", rev2="HEAD"):
for path in lines: for path in lines:
if ".gitlab-ci.yml" in path or path in env_path: if ".gitlab-ci.yml" in path or path in env_path:
tty.debug(f"env represented by {env_path} changed") tty.debug("env represented by {0} changed".format(env_path))
tty.debug(f"touched file: {path}") tty.debug("touched file: {0}".format(path))
return True return True
return False return False
@@ -419,7 +419,7 @@ def get_spec_filter_list(env, affected_pkgs, dependent_traverse_depth=None):
all_concrete_specs = env.all_specs() all_concrete_specs = env.all_specs()
tty.debug("All concrete environment specs:") tty.debug("All concrete environment specs:")
for s in all_concrete_specs: for s in all_concrete_specs:
tty.debug(f" {s.name}/{s.dag_hash()[:7]}") tty.debug(" {0}/{1}".format(s.name, s.dag_hash()[:7]))
affected_pkgs = frozenset(affected_pkgs) affected_pkgs = frozenset(affected_pkgs)
env_matches = [s for s in all_concrete_specs if s.name in affected_pkgs] env_matches = [s for s in all_concrete_specs if s.name in affected_pkgs]
visited = set() visited = set()
@@ -510,7 +510,7 @@ def __is_named(self, section):
and if so return the name otherwise return none. and if so return the name otherwise return none.
""" """
for _name in self.named_jobs: for _name in self.named_jobs:
keys = [f"{_name}-job", f"{_name}-job-remove"] keys = ["{0}-job".format(_name), "{0}-job-remove".format(_name)]
if any([key for key in keys if key in section]): if any([key for key in keys if key in section]):
return _name return _name
@@ -525,9 +525,9 @@ def __job_name(name, suffix=""):
jname = name jname = name
if suffix: if suffix:
jname = f"{name}-job{suffix}" jname = "{0}-job{1}".format(name, suffix)
else: else:
jname = f"{name}-job" jname = "{0}-job".format(name)
return jname return jname
@@ -739,7 +739,7 @@ def generate_gitlab_ci_yaml(
# Requested to prune untouched packages, but assume we won't do that # Requested to prune untouched packages, but assume we won't do that
# unless we're actually in a git repo. # unless we're actually in a git repo.
rev1, rev2 = get_change_revisions() rev1, rev2 = get_change_revisions()
tty.debug(f"Got following revisions: rev1={rev1}, rev2={rev2}") tty.debug("Got following revisions: rev1={0}, rev2={1}".format(rev1, rev2))
if rev1 and rev2: if rev1 and rev2:
# If the stack file itself did not change, proceed with pruning # If the stack file itself did not change, proceed with pruning
if not get_stack_changed(env.manifest_path, rev1, rev2): if not get_stack_changed(env.manifest_path, rev1, rev2):
@@ -747,13 +747,13 @@ def generate_gitlab_ci_yaml(
affected_pkgs = compute_affected_packages(rev1, rev2) affected_pkgs = compute_affected_packages(rev1, rev2)
tty.debug("affected pkgs:") tty.debug("affected pkgs:")
for p in affected_pkgs: for p in affected_pkgs:
tty.debug(f" {p}") tty.debug(" {0}".format(p))
affected_specs = get_spec_filter_list( affected_specs = get_spec_filter_list(
env, affected_pkgs, dependent_traverse_depth=dependent_depth env, affected_pkgs, dependent_traverse_depth=dependent_depth
) )
tty.debug("all affected specs:") tty.debug("all affected specs:")
for s in affected_specs: for s in affected_specs:
tty.debug(f" {s.name}/{s.dag_hash()[:7]}") tty.debug(" {0}/{1}".format(s.name, s.dag_hash()[:7]))
# Allow overriding --prune-dag cli opt with environment variable # Allow overriding --prune-dag cli opt with environment variable
prune_dag_override = os.environ.get("SPACK_PRUNE_UP_TO_DATE", None) prune_dag_override = os.environ.get("SPACK_PRUNE_UP_TO_DATE", None)
@@ -978,7 +978,7 @@ def generate_gitlab_ci_yaml(
rebuild_decisions = {} rebuild_decisions = {}
for stage_jobs in stages: for stage_jobs in stages:
stage_name = f"stage-{stage_id}" stage_name = "stage-{0}".format(stage_id)
stage_names.append(stage_name) stage_names.append(stage_name)
stage_id += 1 stage_id += 1
@@ -1009,7 +1009,7 @@ def generate_gitlab_ci_yaml(
job_object = spack_ci_ir["jobs"][release_spec_dag_hash]["attributes"] job_object = spack_ci_ir["jobs"][release_spec_dag_hash]["attributes"]
if not job_object: if not job_object:
tty.warn(f"No match found for {release_spec}, skipping it") tty.warn("No match found for {0}, skipping it".format(release_spec))
continue continue
if spack_pipeline_type is not None: if spack_pipeline_type is not None:
@@ -1119,7 +1119,7 @@ def main_script_replacements(cmd):
if artifacts_root: if artifacts_root:
job_object["needs"].append( job_object["needs"].append(
{"job": generate_job_name, "pipeline": f"{parent_pipeline_id}"} {"job": generate_job_name, "pipeline": "{0}".format(parent_pipeline_id)}
) )
# Let downstream jobs know whether the spec needed rebuilding, regardless # Let downstream jobs know whether the spec needed rebuilding, regardless
@@ -1185,17 +1185,19 @@ def main_script_replacements(cmd):
if spack_pipeline_type == "spack_pull_request": if spack_pipeline_type == "spack_pull_request":
spack.mirror.remove("ci_shared_pr_mirror", cfg.default_modify_scope()) spack.mirror.remove("ci_shared_pr_mirror", cfg.default_modify_scope())
tty.debug(f"{job_id} build jobs generated in {stage_id} stages") tty.debug("{0} build jobs generated in {1} stages".format(job_id, stage_id))
if job_id > 0: if job_id > 0:
tty.debug(f"The max_needs_job is {max_needs_job}, with {max_length_needs} needs") tty.debug(
"The max_needs_job is {0}, with {1} needs".format(max_needs_job, max_length_needs)
)
# Use "all_job_names" to populate the build group for this set # Use "all_job_names" to populate the build group for this set
if cdash_handler and cdash_handler.auth_token: if cdash_handler and cdash_handler.auth_token:
try: try:
cdash_handler.populate_buildgroup(all_job_names) cdash_handler.populate_buildgroup(all_job_names)
except (SpackError, HTTPError, URLError) as err: except (SpackError, HTTPError, URLError) as err:
tty.warn(f"Problem populating buildgroup: {err}") tty.warn("Problem populating buildgroup: {0}".format(err))
else: else:
tty.warn("Unable to populate buildgroup without CDash credentials") tty.warn("Unable to populate buildgroup without CDash credentials")
@@ -1209,7 +1211,9 @@ def main_script_replacements(cmd):
sync_job = copy.deepcopy(spack_ci_ir["jobs"]["copy"]["attributes"]) sync_job = copy.deepcopy(spack_ci_ir["jobs"]["copy"]["attributes"])
sync_job["stage"] = "copy" sync_job["stage"] = "copy"
if artifacts_root: if artifacts_root:
sync_job["needs"] = [{"job": generate_job_name, "pipeline": f"{parent_pipeline_id}"}] sync_job["needs"] = [
{"job": generate_job_name, "pipeline": "{0}".format(parent_pipeline_id)}
]
if "variables" not in sync_job: if "variables" not in sync_job:
sync_job["variables"] = {} sync_job["variables"] = {}
@@ -1226,7 +1230,6 @@ def main_script_replacements(cmd):
# TODO: Remove this condition in Spack 0.23 # TODO: Remove this condition in Spack 0.23
buildcache_source = os.environ.get("SPACK_SOURCE_MIRROR", None) buildcache_source = os.environ.get("SPACK_SOURCE_MIRROR", None)
sync_job["variables"]["SPACK_BUILDCACHE_SOURCE"] = buildcache_source sync_job["variables"]["SPACK_BUILDCACHE_SOURCE"] = buildcache_source
sync_job["dependencies"] = []
output_object["copy"] = sync_job output_object["copy"] = sync_job
job_id += 1 job_id += 1
@@ -1345,7 +1348,7 @@ def main_script_replacements(cmd):
copy_specs_file = os.path.join( copy_specs_file = os.path.join(
copy_specs_dir, copy_specs_dir,
f"copy_{spack_stack_name if spack_stack_name else 'rebuilt'}_specs.json", "copy_{}_specs.json".format(spack_stack_name if spack_stack_name else "rebuilt"),
) )
with open(copy_specs_file, "w") as fd: with open(copy_specs_file, "w") as fd:
@@ -1437,7 +1440,7 @@ def import_signing_key(base64_signing_key):
fd.write(decoded_key) fd.write(decoded_key)
key_import_output = spack_gpg("trust", sign_key_path, output=str) key_import_output = spack_gpg("trust", sign_key_path, output=str)
tty.debug(f"spack gpg trust {sign_key_path}") tty.debug("spack gpg trust {0}".format(sign_key_path))
tty.debug(key_import_output) tty.debug(key_import_output)
# Now print the keys we have for verifying and signing # Now print the keys we have for verifying and signing
@@ -1463,39 +1466,45 @@ def can_verify_binaries():
return len(gpg_util.public_keys()) >= 1 return len(gpg_util.public_keys()) >= 1
def _push_to_build_cache(spec: spack.spec.Spec, sign_binaries: bool, mirror_url: str) -> None: def _push_mirror_contents(input_spec, sign_binaries, mirror_url):
"""Unchecked version of the public API, for easier mocking""" """Unchecked version of the public API, for easier mocking"""
bindist.push_or_raise( unsigned = not sign_binaries
spec, tty.debug("Creating buildcache ({0})".format("unsigned" if unsigned else "signed"))
spack.mirror.Mirror.from_url(mirror_url).push_url, push_url = spack.mirror.Mirror.from_url(mirror_url).push_url
bindist.PushOptions(force=True, unsigned=not sign_binaries), return bindist.push(input_spec, push_url, bindist.PushOptions(force=True, unsigned=unsigned))
)
def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: bool) -> bool: def push_mirror_contents(input_spec: spack.spec.Spec, mirror_url, sign_binaries):
"""Push one or more binary packages to the mirror. """Push one or more binary packages to the mirror.
Arguments: Arguments:
spec: Installed spec to push input_spec(spack.spec.Spec): Installed spec to push
mirror_url: URL of target mirror mirror_url (str): Base url of target mirror
sign_binaries: If True, spack will attempt to sign binary package before pushing. sign_binaries (bool): If True, spack will attempt to sign binary
package before pushing.
""" """
tty.debug(f"Pushing to build cache ({'signed' if sign_binaries else 'unsigned'})")
try: try:
_push_to_build_cache(spec, sign_binaries, mirror_url) return _push_mirror_contents(input_spec, sign_binaries, mirror_url)
return True except Exception as inst:
except bindist.PushToBuildCacheError as e: # If the mirror we're pushing to is on S3 and there's some
tty.error(str(e)) # permissions problem, for example, we can't just target
return False # that exception type here, since users of the
except Exception as e: # `spack ci rebuild' may not need or want any dependency
# TODO (zackgalbreath): write an adapter for boto3 exceptions so we can catch a specific # on boto3. So we use the first non-boto exception type
# exception instead of parsing str(e)... # in the heirarchy:
msg = str(e) # boto3.exceptions.S3UploadFailedError
if any(x in msg for x in ["Access Denied", "InvalidAccessKeyId"]): # boto3.exceptions.Boto3Error
tty.error(f"Permission problem writing to {mirror_url}: {msg}") # Exception
# BaseException
# object
err_msg = "Error msg: {0}".format(inst)
if any(x in err_msg for x in ["Access Denied", "InvalidAccessKeyId"]):
tty.msg("Permission problem writing to {0}".format(mirror_url))
tty.msg(err_msg)
return False return False
raise else:
raise inst
def remove_other_mirrors(mirrors_to_keep, scope=None): def remove_other_mirrors(mirrors_to_keep, scope=None):
@@ -1522,9 +1531,8 @@ def copy_files_to_artifacts(src, artifacts_dir):
try: try:
fs.copy(src, artifacts_dir) fs.copy(src, artifacts_dir)
except Exception as err: except Exception as err:
msg = ( msg = ("Unable to copy files ({0}) to artifacts {1} due to " "exception: {2}").format(
f"Unable to copy files ({src}) to artifacts {artifacts_dir} due to " src, artifacts_dir, str(err)
f"exception: {str(err)}"
) )
tty.warn(msg) tty.warn(msg)
@@ -1540,23 +1548,23 @@ def copy_stage_logs_to_artifacts(job_spec: spack.spec.Spec, job_log_dir: str) ->
job_spec: spec associated with spack install log job_spec: spec associated with spack install log
job_log_dir: path into which build log should be copied job_log_dir: path into which build log should be copied
""" """
tty.debug(f"job spec: {job_spec}") tty.debug("job spec: {0}".format(job_spec))
if not job_spec: if not job_spec:
msg = f"Cannot copy stage logs: job spec ({job_spec}) is required" msg = "Cannot copy stage logs: job spec ({0}) is required"
tty.error(msg) tty.error(msg.format(job_spec))
return return
try: try:
pkg_cls = spack.repo.PATH.get_pkg_class(job_spec.name) pkg_cls = spack.repo.PATH.get_pkg_class(job_spec.name)
job_pkg = pkg_cls(job_spec) job_pkg = pkg_cls(job_spec)
tty.debug(f"job package: {job_pkg}") tty.debug("job package: {0}".format(job_pkg))
except AssertionError: except AssertionError:
msg = f"Cannot copy stage logs: job spec ({job_spec}) must be concrete" msg = "Cannot copy stage logs: job spec ({0}) must be concrete"
tty.error(msg) tty.error(msg.format(job_spec))
return return
stage_dir = job_pkg.stage.path stage_dir = job_pkg.stage.path
tty.debug(f"stage dir: {stage_dir}") tty.debug("stage dir: {0}".format(stage_dir))
for file in [job_pkg.log_path, job_pkg.env_mods_path, *job_pkg.builder.archive_files]: for file in [job_pkg.log_path, job_pkg.env_mods_path, *job_pkg.builder.archive_files]:
copy_files_to_artifacts(file, job_log_dir) copy_files_to_artifacts(file, job_log_dir)
@@ -1569,10 +1577,10 @@ def copy_test_logs_to_artifacts(test_stage, job_test_dir):
test_stage (str): test stage path test_stage (str): test stage path
job_test_dir (str): the destination artifacts test directory job_test_dir (str): the destination artifacts test directory
""" """
tty.debug(f"test stage: {test_stage}") tty.debug("test stage: {0}".format(test_stage))
if not os.path.exists(test_stage): if not os.path.exists(test_stage):
msg = f"Cannot copy test logs: job test stage ({test_stage}) does not exist" msg = "Cannot copy test logs: job test stage ({0}) does not exist"
tty.error(msg) tty.error(msg.format(test_stage))
return return
copy_files_to_artifacts(os.path.join(test_stage, "*", "*.txt"), job_test_dir) copy_files_to_artifacts(os.path.join(test_stage, "*", "*.txt"), job_test_dir)
@@ -1587,7 +1595,7 @@ def download_and_extract_artifacts(url, work_dir):
url (str): Complete url to artifacts.zip file url (str): Complete url to artifacts.zip file
work_dir (str): Path to destination where artifacts should be extracted work_dir (str): Path to destination where artifacts should be extracted
""" """
tty.msg(f"Fetching artifacts from: {url}\n") tty.msg("Fetching artifacts from: {0}\n".format(url))
headers = {"Content-Type": "application/zip"} headers = {"Content-Type": "application/zip"}
@@ -1604,7 +1612,7 @@ def download_and_extract_artifacts(url, work_dir):
response_code = response.getcode() response_code = response.getcode()
if response_code != 200: if response_code != 200:
msg = f"Error response code ({response_code}) in reproduce_ci_job" msg = "Error response code ({0}) in reproduce_ci_job".format(response_code)
raise SpackError(msg) raise SpackError(msg)
artifacts_zip_path = os.path.join(work_dir, "artifacts.zip") artifacts_zip_path = os.path.join(work_dir, "artifacts.zip")
@@ -1634,7 +1642,7 @@ def get_spack_info():
return git_log return git_log
return f"no git repo, use spack {spack.spack_version}" return "no git repo, use spack {0}".format(spack.spack_version)
def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None): def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
@@ -1657,8 +1665,8 @@ def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
""" """
# figure out the path to the spack git version being used for the # figure out the path to the spack git version being used for the
# reproduction # reproduction
print(f"checkout_commit: {checkout_commit}") print("checkout_commit: {0}".format(checkout_commit))
print(f"merge_commit: {merge_commit}") print("merge_commit: {0}".format(merge_commit))
dot_git_path = os.path.join(spack.paths.prefix, ".git") dot_git_path = os.path.join(spack.paths.prefix, ".git")
if not os.path.exists(dot_git_path): if not os.path.exists(dot_git_path):
@@ -1677,14 +1685,14 @@ def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
git("log", "-1", checkout_commit, output=str, error=os.devnull, fail_on_error=False) git("log", "-1", checkout_commit, output=str, error=os.devnull, fail_on_error=False)
if git.returncode != 0: if git.returncode != 0:
tty.error(f"Missing commit: {checkout_commit}") tty.error("Missing commit: {0}".format(checkout_commit))
return False return False
if merge_commit: if merge_commit:
git("log", "-1", merge_commit, output=str, error=os.devnull, fail_on_error=False) git("log", "-1", merge_commit, output=str, error=os.devnull, fail_on_error=False)
if git.returncode != 0: if git.returncode != 0:
tty.error(f"Missing commit: {merge_commit}") tty.error("Missing commit: {0}".format(merge_commit))
return False return False
# Next attempt to clone your local spack repo into the repro dir # Next attempt to clone your local spack repo into the repro dir
@@ -1707,7 +1715,7 @@ def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
) )
if git.returncode != 0: if git.returncode != 0:
tty.error(f"Unable to checkout {checkout_commit}") tty.error("Unable to checkout {0}".format(checkout_commit))
tty.msg(co_out) tty.msg(co_out)
return False return False
@@ -1726,7 +1734,7 @@ def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None):
) )
if git.returncode != 0: if git.returncode != 0:
tty.error(f"Unable to merge {merge_commit}") tty.error("Unable to merge {0}".format(merge_commit))
tty.msg(merge_out) tty.msg(merge_out)
return False return False
@@ -1747,7 +1755,6 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
commands to run to reproduce the build once inside the container. commands to run to reproduce the build once inside the container.
""" """
work_dir = os.path.realpath(work_dir) work_dir = os.path.realpath(work_dir)
platform_script_ext = "ps1" if IS_WINDOWS else "sh"
download_and_extract_artifacts(url, work_dir) download_and_extract_artifacts(url, work_dir)
gpg_path = None gpg_path = None
@@ -1758,13 +1765,13 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
lock_file = fs.find(work_dir, "spack.lock")[0] lock_file = fs.find(work_dir, "spack.lock")[0]
repro_lock_dir = os.path.dirname(lock_file) repro_lock_dir = os.path.dirname(lock_file)
tty.debug(f"Found lock file in: {repro_lock_dir}") tty.debug("Found lock file in: {0}".format(repro_lock_dir))
yaml_files = fs.find(work_dir, ["*.yaml", "*.yml"]) yaml_files = fs.find(work_dir, ["*.yaml", "*.yml"])
tty.debug("yaml files:") tty.debug("yaml files:")
for yaml_file in yaml_files: for yaml_file in yaml_files:
tty.debug(f" {yaml_file}") tty.debug(" {0}".format(yaml_file))
pipeline_yaml = None pipeline_yaml = None
@@ -1779,10 +1786,10 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
pipeline_yaml = yaml_obj pipeline_yaml = yaml_obj
if pipeline_yaml: if pipeline_yaml:
tty.debug(f"\n{yf} is likely your pipeline file") tty.debug("\n{0} is likely your pipeline file".format(yf))
relative_concrete_env_dir = pipeline_yaml["variables"]["SPACK_CONCRETE_ENV_DIR"] relative_concrete_env_dir = pipeline_yaml["variables"]["SPACK_CONCRETE_ENV_DIR"]
tty.debug(f"Relative environment path used by cloud job: {relative_concrete_env_dir}") tty.debug("Relative environment path used by cloud job: {0}".format(relative_concrete_env_dir))
# Using the relative concrete environment path found in the generated # Using the relative concrete environment path found in the generated
# pipeline variable above, copy the spack environment files so they'll # pipeline variable above, copy the spack environment files so they'll
@@ -1796,11 +1803,10 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
shutil.copyfile(orig_yaml_path, copy_yaml_path) shutil.copyfile(orig_yaml_path, copy_yaml_path)
# Find the install script in the unzipped artifacts and make it executable # Find the install script in the unzipped artifacts and make it executable
install_script = fs.find(work_dir, f"install.{platform_script_ext}")[0] install_script = fs.find(work_dir, "install.sh")[0]
if not IS_WINDOWS: st = os.stat(install_script)
# pointless on Windows os.chmod(install_script, st.st_mode | stat.S_IEXEC)
st = os.stat(install_script)
os.chmod(install_script, st.st_mode | stat.S_IEXEC)
# Find the repro details file. This just includes some values we wrote # Find the repro details file. This just includes some values we wrote
# during `spack ci rebuild` to make reproduction easier. E.g. the job # during `spack ci rebuild` to make reproduction easier. E.g. the job
# name is written here so we can easily find the configuration of the # name is written here so we can easily find the configuration of the
@@ -1838,7 +1844,7 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
job_image = job_image_elt["name"] job_image = job_image_elt["name"]
else: else:
job_image = job_image_elt job_image = job_image_elt
tty.msg(f"Job ran with the following image: {job_image}") tty.msg("Job ran with the following image: {0}".format(job_image))
# Because we found this job was run with a docker image, so we will try # Because we found this job was run with a docker image, so we will try
# to print a "docker run" command that bind-mounts the directory where # to print a "docker run" command that bind-mounts the directory where
@@ -1913,75 +1919,65 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
job_tags = None job_tags = None
if "tags" in job_yaml: if "tags" in job_yaml:
job_tags = job_yaml["tags"] job_tags = job_yaml["tags"]
tty.msg(f"Job ran with the following tags: {job_tags}") tty.msg("Job ran with the following tags: {0}".format(job_tags))
entrypoint_script = [ entrypoint_script = [
["git", "config", "--global", "--add", "safe.directory", mount_as_dir], ["git", "config", "--global", "--add", "safe.directory", mount_as_dir],
[ [".", os.path.join(mount_as_dir if job_image else work_dir, "share/spack/setup-env.sh")],
".",
os.path.join(
mount_as_dir if job_image else work_dir,
f"share/spack/setup-env.{platform_script_ext}",
),
],
["spack", "gpg", "trust", mounted_gpg_path if job_image else gpg_path] if gpg_path else [], ["spack", "gpg", "trust", mounted_gpg_path if job_image else gpg_path] if gpg_path else [],
["spack", "env", "activate", mounted_env_dir if job_image else repro_dir], ["spack", "env", "activate", mounted_env_dir if job_image else repro_dir],
[ [os.path.join(mounted_repro_dir, "install.sh") if job_image else install_script],
(
os.path.join(mounted_repro_dir, f"install.{platform_script_ext}")
if job_image
else install_script
)
],
] ]
entry_script = os.path.join(mounted_workdir, f"entrypoint.{platform_script_ext}")
inst_list = [] inst_list = []
# Finally, print out some instructions to reproduce the build # Finally, print out some instructions to reproduce the build
if job_image: if job_image:
# Allow interactive # Allow interactive
install_mechanism = ( entrypoint_script.extend(
os.path.join(mounted_repro_dir, f"install.{platform_script_ext}") [
if job_image [
else install_script "echo",
"Re-run install script using:\n\t{0}".format(
os.path.join(mounted_repro_dir, "install.sh")
if job_image
else install_script
),
],
# Allow interactive
["exec", "$@"],
]
) )
entrypoint_script.append(["echo", f"Re-run install script using:\n\t{install_mechanism}"])
# Allow interactive
if IS_WINDOWS:
entrypoint_script.append(["&", "($args -Join ' ')", "-NoExit"])
else:
entrypoint_script.append(["exec", "$@"])
process_command( process_command(
"entrypoint", entrypoint_script, work_dir, run=False, exit_on_failure=False "entrypoint", entrypoint_script, work_dir, run=False, exit_on_failure=False
) )
docker_command = [ docker_command = [
runtime, [
"run", runtime,
"-i", "run",
"-t", "-i",
"--rm", "-t",
"--name", "--rm",
"spack_reproducer", "--name",
"-v", "spack_reproducer",
":".join([work_dir, mounted_workdir, "Z"]), "-v",
"-v", ":".join([work_dir, mounted_workdir, "Z"]),
":".join( "-v",
[ ":".join(
os.path.join(work_dir, "jobs_scratch_dir"), [
os.path.join(mount_as_dir, "jobs_scratch_dir"), os.path.join(work_dir, "jobs_scratch_dir"),
"Z", os.path.join(mount_as_dir, "jobs_scratch_dir"),
] "Z",
), ]
"-v", ),
":".join([os.path.join(work_dir, "spack"), mount_as_dir, "Z"]), "-v",
"--entrypoint", ":".join([os.path.join(work_dir, "spack"), mount_as_dir, "Z"]),
"--entrypoint",
os.path.join(mounted_workdir, "entrypoint.sh"),
job_image,
"bash",
]
] ]
if IS_WINDOWS:
docker_command.extend(["powershell.exe", job_image, entry_script, "powershell.exe"])
else:
docker_command.extend([entry_script, job_image, "bash"])
docker_command = [docker_command]
autostart = autostart and setup_result autostart = autostart and setup_result
process_command("start", docker_command, work_dir, run=autostart) process_command("start", docker_command, work_dir, run=autostart)
@@ -1990,26 +1986,22 @@ def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime):
inst_list.extend( inst_list.extend(
[ [
" - Start the docker container install", " - Start the docker container install",
f" $ {work_dir}/start.{platform_script_ext}", " $ {0}/start.sh".format(work_dir),
] ]
) )
else: else:
autostart = autostart and setup_result process_command("reproducer", entrypoint_script, work_dir, run=False)
process_command("reproducer", entrypoint_script, work_dir, run=autostart)
inst_list.append("\nOnce on the tagged runner:\n\n") inst_list.append("\nOnce on the tagged runner:\n\n")
inst_list.extend( inst_list.extend(
[ [" - Run the reproducer script", " $ {0}/reproducer.sh".format(work_dir)]
" - Run the reproducer script",
f" $ {work_dir}/reproducer.{platform_script_ext}",
]
) )
if not setup_result: if not setup_result:
inst_list.append("\n - Clone spack and acquire tested commit") inst_list.append("\n - Clone spack and acquire tested commit")
inst_list.append(f"\n {spack_info}\n") inst_list.append("\n {0}\n".format(spack_info))
inst_list.append("\n") inst_list.append("\n")
inst_list.append(f"\n Path to clone spack: {work_dir}/spack\n\n") inst_list.append("\n Path to clone spack: {0}/spack\n\n".format(work_dir))
tty.msg("".join(inst_list)) tty.msg("".join(inst_list))
@@ -2028,78 +2020,50 @@ def process_command(name, commands, repro_dir, run=True, exit_on_failure=True):
     Returns: the exit code from processing the command
     """
-    tty.debug(f"spack {name} arguments: {commands}")
+    tty.debug("spack {0} arguments: {1}".format(name, commands))

     if len(commands) == 0 or isinstance(commands[0], str):
         commands = [commands]

-    def compose_command_err_handling(args):
-        if not IS_WINDOWS:
-            args = [f'"{arg}"' for arg in args]
-        arg_str = " ".join(args)
-        result = arg_str + "\n"
-        # ErrorActionPreference will handle PWSH commandlets (Spack calls),
-        # but we need to handle EXEs (git, etc) ourselves
-        catch_exe_failure = (
-            """
-if ($LASTEXITCODE -ne 0){
-    throw "Command {} has failed"
-}
-"""
-            if IS_WINDOWS
-            else ""
-        )
-        if exit_on_failure and catch_exe_failure:
-            result += catch_exe_failure.format(arg_str)
-        return result
-
-    # Create a string [command 1] \n [command 2] \n ... \n [command n] with
-    # commands composed into a platform dependent shell script, pwsh on Windows,
-    full_command = "\n".join(map(compose_command_err_handling, commands))
-
-    # Write the command to a python script
-    if IS_WINDOWS:
-        script = f"{name}.ps1"
-        script_content = [f"\n# spack {name} command\n"]
-        if exit_on_failure:
-            script_content.append('$ErrorActionPreference = "Stop"\n')
-        if os.environ.get("SPACK_VERBOSE_SCRIPT"):
-            script_content.append("Set-PSDebug -Trace 2\n")
-    else:
-        script = f"{name}.sh"
-        script_content = ["#!/bin/sh\n\n", f"\n# spack {name} command\n"]
-        if exit_on_failure:
-            script_content.append("set -e\n")
-        if os.environ.get("SPACK_VERBOSE_SCRIPT"):
-            script_content.append("set -x\n")
-    script_content.append(full_command)
-    script_content.append("\n")
+    # Create a string [command 1] && [command 2] && ... && [command n] with commands
+    # quoted using double quotes.
+    args_to_string = lambda args: " ".join('"{}"'.format(arg) for arg in args)
+    full_command = " \n ".join(map(args_to_string, commands))
+
+    # Write the command to a shell script
+    script = "{0}.sh".format(name)

     with open(script, "w") as fd:
-        for line in script_content:
-            fd.write(line)
+        fd.write("#!/bin/sh\n\n")
+        fd.write("\n# spack {0} command\n".format(name))
+        if exit_on_failure:
+            fd.write("set -e\n")
+        if os.environ.get("SPACK_VERBOSE_SCRIPT"):
+            fd.write("set -x\n")
+        fd.write(full_command)
+        fd.write("\n")
+
+    st = os.stat(script)
+    os.chmod(script, st.st_mode | stat.S_IEXEC)

     copy_path = os.path.join(repro_dir, script)
     shutil.copyfile(script, copy_path)
-    if not IS_WINDOWS:
-        st = os.stat(copy_path)
-        os.chmod(copy_path, st.st_mode | stat.S_IEXEC)
+    st = os.stat(copy_path)
+    os.chmod(copy_path, st.st_mode | stat.S_IEXEC)

-    # Run the generated shell script as if it were being run in
+    # Run the generated install.sh shell script as if it were being run in
     # a login shell.
     exit_code = None
     if run:
         try:
-            # We use sh as executor on Linux like platforms, pwsh on Windows
-            interpreter = "powershell.exe" if IS_WINDOWS else "/bin/sh"
-            cmd_process = subprocess.Popen([interpreter, f"./{script}"])
+            cmd_process = subprocess.Popen(["/bin/sh", "./{0}".format(script)])
             cmd_process.wait()
             exit_code = cmd_process.returncode
         except (ValueError, subprocess.CalledProcessError, OSError) as err:
-            tty.error(f"Encountered error running {name} script")
+            tty.error("Encountered error running {0} script".format(name))
             tty.error(err)
             exit_code = 1
-        tty.debug(f"spack {name} exited {exit_code}")
+        tty.debug("spack {0} exited {1}".format(name, exit_code))
     else:
         # Delete the script, it is copied to the destination dir
         os.remove(script)
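For orientation only (this sketch is not part of the diff): with the right-hand version of process_command above, a call such as process_command("install", [["spack", "install", "zlib"]], repro_dir) would write a wrapper roughly like the following; the command list and package name are made-up examples.

    #!/bin/sh

    # spack install command
    set -e
    "spack" "install" "zlib"

The left-hand version generalizes the same idea: it collects the lines in script_content, emits a .ps1 script with an exit-code check on Windows, and a .sh script otherwise.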
@@ -2124,7 +2088,7 @@ def create_buildcache(
     for mirror_url in destination_mirror_urls:
         results.append(
             PushResult(
-                success=push_to_build_cache(input_spec, mirror_url, sign_binaries), url=mirror_url
+                success=push_mirror_contents(input_spec, mirror_url, sign_binaries), url=mirror_url
             )
         )
@@ -2158,7 +2122,7 @@ def write_broken_spec(url, pkg_name, stack_name, job_url, pipeline_url, spec_dic
         # If there is an S3 error (e.g., access denied or connection
         # error), the first non boto-specific class in the exception
         # hierarchy is Exception. Just print a warning and return
-        msg = f"Error writing to broken specs list {url}: {err}"
+        msg = "Error writing to broken specs list {0}: {1}".format(url, err)
         tty.warn(msg)
     finally:
         shutil.rmtree(tmpdir)
@@ -2171,7 +2135,7 @@ def read_broken_spec(broken_spec_url):
try: try:
_, _, fs = web_util.read_from_url(broken_spec_url) _, _, fs = web_util.read_from_url(broken_spec_url)
except (URLError, web_util.SpackWebError, HTTPError): except (URLError, web_util.SpackWebError, HTTPError):
tty.warn(f"Unable to read broken spec from {broken_spec_url}") tty.warn("Unable to read broken spec from {0}".format(broken_spec_url))
return None return None
broken_spec_contents = codecs.getreader("utf-8")(fs).read() broken_spec_contents = codecs.getreader("utf-8")(fs).read()
@@ -2186,14 +2150,14 @@ def display_broken_spec_messages(base_url, hashes):
for spec_hash, broken_spec in [tup for tup in broken_specs if tup[1]]: for spec_hash, broken_spec in [tup for tup in broken_specs if tup[1]]:
details = broken_spec["broken-spec"] details = broken_spec["broken-spec"]
if "job-name" in details: if "job-name" in details:
item_name = f"{details['job-name']}/{spec_hash[:7]}" item_name = "{0}/{1}".format(details["job-name"], spec_hash[:7])
else: else:
item_name = spec_hash item_name = spec_hash
if "job-stack" in details: if "job-stack" in details:
item_name = f"{item_name} (in stack {details['job-stack']})" item_name = "{0} (in stack {1})".format(item_name, details["job-stack"])
msg = f" {item_name} was reported broken here: {details['job-url']}" msg = " {0} was reported broken here: {1}".format(item_name, details["job-url"])
tty.msg(msg) tty.msg(msg)
@@ -2216,7 +2180,7 @@ def run_standalone_tests(**kwargs):
log_file = kwargs.get("log_file") log_file = kwargs.get("log_file")
if cdash and log_file: if cdash and log_file:
tty.msg(f"The test log file {log_file} option is ignored with CDash reporting") tty.msg("The test log file {0} option is ignored with CDash reporting".format(log_file))
log_file = None log_file = None
# Error out but do NOT terminate if there are missing required arguments. # Error out but do NOT terminate if there are missing required arguments.
@@ -2242,10 +2206,10 @@ def run_standalone_tests(**kwargs):
test_args.extend(["--log-file", log_file]) test_args.extend(["--log-file", log_file])
test_args.append(job_spec.name) test_args.append(job_spec.name)
tty.debug(f"Running {job_spec.name} stand-alone tests") tty.debug("Running {0} stand-alone tests".format(job_spec.name))
exit_code = process_command("test", test_args, repro_dir) exit_code = process_command("test", test_args, repro_dir)
tty.debug(f"spack test exited {exit_code}") tty.debug("spack test exited {0}".format(exit_code))
class CDashHandler: class CDashHandler:
@@ -2268,7 +2232,7 @@ def __init__(self, ci_cdash):
# append runner description to the site if available # append runner description to the site if available
runner = os.environ.get("CI_RUNNER_DESCRIPTION") runner = os.environ.get("CI_RUNNER_DESCRIPTION")
if runner: if runner:
self.site += f" ({runner})" self.site += " ({0})".format(runner)
# track current spec, if any # track current spec, if any
self.current_spec = None self.current_spec = None
@@ -2296,13 +2260,21 @@ def build_name(self):
         Returns: (str) current spec's CDash build name."""

         spec = self.current_spec
         if spec:
-            build_name = f"{spec.name}@{spec.version}%{spec.compiler} \
-hash={spec.dag_hash()} arch={spec.architecture} ({self.build_group})"
-            tty.debug(f"Generated CDash build name ({build_name}) from the {spec.name}")
+            build_name = "{0}@{1}%{2} hash={3} arch={4} ({5})".format(
+                spec.name,
+                spec.version,
+                spec.compiler,
+                spec.dag_hash(),
+                spec.architecture,
+                self.build_group,
+            )
+            tty.debug(
+                "Generated CDash build name ({0}) from the {1}".format(build_name, spec.name)
+            )
             return build_name

         build_name = os.environ.get("SPACK_CDASH_BUILD_NAME")
-        tty.debug(f"Using CDash build name ({build_name}) from the environment")
+        tty.debug("Using CDash build name ({0}) from the environment".format(build_name))
         return build_name
@property # type: ignore @property # type: ignore
@@ -2316,25 +2288,25 @@ def build_stamp(self):
Returns: (str) current CDash build stamp""" Returns: (str) current CDash build stamp"""
build_stamp = os.environ.get("SPACK_CDASH_BUILD_STAMP") build_stamp = os.environ.get("SPACK_CDASH_BUILD_STAMP")
if build_stamp: if build_stamp:
tty.debug(f"Using build stamp ({build_stamp}) from the environment") tty.debug("Using build stamp ({0}) from the environment".format(build_stamp))
return build_stamp return build_stamp
build_stamp = cdash_build_stamp(self.build_group, time.time()) build_stamp = cdash_build_stamp(self.build_group, time.time())
tty.debug(f"Generated new build stamp ({build_stamp})") tty.debug("Generated new build stamp ({0})".format(build_stamp))
return build_stamp return build_stamp
@property # type: ignore @property # type: ignore
@memoized @memoized
def project_enc(self): def project_enc(self):
tty.debug(f"Encoding project ({type(self.project)}): {self.project})") tty.debug("Encoding project ({0}): {1})".format(type(self.project), self.project))
encode = urlencode({"project": self.project}) encode = urlencode({"project": self.project})
index = encode.find("=") + 1 index = encode.find("=") + 1
return encode[index:] return encode[index:]
@property @property
def upload_url(self): def upload_url(self):
url_format = f"{self.url}/submit.php?project={self.project_enc}" url_format = "{0}/submit.php?project={1}"
return url_format return url_format.format(self.url, self.project_enc)
def copy_test_results(self, source, dest): def copy_test_results(self, source, dest):
"""Copy test results to artifacts directory.""" """Copy test results to artifacts directory."""
@@ -2352,7 +2324,7 @@ def create_buildgroup(self, opener, headers, url, group_name, group_type):
response_code = response.getcode() response_code = response.getcode()
if response_code not in [200, 201]: if response_code not in [200, 201]:
msg = f"Creating buildgroup failed (response code = {response_code})" msg = "Creating buildgroup failed (response code = {0})".format(response_code)
tty.warn(msg) tty.warn(msg)
return None return None
@@ -2363,10 +2335,10 @@ def create_buildgroup(self, opener, headers, url, group_name, group_type):
return build_group_id return build_group_id
def populate_buildgroup(self, job_names): def populate_buildgroup(self, job_names):
url = f"{self.url}/api/v1/buildgroup.php" url = "{0}/api/v1/buildgroup.php".format(self.url)
headers = { headers = {
"Authorization": f"Bearer {self.auth_token}", "Authorization": "Bearer {0}".format(self.auth_token),
"Content-Type": "application/json", "Content-Type": "application/json",
} }
@@ -2374,11 +2346,11 @@ def populate_buildgroup(self, job_names):
parent_group_id = self.create_buildgroup(opener, headers, url, self.build_group, "Daily") parent_group_id = self.create_buildgroup(opener, headers, url, self.build_group, "Daily")
group_id = self.create_buildgroup( group_id = self.create_buildgroup(
opener, headers, url, f"Latest {self.build_group}", "Latest" opener, headers, url, "Latest {0}".format(self.build_group), "Latest"
) )
if not parent_group_id or not group_id: if not parent_group_id or not group_id:
msg = f"Failed to create or retrieve buildgroups for {self.build_group}" msg = "Failed to create or retrieve buildgroups for {0}".format(self.build_group)
tty.warn(msg) tty.warn(msg)
return return
@@ -2398,7 +2370,7 @@ def populate_buildgroup(self, job_names):
response_code = response.getcode() response_code = response.getcode()
if response_code != 200: if response_code != 200:
msg = f"Error response code ({response_code}) in populate_buildgroup" msg = "Error response code ({0}) in populate_buildgroup".format(response_code)
tty.warn(msg) tty.warn(msg)
def report_skipped(self, spec: spack.spec.Spec, report_dir: str, reason: Optional[str]): def report_skipped(self, spec: spack.spec.Spec, report_dir: str, reason: Optional[str]):


@@ -275,37 +275,23 @@ def setup_parser(subparser: argparse.ArgumentParser):
     # Sync buildcache entries from one mirror to another
     sync = subparsers.add_parser("sync", help=sync_fn.__doc__)
-    sync_manifest_source = sync.add_argument_group(
-        "Manifest Source",
-        "Specify a list of build cache objects to sync using manifest file(s)."
-        'This option takes the place of the "source mirror" for synchronization'
-        'and optionally takes a "destination mirror" ',
-    )
-    sync_manifest_source.add_argument(
-        "--manifest-glob", help="a quoted glob pattern identifying CI rebuild manifest files"
-    )
-    sync_source_mirror = sync.add_argument_group(
-        "Named Source",
-        "Specify a single registered source mirror to synchronize from. This option requires"
-        "the specification of a destination mirror.",
-    )
-    sync_source_mirror.add_argument(
-        "src_mirror",
-        metavar="source mirror",
-        nargs="?",
-        type=arguments.mirror_name_or_url,
-        help="source mirror name, path, or URL",
-    )
-    sync.add_argument(
-        "dest_mirror",
-        metavar="destination mirror",
-        nargs="?",
-        type=arguments.mirror_name_or_url,
-        help="destination mirror name, path, or URL",
-    )
+    sync.add_argument(
+        "--manifest-glob", help="a quoted glob pattern identifying copy manifest files"
+    )
+    sync.add_argument(
+        "src_mirror",
+        metavar="source mirror",
+        type=arguments.mirror_name_or_url,
+        nargs="?",
+        help="source mirror name, path, or URL",
+    )
+    sync.add_argument(
+        "dest_mirror",
+        metavar="destination mirror",
+        type=arguments.mirror_name_or_url,
+        nargs="?",
+        help="destination mirror name, path, or URL",
+    )
     sync.set_defaults(func=sync_fn)
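Roughly, the two parser layouts above map to these invocations; the mirror names and the glob pattern below are placeholders, not values taken from the diff:

    # copy cache entries from one configured mirror to another
    spack buildcache sync my-source-mirror my-dest-mirror

    # copy the objects listed in rebuild manifests; with the grouped parser on
    # the left, the optional positional mirror acts as an override destination
    spack buildcache sync --manifest-glob '/tmp/manifests/*.json' my-dest-mirror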
# Update buildcache index without copying any additional packages # Update buildcache index without copying any additional packages
@@ -608,15 +594,6 @@ def _put_manifest(
base_manifest, base_config = base_images[architecture] base_manifest, base_config = base_images[architecture]
env = _retrieve_env_dict_from_config(base_config) env = _retrieve_env_dict_from_config(base_config)
# If the base image uses `vnd.docker.distribution.manifest.v2+json`, then we use that too.
# This is because Singularity / Apptainer is very strict about not mixing them.
base_manifest_mediaType = base_manifest.get(
"mediaType", "application/vnd.oci.image.manifest.v1+json"
)
use_docker_format = (
base_manifest_mediaType == "application/vnd.docker.distribution.manifest.v2+json"
)
spack.user_environment.environment_modifications_for_specs(*specs).apply_modifications(env) spack.user_environment.environment_modifications_for_specs(*specs).apply_modifications(env)
# Create an oci.image.config file # Create an oci.image.config file
@@ -648,8 +625,8 @@ def _put_manifest(
# Upload the config file # Upload the config file
upload_blob_with_retry(image_ref, file=config_file, digest=config_file_checksum) upload_blob_with_retry(image_ref, file=config_file, digest=config_file_checksum)
manifest = { oci_manifest = {
"mediaType": base_manifest_mediaType, "mediaType": "application/vnd.oci.image.manifest.v1+json",
"schemaVersion": 2, "schemaVersion": 2,
"config": { "config": {
"mediaType": base_manifest["config"]["mediaType"], "mediaType": base_manifest["config"]["mediaType"],
@@ -660,11 +637,7 @@ def _put_manifest(
*(layer for layer in base_manifest["layers"]), *(layer for layer in base_manifest["layers"]),
*( *(
{ {
"mediaType": ( "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"application/vnd.docker.image.rootfs.diff.tar.gzip"
if use_docker_format
else "application/vnd.oci.image.layer.v1.tar+gzip"
),
"digest": str(checksums[s.dag_hash()].compressed_digest), "digest": str(checksums[s.dag_hash()].compressed_digest),
"size": checksums[s.dag_hash()].size, "size": checksums[s.dag_hash()].size,
} }
@@ -673,11 +646,11 @@ def _put_manifest(
], ],
} }
if not use_docker_format and annotations: if annotations:
manifest["annotations"] = annotations oci_manifest["annotations"] = annotations
# Finally upload the manifest # Finally upload the manifest
upload_manifest_with_retry(image_ref, manifest=manifest) upload_manifest_with_retry(image_ref, oci_manifest=oci_manifest)
# delete the config file # delete the config file
os.unlink(config_file) os.unlink(config_file)
@@ -1084,17 +1057,7 @@ def sync_fn(args):
     requires an active environment in order to know which specs to sync
     """
     if args.manifest_glob:
-        # Passing the args.src_mirror here because it is not possible to
-        # have the destination be required when specifying a named source
-        # mirror and optional for the --manifest-glob argument. In the case
-        # of manifest glob sync, the source mirror positional argument is the
-        # destination mirror if it is specified. If there are two mirrors
-        # specified, the second is ignored and the first is the override
-        # destination.
-        if args.dest_mirror:
-            tty.warn(f"Ignoring unused argument: {args.dest_mirror.name}")
-        manifest_copy(glob.glob(args.manifest_glob), args.src_mirror)
+        manifest_copy(glob.glob(args.manifest_glob))
         return 0

     if args.src_mirror is None or args.dest_mirror is None:
@@ -1145,7 +1108,7 @@ def sync_fn(args):
     shutil.rmtree(tmpdir)


-def manifest_copy(manifest_file_list, dest_mirror=None):
+def manifest_copy(manifest_file_list):
     """Read manifest files containing information about specific specs to copy
     from source to destination, remove duplicates since any binary package for
     a given hash should be the same as any other, and copy all files specified
@@ -1159,17 +1122,10 @@ def manifest_copy(manifest_file_list, dest_mirror=None):
                 # Last duplicate hash wins
                 deduped_manifest[spec_hash] = copy_list

-    build_cache_dir = bindist.build_cache_relative_path()
     for spec_hash, copy_list in deduped_manifest.items():
         for copy_file in copy_list:
-            dest = copy_file["dest"]
-            if dest_mirror:
-                src_relative_path = os.path.join(
-                    build_cache_dir, copy_file["src"].rsplit(build_cache_dir, 1)[1].lstrip("/")
-                )
-                dest = url_util.join(dest_mirror.push_url, src_relative_path)
-            tty.debug("copying {0} to {1}".format(copy_file["src"], dest))
-            copy_buildcache_file(copy_file["src"], dest)
+            tty.debug("copying {0} to {1}".format(copy_file["src"], copy_file["dest"]))
+            copy_buildcache_file(copy_file["src"], copy_file["dest"])


 def update_index(mirror: spack.mirror.Mirror, update_keys=False):
@@ -1196,18 +1152,14 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
             url, bindist.build_cache_relative_path(), bindist.build_cache_keys_relative_path()
         )

-        try:
-            bindist.generate_key_index(keys_url)
-        except bindist.CannotListKeys as e:
-            # Do not error out if listing keys went wrong. This usually means that the _gpg path
-            # does not exist. TODO: distinguish between this and other errors.
-            tty.warn(f"did not update the key index: {e}")
+        bindist.generate_key_index(keys_url)


 def update_index_fn(args):
     """update a buildcache index"""
-    return update_index(args.mirror, update_keys=args.keys)
+    update_index(args.mirror, update_keys=args.keys)


 def buildcache(parser, args):
-    return args.func(args)
+    if args.func:
+        args.func(args)
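A hedged usage sketch for the update-index entry point shown above; the mirror name is a placeholder:

    # rebuild the package index of a binary mirror; --keys also refreshes the key index
    spack buildcache update-index --keys my-mirror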


@@ -183,7 +183,7 @@ def checksum(parser, args):
     print()

     if args.add_to_package:
-        add_versions_to_package(pkg, version_lines, args.batch)
+        add_versions_to_package(pkg, version_lines)


 def print_checksum_status(pkg: PackageBase, version_hashes: dict):
@@ -229,7 +229,7 @@ def print_checksum_status(pkg: PackageBase, version_hashes: dict):
         tty.die("Invalid checksums found.")


-def add_versions_to_package(pkg: PackageBase, version_lines: str, is_batch: bool):
+def add_versions_to_package(pkg: PackageBase, version_lines: str):
     """
     Add checksummed versions to a package's instructions and open a user's
     editor so they may double check the work of the function.
@@ -282,5 +282,5 @@ def add_versions_to_package(pkg: PackageBase, version_lines: str, is_batch: bool
     tty.msg(f"Added {num_versions_added} new versions to {pkg.name}")
     tty.msg(f"Open {filename} to review the additions.")

-    if sys.stdout.isatty() and not is_batch:
+    if sys.stdout.isatty():
         editor(filename)
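A plausible invocation of the left-hand behaviour, with a placeholder package name: with --batch, the checksummed versions are written into the recipe without opening an editor afterwards.

    spack checksum --add-to-package --batch zlib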


@@ -14,7 +14,6 @@
import spack.binary_distribution as bindist import spack.binary_distribution as bindist
import spack.ci as spack_ci import spack.ci as spack_ci
import spack.cmd
import spack.cmd.buildcache as buildcache import spack.cmd.buildcache as buildcache
import spack.config as cfg import spack.config as cfg
import spack.environment as ev import spack.environment as ev
@@ -33,7 +32,6 @@
SPACK_COMMAND = "spack" SPACK_COMMAND = "spack"
MAKE_COMMAND = "make" MAKE_COMMAND = "make"
INSTALL_FAIL_CODE = 1 INSTALL_FAIL_CODE = 1
FAILED_CREATE_BUILDCACHE_CODE = 100
def deindent(desc): def deindent(desc):
@@ -707,9 +705,11 @@ def ci_rebuild(args):
cdash_handler.report_skipped(job_spec, reports_dir, reason=msg) cdash_handler.report_skipped(job_spec, reports_dir, reason=msg)
cdash_handler.copy_test_results(reports_dir, job_test_dir) cdash_handler.copy_test_results(reports_dir, job_test_dir)
# If the install succeeded, create a buildcache entry for this job spec
# and push it to one or more mirrors. If the install did not succeed,
# print out some instructions on how to reproduce this build failure
# outside of the pipeline environment.
if install_exit_code == 0: if install_exit_code == 0:
# If the install succeeded, push it to one or more mirrors. Failure to push to any mirror
# will result in a non-zero exit code. Pushing is best-effort.
mirror_urls = [buildcache_mirror_url] mirror_urls = [buildcache_mirror_url]
# TODO: Remove this block in Spack 0.23 # TODO: Remove this block in Spack 0.23
@@ -721,12 +721,13 @@ def ci_rebuild(args):
destination_mirror_urls=mirror_urls, destination_mirror_urls=mirror_urls,
sign_binaries=spack_ci.can_sign_binaries(), sign_binaries=spack_ci.can_sign_binaries(),
): ):
if not result.success: msg = tty.msg if result.success else tty.warn
install_exit_code = FAILED_CREATE_BUILDCACHE_CODE msg(
(tty.msg if result.success else tty.error)( "{} {} to {}".format(
f'{"Pushed" if result.success else "Failed to push"} ' "Pushed" if result.success else "Failed to push",
f'{job_spec.format("{name}{@version}{/hash:7}", color=clr.get_color_when())} ' job_spec.format("{name}{@version}{/hash:7}", color=clr.get_color_when()),
f"to {result.url}" result.url,
)
) )
# If this is a develop pipeline, check if the spec that we just built is # If this is a develop pipeline, check if the spec that we just built is
@@ -747,22 +748,22 @@ def ci_rebuild(args):
tty.warn(msg.format(broken_spec_path, err)) tty.warn(msg.format(broken_spec_path, err))
else: else:
# If the install did not succeed, print out some instructions on how to reproduce this
# build failure outside of the pipeline environment.
tty.debug("spack install exited non-zero, will not create buildcache") tty.debug("spack install exited non-zero, will not create buildcache")
api_root_url = os.environ.get("CI_API_V4_URL") api_root_url = os.environ.get("CI_API_V4_URL")
ci_project_id = os.environ.get("CI_PROJECT_ID") ci_project_id = os.environ.get("CI_PROJECT_ID")
ci_job_id = os.environ.get("CI_JOB_ID") ci_job_id = os.environ.get("CI_JOB_ID")
repro_job_url = f"{api_root_url}/projects/{ci_project_id}/jobs/{ci_job_id}/artifacts" repro_job_url = "{0}/projects/{1}/jobs/{2}/artifacts".format(
api_root_url, ci_project_id, ci_job_id
)
# Control characters cause this to be printed in blue so it stands out # Control characters cause this to be printed in blue so it stands out
print( reproduce_msg = """
f"""
\033[34mTo reproduce this build locally, run: \033[34mTo reproduce this build locally, run:
spack ci reproduce-build {repro_job_url} [--working-dir <dir>] [--autostart] spack ci reproduce-build {0} [--working-dir <dir>] [--autostart]
If this project does not have public pipelines, you will need to first: If this project does not have public pipelines, you will need to first:
@@ -770,9 +771,12 @@ def ci_rebuild(args):
... then follow the printed instructions.\033[0;0m ... then follow the printed instructions.\033[0;0m
""" """.format(
repro_job_url
) )
print(reproduce_msg)
rebuild_timer.stop() rebuild_timer.stop()
try: try:
with open("install_timers.json", "w") as timelog: with open("install_timers.json", "w") as timelog:


@@ -570,14 +570,6 @@ def add_concretizer_args(subparser):
default=None, default=None,
help="reuse installed dependencies only", help="reuse installed dependencies only",
) )
subgroup.add_argument(
"--deprecated",
action=ConfigSetAction,
dest="config:deprecated",
const=True,
default=None,
help="allow concretizer to select deprecated versions",
)
def add_connection_args(subparser, add_help): def add_connection_args(subparser, add_help):


@@ -115,7 +115,7 @@ def emulate_env_utility(cmd_name, context: Context, args):
f"Not all dependencies of {spec.name} are installed. " f"Not all dependencies of {spec.name} are installed. "
f"Cannot setup {context} environment:", f"Cannot setup {context} environment:",
spec.tree( spec.tree(
status_fn=spack.spec.Spec.install_status, install_status=True,
hashlen=7, hashlen=7,
hashes=True, hashes=True,
# This shows more than necessary, but we cannot dynamically change deptypes # This shows more than necessary, but we cannot dynamically change deptypes


@@ -89,7 +89,7 @@ def compiler_find(args):
paths, scope=None, mixed_toolchain=args.mixed_toolchain paths, scope=None, mixed_toolchain=args.mixed_toolchain
) )
if new_compilers: if new_compilers:
spack.compilers.add_compilers_to_config(new_compilers, scope=args.scope) spack.compilers.add_compilers_to_config(new_compilers, scope=args.scope, init_config=False)
n = len(new_compilers) n = len(new_compilers)
s = "s" if n > 1 else "" s = "s" if n > 1 else ""


@@ -19,7 +19,7 @@
def setup_parser(subparser): def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["jobs", "no_checksum", "spec"]) arguments.add_common_arguments(subparser, ["jobs"])
subparser.add_argument( subparser.add_argument(
"-d", "-d",
"--source-path", "--source-path",
@@ -34,6 +34,7 @@ def setup_parser(subparser):
dest="ignore_deps", dest="ignore_deps",
help="do not try to install dependencies of requested packages", help="do not try to install dependencies of requested packages",
) )
arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument( subparser.add_argument(
"--keep-prefix", "--keep-prefix",
action="store_true", action="store_true",
@@ -62,6 +63,7 @@ def setup_parser(subparser):
choices=["root", "all"], choices=["root", "all"],
help="run tests on only root packages or all packages", help="run tests on only root packages or all packages",
) )
arguments.add_common_arguments(subparser, ["spec"])
stop_group = subparser.add_mutually_exclusive_group() stop_group = subparser.add_mutually_exclusive_group()
stop_group.add_argument( stop_group.add_argument(
@@ -123,6 +125,9 @@ def dev_build(self, args):
if args.no_checksum: if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line") spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
tests = False tests = False
if args.test == "all": if args.test == "all":
tests = True tests = True


@@ -9,7 +9,6 @@
import shutil import shutil
import sys import sys
import tempfile import tempfile
from pathlib import Path
from typing import Optional from typing import Optional
import llnl.string as string import llnl.string as string
@@ -45,7 +44,6 @@
"deactivate", "deactivate",
"create", "create",
["remove", "rm"], ["remove", "rm"],
["rename", "mv"],
["list", "ls"], ["list", "ls"],
["status", "st"], ["status", "st"],
"loads", "loads",
@@ -272,8 +270,7 @@ def create_temp_env_directory():
def _tty_info(msg): def _tty_info(msg):
"""tty.info like function that prints the equivalent printf statement for eval.""" """tty.info like function that prints the equivalent printf statement for eval."""
decorated = f'{colorize("@*b{==>}")} {msg}\n' decorated = f'{colorize("@*b{==>}")} {msg}\n'
executor = "echo" if sys.platform == "win32" else "printf" print(f"printf {shlex.quote(decorated)};")
print(f"{executor} {shlex.quote(decorated)};")
def env_activate(args): def env_activate(args):
@@ -474,82 +471,11 @@ def env_remove(args):
tty.msg(f"Successfully removed environment '{bad_env_name}'") tty.msg(f"Successfully removed environment '{bad_env_name}'")
#
# env rename
#
def env_rename_setup_parser(subparser):
"""rename an existing environment"""
subparser.add_argument(
"mv_from", metavar="from", help="name (or path) of existing environment"
)
subparser.add_argument(
"mv_to", metavar="to", help="new name (or path) for existing environment"
)
subparser.add_argument(
"-d",
"--dir",
action="store_true",
help="the specified arguments correspond to directory paths",
)
subparser.add_argument(
"-f", "--force", action="store_true", help="allow overwriting of an existing environment"
)
def env_rename(args):
"""Rename an environment.
This renames a managed environment or moves an anonymous environment.
"""
# Directory option has been specified
if args.dir:
if not ev.is_env_dir(args.mv_from):
tty.die("The specified path does not correspond to a valid spack environment")
from_path = Path(args.mv_from)
if not args.force:
if ev.is_env_dir(args.mv_to):
tty.die(
"The new path corresponds to an existing environment;"
" specify the --force flag to overwrite it."
)
if Path(args.mv_to).exists():
tty.die("The new path already exists; specify the --force flag to overwrite it.")
to_path = Path(args.mv_to)
# Name option being used
elif ev.exists(args.mv_from):
from_path = ev.environment.environment_dir_from_name(args.mv_from)
if not args.force and ev.exists(args.mv_to):
tty.die(
"The new name corresponds to an existing environment;"
" specify the --force flag to overwrite it."
)
to_path = ev.environment.root(args.mv_to)
# Neither
else:
tty.die("The specified name does not correspond to a managed spack environment")
# Guard against renaming from or to an active environment
active_env = ev.active_environment()
if active_env:
from_env = ev.Environment(from_path)
if from_env.path == active_env.path:
tty.die("Cannot rename active environment")
if to_path == active_env.path:
tty.die(f"{args.mv_to} is an active environment")
shutil.rmtree(to_path, ignore_errors=True)
fs.rename(from_path, to_path)
tty.msg(f"Successfully renamed environment {args.mv_from} to {args.mv_to}")
# #
# env list # env list
# #
def env_list_setup_parser(subparser): def env_list_setup_parser(subparser):
"""list managed environments""" """list available environments"""
def env_list(args): def env_list(args):


@@ -18,7 +18,6 @@
import spack.cray_manifest as cray_manifest import spack.cray_manifest as cray_manifest
import spack.detection import spack.detection
import spack.error import spack.error
import spack.repo
import spack.util.environment import spack.util.environment
from spack.cmd.common import arguments from spack.cmd.common import arguments
@@ -153,9 +152,9 @@ def external_find(args):
def packages_to_search_for( def packages_to_search_for(
*, names: Optional[List[str]], tags: List[str], exclude: Optional[List[str]] *, names: Optional[List[str]], tags: List[str], exclude: Optional[List[str]]
): ):
result = list( result = []
{pkg for tag in tags for pkg in spack.repo.PATH.packages_with_tags(tag, full=True)} for current_tag in tags:
) result.extend(spack.repo.PATH.packages_with_tags(current_tag, full=True))
if names: if names:
# Match both fully qualified and unqualified # Match both fully qualified and unqualified


@@ -18,7 +18,7 @@
def setup_parser(subparser): def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum", "specs"]) arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument( subparser.add_argument(
"-m", "-m",
"--missing", "--missing",
@@ -28,7 +28,7 @@ def setup_parser(subparser):
subparser.add_argument( subparser.add_argument(
"-D", "--dependencies", action="store_true", help="also fetch all dependencies" "-D", "--dependencies", action="store_true", help="also fetch all dependencies"
) )
arguments.add_concretizer_args(subparser) arguments.add_common_arguments(subparser, ["specs"])
subparser.epilog = ( subparser.epilog = (
"With an active environment, the specs " "With an active environment, the specs "
"parameter can be omitted. In this case all (uninstalled" "parameter can be omitted. In this case all (uninstalled"
@@ -40,6 +40,9 @@ def fetch(parser, args):
if args.no_checksum: if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line") spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
if args.specs: if args.specs:
specs = spack.cmd.parse_specs(args.specs, concretize=True) specs = spack.cmd.parse_specs(args.specs, concretize=True)
else: else:


@@ -140,12 +140,6 @@ def setup_parser(subparser):
subparser.add_argument( subparser.add_argument(
"--only-deprecated", action="store_true", help="show only deprecated packages" "--only-deprecated", action="store_true", help="show only deprecated packages"
) )
subparser.add_argument(
"--install-tree",
action="store",
default="all",
help="Install trees to query: 'all' (default), 'local', 'upstream', upstream name or path",
)
subparser.add_argument("--start-date", help="earliest date of installation [YYYY-MM-DD]") subparser.add_argument("--start-date", help="earliest date of installation [YYYY-MM-DD]")
subparser.add_argument("--end-date", help="latest date of installation [YYYY-MM-DD]") subparser.add_argument("--end-date", help="latest date of installation [YYYY-MM-DD]")
@@ -174,12 +168,6 @@ def query_arguments(args):
q_args = {"installed": installed, "known": known, "explicit": explicit} q_args = {"installed": installed, "known": known, "explicit": explicit}
install_tree = args.install_tree
upstreams = spack.config.get("upstreams", {})
if install_tree in upstreams.keys():
install_tree = upstreams[install_tree]["install_tree"]
q_args["install_tree"] = install_tree
# Time window of installation # Time window of installation
for attribute in ("start_date", "end_date"): for attribute in ("start_date", "end_date"):
date = getattr(args, attribute) date = getattr(args, attribute)


@@ -176,7 +176,7 @@ def setup_parser(subparser):
dest="install_source", dest="install_source",
help="install source files in prefix", help="install source files in prefix",
) )
arguments.add_common_arguments(subparser, ["no_checksum"]) arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
subparser.add_argument( subparser.add_argument(
"-v", "-v",
"--verbose", "--verbose",
@@ -326,6 +326,9 @@ def install(parser, args):
if args.no_checksum: if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line") spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
if args.log_file and not args.log_format: if args.log_file and not args.log_format:
msg = "the '--log-format' must be specified when using '--log-file'" msg = "the '--log-format' must be specified when using '--log-file'"
tty.die(msg) tty.die(msg)
@@ -420,9 +423,10 @@ def install_with_active_env(env: ev.Environment, args, install_kwargs, reporter_
with reporter_factory(specs_to_install): with reporter_factory(specs_to_install):
env.install_specs(specs_to_install, **install_kwargs) env.install_specs(specs_to_install, **install_kwargs)
finally: finally:
if env.views: # TODO: this is doing way too much to trigger
with env.write_transaction(): # views and modules to be generated.
env.write(regenerate=True) with env.write_transaction():
env.write(regenerate=True)
def concrete_specs_from_cli(args, install_kwargs): def concrete_specs_from_cli(args, install_kwargs):


@@ -5,6 +5,8 @@
import sys import sys
import llnl.util.tty as tty
import spack.cmd import spack.cmd
import spack.cmd.find import spack.cmd.find
import spack.environment as ev import spack.environment as ev
@@ -68,6 +70,16 @@ def setup_parser(subparser):
help="load the first match if multiple packages match the spec", help="load the first match if multiple packages match the spec",
) )
subparser.add_argument(
"--only",
default="package,dependencies",
dest="things_to_load",
choices=["package", "dependencies"],
help="select whether to load the package and its dependencies\n\n"
"the default is to load the package and all dependencies. alternatively, "
"one can decide to load only the package or only the dependencies",
)
subparser.add_argument( subparser.add_argument(
"--list", "--list",
action="store_true", action="store_true",
@@ -98,6 +110,11 @@ def load(parser, args):
) )
return 1 return 1
if args.things_to_load != "package,dependencies":
tty.warn(
"The `--only` flag in spack load is deprecated and will be removed in Spack v0.22"
)
with spack.store.STORE.db.read_transaction(): with spack.store.STORE.db.read_transaction():
env_mod = uenv.environment_modifications_for_specs(*specs) env_mod = uenv.environment_modifications_for_specs(*specs)
for spec in specs: for spec in specs:


@@ -53,7 +53,6 @@ def setup_parser(subparser):
"-S", "--stages", action="store_true", help="top level stage directory" "-S", "--stages", action="store_true", help="top level stage directory"
) )
directories.add_argument( directories.add_argument(
"-c",
"--source-dir", "--source-dir",
action="store_true", action="store_true",
help="source directory for a spec (requires it to be staged first)", help="source directory for a spec (requires it to be staged first)",


@@ -28,7 +28,7 @@
def setup_parser(subparser): def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum"]) arguments.add_common_arguments(subparser, ["no_checksum", "deprecated"])
sp = subparser.add_subparsers(metavar="SUBCOMMAND", dest="mirror_command") sp = subparser.add_subparsers(metavar="SUBCOMMAND", dest="mirror_command")
@@ -72,7 +72,6 @@ def setup_parser(subparser):
" retrieve all versions of each package", " retrieve all versions of each package",
) )
arguments.add_common_arguments(create_parser, ["specs"]) arguments.add_common_arguments(create_parser, ["specs"])
arguments.add_concretizer_args(create_parser)
# Destroy # Destroy
destroy_parser = sp.add_parser("destroy", help=mirror_destroy.__doc__) destroy_parser = sp.add_parser("destroy", help=mirror_destroy.__doc__)
@@ -108,11 +107,6 @@ def setup_parser(subparser):
"and source use `--type binary --type source` (default)" "and source use `--type binary --type source` (default)"
), ),
) )
add_parser.add_argument(
"--autopush",
action="store_true",
help=("set mirror to push automatically after installation"),
)
add_parser_signed = add_parser.add_mutually_exclusive_group(required=False) add_parser_signed = add_parser.add_mutually_exclusive_group(required=False)
add_parser_signed.add_argument( add_parser_signed.add_argument(
"--unsigned", "--unsigned",
@@ -180,21 +174,6 @@ def setup_parser(subparser):
), ),
) )
set_parser.add_argument("--url", help="url of mirror directory from 'spack mirror create'") set_parser.add_argument("--url", help="url of mirror directory from 'spack mirror create'")
set_parser_autopush = set_parser.add_mutually_exclusive_group(required=False)
set_parser_autopush.add_argument(
"--autopush",
help="set mirror to push automatically after installation",
action="store_true",
default=None,
dest="autopush",
)
set_parser_autopush.add_argument(
"--no-autopush",
help="set mirror to not push automatically after installation",
action="store_false",
default=None,
dest="autopush",
)
set_parser_unsigned = set_parser.add_mutually_exclusive_group(required=False) set_parser_unsigned = set_parser.add_mutually_exclusive_group(required=False)
set_parser_unsigned.add_argument( set_parser_unsigned.add_argument(
"--unsigned", "--unsigned",
@@ -238,7 +217,6 @@ def mirror_add(args):
or args.type or args.type
or args.oci_username or args.oci_username
or args.oci_password or args.oci_password
or args.autopush
or args.signed is not None or args.signed is not None
): ):
connection = {"url": args.url} connection = {"url": args.url}
@@ -255,8 +233,6 @@ def mirror_add(args):
if args.type: if args.type:
connection["binary"] = "binary" in args.type connection["binary"] = "binary" in args.type
connection["source"] = "source" in args.type connection["source"] = "source" in args.type
if args.autopush:
connection["autopush"] = args.autopush
if args.signed is not None: if args.signed is not None:
connection["signed"] = args.signed connection["signed"] = args.signed
mirror = spack.mirror.Mirror(connection, name=args.name) mirror = spack.mirror.Mirror(connection, name=args.name)
@@ -293,8 +269,6 @@ def _configure_mirror(args):
changes["access_pair"] = [args.oci_username, args.oci_password] changes["access_pair"] = [args.oci_username, args.oci_password]
if getattr(args, "signed", None) is not None: if getattr(args, "signed", None) is not None:
changes["signed"] = args.signed changes["signed"] = args.signed
if getattr(args, "autopush", None) is not None:
changes["autopush"] = args.autopush
# argparse cannot distinguish between --binary and --no-binary when same dest :( # argparse cannot distinguish between --binary and --no-binary when same dest :(
# notice that set-url does not have these args, so getattr # notice that set-url does not have these args, so getattr
@@ -575,4 +549,7 @@ def mirror(parser, args):
     if args.no_checksum:
         spack.config.set("config:checksum", False, scope="command_line")

+    if args.deprecated:
+        spack.config.set("config:deprecated", True, scope="command_line")
+
     action[args.mirror_command](args)
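Hedged examples of the autopush flags handled above; the mirror name and path are placeholders:

    # register a push mirror that receives binaries automatically after installs
    spack mirror add --autopush --unsigned my-cache /path/to/cache

    # turn the behaviour off again on an existing mirror
    spack mirror set --no-autopush my-cache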


@@ -19,7 +19,7 @@
def setup_parser(subparser): def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum", "specs"]) arguments.add_common_arguments(subparser, ["no_checksum", "deprecated", "specs"])
arguments.add_concretizer_args(subparser) arguments.add_concretizer_args(subparser)
@@ -33,6 +33,9 @@ def patch(parser, args):
if args.no_checksum: if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line") spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
specs = spack.cmd.parse_specs(args.specs, concretize=False) specs = spack.cmd.parse_specs(args.specs, concretize=False)
for spec in specs: for spec in specs:
_patch(spack.cmd.matching_spec_from_env(spec).package) _patch(spack.cmd.matching_spec_from_env(spec).package)


@@ -116,38 +116,39 @@ def ipython_interpreter(args):
 def python_interpreter(args):
     """A python interpreter is the default interpreter"""
-    if args.python_args and not args.python_command:
-        sys.argv = args.python_args
-        runpy.run_path(args.python_args[0], run_name="__main__")
-    else:
-        # Fake a main python shell by setting __name__ to __main__.
-        console = code.InteractiveConsole({"__name__": "__main__", "spack": spack})
-        if "PYTHONSTARTUP" in os.environ:
-            startup_file = os.environ["PYTHONSTARTUP"]
-            if os.path.isfile(startup_file):
-                with open(startup_file) as startup:
-                    console.runsource(startup.read(), startup_file, "exec")
-        if args.python_command:
-            propagate_exceptions_from(console)
-            console.runsource(args.python_command)
-        else:
-            # Provides readline support, allowing user to use arrow keys
-            console.push("import readline")
-            # Provide tabcompletion
-            console.push("from rlcompleter import Completer")
-            console.push("readline.set_completer(Completer(locals()).complete)")
-            console.push('readline.parse_and_bind("tab: complete")')
-            console.interact(
-                "Spack version %s\nPython %s, %s %s"
-                % (
-                    spack.spack_version,
-                    platform.python_version(),
-                    platform.system(),
-                    platform.machine(),
-                )
-            )
+    # Fake a main python shell by setting __name__ to __main__.
+    console = code.InteractiveConsole({"__name__": "__main__", "spack": spack})
+    if "PYTHONSTARTUP" in os.environ:
+        startup_file = os.environ["PYTHONSTARTUP"]
+        if os.path.isfile(startup_file):
+            with open(startup_file) as startup:
+                console.runsource(startup.read(), startup_file, "exec")
+
+    if args.python_command:
+        propagate_exceptions_from(console)
+        console.runsource(args.python_command)
+    elif args.python_args:
+        propagate_exceptions_from(console)
+        sys.argv = args.python_args
+        with open(args.python_args[0]) as file:
+            console.runsource(file.read(), args.python_args[0], "exec")
+    else:
+        # Provides readline support, allowing user to use arrow keys
+        console.push("import readline")
+        # Provide tabcompletion
+        console.push("from rlcompleter import Completer")
+        console.push("readline.set_completer(Completer(locals()).complete)")
+        console.push('readline.parse_and_bind("tab: complete")')
+        console.interact(
+            "Spack version %s\nPython %s, %s %s"
+            % (
+                spack.spack_version,
+                platform.python_version(),
+                platform.system(),
+                platform.machine(),
+            )
+        )
def propagate_exceptions_from(console): def propagate_exceptions_from(console):


@@ -91,6 +91,7 @@ def setup_parser(subparser):
def _process_result(result, show, required_format, kwargs): def _process_result(result, show, required_format, kwargs):
result.raise_if_unsat()
opt, _, _ = min(result.answers) opt, _, _ = min(result.answers)
if ("opt" in show) and (not required_format): if ("opt" in show) and (not required_format):
tty.msg("Best of %d considered solutions." % result.nmodels) tty.msg("Best of %d considered solutions." % result.nmodels)
@@ -126,13 +127,14 @@ def _process_result(result, show, required_format, kwargs):
print() print()
if result.unsolved_specs and "solutions" in show: if result.unsolved_specs and "solutions" in show:
tty.msg(asp.Result.format_unsolved(result.unsolved_specs)) tty.msg("Unsolved specs")
for spec in result.unsolved_specs:
print(spec)
print()
def solve(parser, args): def solve(parser, args):
# these are the same options as `spack spec` # these are the same options as `spack spec`
install_status_fn = spack.spec.Spec.install_status
fmt = spack.spec.DISPLAY_FORMAT fmt = spack.spec.DISPLAY_FORMAT
if args.namespaces: if args.namespaces:
fmt = "{namespace}." + fmt fmt = "{namespace}." + fmt
@@ -142,7 +144,7 @@ def solve(parser, args):
"format": fmt, "format": fmt,
"hashlen": None if args.very_long else 7, "hashlen": None if args.very_long else 7,
"show_types": args.types, "show_types": args.types,
"status_fn": install_status_fn if args.install_status else None, "install_status": args.install_status,
"hashes": args.long or args.very_long, "hashes": args.long or args.very_long,
} }


@@ -75,8 +75,6 @@ def setup_parser(subparser):
 def spec(parser, args):
-    install_status_fn = spack.spec.Spec.install_status
-
     fmt = spack.spec.DISPLAY_FORMAT
     if args.namespaces:
         fmt = "{namespace}." + fmt
@@ -86,7 +84,7 @@ def spec(parser, args):
         "format": fmt,
         "hashlen": None if args.very_long else 7,
         "show_types": args.types,
-        "status_fn": install_status_fn if args.install_status else None,
+        "install_status": args.install_status,
     }

     # use a read transaction if we are getting install status for every
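Both variants feed the install-status column that spack spec prints; a minimal way to exercise it from the CLI, with a placeholder package name:

    spack spec --install-status --long zlib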


@@ -22,7 +22,7 @@
def setup_parser(subparser): def setup_parser(subparser):
arguments.add_common_arguments(subparser, ["no_checksum", "specs"]) arguments.add_common_arguments(subparser, ["no_checksum", "deprecated", "specs"])
subparser.add_argument( subparser.add_argument(
"-p", "--path", dest="path", help="path to stage package, does not add to spack tree" "-p", "--path", dest="path", help="path to stage package, does not add to spack tree"
) )
@@ -33,6 +33,9 @@ def stage(parser, args):
if args.no_checksum: if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line") spack.config.set("config:checksum", False, scope="command_line")
if args.deprecated:
spack.config.set("config:deprecated", True, scope="command_line")
if not args.specs: if not args.specs:
env = ev.active_environment() env = ev.active_environment()
if not env: if not env:


@@ -228,7 +228,7 @@ def create_reporter(args, specs_to_test, test_suite):
def test_list(args): def test_list(args):
"""list installed packages with available tests""" """list installed packages with available tests"""
tagged = spack.repo.PATH.packages_with_tags(*args.tag) if args.tag else set() tagged = set(spack.repo.PATH.packages_with_tags(*args.tag)) if args.tag else set()
def has_test_and_tags(pkg_class): def has_test_and_tags(pkg_class):
tests = spack.install_test.test_functions(pkg_class) tests = spack.install_test.test_functions(pkg_class)


@@ -34,13 +34,6 @@ def setup_parser(subparser):
default=False, default=False,
help="show full pytest help, with advanced options", help="show full pytest help, with advanced options",
) )
subparser.add_argument(
"-n",
"--numprocesses",
type=int,
default=1,
help="run tests in parallel up to this wide, default 1 for sequential",
)
# extra spack arguments to list tests # extra spack arguments to list tests
list_group = subparser.add_argument_group("listing tests") list_group = subparser.add_argument_group("listing tests")
@@ -236,16 +229,6 @@ def unit_test(parser, args, unknown_args):
if args.extension: if args.extension:
pytest_root = spack.extensions.load_extension(args.extension) pytest_root = spack.extensions.load_extension(args.extension)
if args.numprocesses is not None and args.numprocesses > 1:
pytest_args.extend(
[
"--dist",
"loadfile",
"--tx",
f"{args.numprocesses}*popen//python=spack-tmpconfig spack python",
]
)
# pytest.ini lives in the root of the spack repository. # pytest.ini lives in the root of the spack repository.
with llnl.util.filesystem.working_dir(pytest_root): with llnl.util.filesystem.working_dir(pytest_root):
if args.list: if args.list:


@@ -334,40 +334,6 @@ def __init__(
# used for version checks for API, e.g. C++11 flag # used for version checks for API, e.g. C++11 flag
self._real_version = None self._real_version = None
def __eq__(self, other):
return (
self.cc == other.cc
and self.cxx == other.cxx
and self.fc == other.fc
and self.f77 == other.f77
and self.spec == other.spec
and self.operating_system == other.operating_system
and self.target == other.target
and self.flags == other.flags
and self.modules == other.modules
and self.environment == other.environment
and self.extra_rpaths == other.extra_rpaths
and self.enable_implicit_rpaths == other.enable_implicit_rpaths
)
def __hash__(self):
return hash(
(
self.cc,
self.cxx,
self.fc,
self.f77,
self.spec,
self.operating_system,
self.target,
str(self.flags),
str(self.modules),
str(self.environment),
str(self.extra_rpaths),
self.enable_implicit_rpaths,
)
)
def verify_executables(self): def verify_executables(self):
"""Raise an error if any of the compiler executables is not valid. """Raise an error if any of the compiler executables is not valid.
@@ -423,7 +389,8 @@ def implicit_rpaths(self):
# Put CXX first since it has the most linking issues # Put CXX first since it has the most linking issues
# And because it has flags that affect linking # And because it has flags that affect linking
link_dirs = self._get_compiler_link_paths() exe_paths = [x for x in [self.cxx, self.cc, self.fc, self.f77] if x]
link_dirs = self._get_compiler_link_paths(exe_paths)
all_required_libs = list(self.required_libs) + Compiler._all_compiler_rpath_libraries all_required_libs = list(self.required_libs) + Compiler._all_compiler_rpath_libraries
return list(paths_containing_libs(link_dirs, all_required_libs)) return list(paths_containing_libs(link_dirs, all_required_libs))
@@ -436,33 +403,43 @@ def required_libs(self):
# By default every compiler returns the empty list # By default every compiler returns the empty list
return [] return []
-    def _get_compiler_link_paths(self):
-        cc = self.cc if self.cc else self.cxx
-        if not cc or not self.verbose_flag:
-            # Cannot determine implicit link paths without a compiler / verbose flag
+    def _get_compiler_link_paths(self, paths):
+        first_compiler = next((c for c in paths if c), None)
+        if not first_compiler:
+            return []
+        if not self.verbose_flag:
+            # In this case there is no mechanism to learn what link directories
+            # are used by the compiler
             return []

         # What flag types apply to first_compiler, in what order
-        if cc == self.cc:
-            flags = ["cflags", "cppflags", "ldflags"]
+        flags = ["cppflags", "ldflags"]
+        if first_compiler == self.cc:
+            flags = ["cflags"] + flags
+        elif first_compiler == self.cxx:
+            flags = ["cxxflags"] + flags
         else:
-            flags = ["cxxflags", "cppflags", "ldflags"]
+            flags.append("fflags")

         try:
             tmpdir = tempfile.mkdtemp(prefix="spack-implicit-link-info")
             fout = os.path.join(tmpdir, "output")
             fin = os.path.join(tmpdir, "main.c")

-            with open(fin, "w") as csource:
+            with open(fin, "w+") as csource:
                 csource.write(
-                    "int main(int argc, char* argv[]) { (void)argc; (void)argv; return 0; }\n"
+                    "int main(int argc, char* argv[]) { " "(void)argc; (void)argv; return 0; }\n"
                 )
-            cc_exe = spack.util.executable.Executable(cc)
+            compiler_exe = spack.util.executable.Executable(first_compiler)
             for flag_type in flags:
-                cc_exe.add_default_arg(*self.flags.get(flag_type, []))
+                for flag in self.flags.get(flag_type, []):
+                    compiler_exe.add_default_arg(flag)

+            output = ""
             with self.compiler_environment():
-                output = cc_exe(self.verbose_flag, fin, "-o", fout, output=str, error=str)
+                output = str(
+                    compiler_exe(self.verbose_flag, fin, "-o", fout, output=str, error=str)
+                )  # str for py2
             return _parse_non_system_link_dirs(output)
         except spack.util.executable.ProcessError as pe:
             tty.debug("ProcessError: Command exited with non-zero status: " + pe.long_message)


@@ -10,7 +10,6 @@
import itertools import itertools
import multiprocessing.pool import multiprocessing.pool
import os import os
import warnings
from typing import Dict, List, Optional, Tuple from typing import Dict, List, Optional, Tuple
import archspec.cpu import archspec.cpu
@@ -110,128 +109,29 @@ def _to_dict(compiler):
return {"compiler": d} return {"compiler": d}
def get_compiler_config( def get_compiler_config(scope=None, init_config=True):
configuration: "spack.config.Configuration",
*,
scope: Optional[str] = None,
init_config: bool = False,
) -> List[Dict]:
"""Return the compiler configuration for the specified architecture.""" """Return the compiler configuration for the specified architecture."""
config = configuration.get("compilers", scope=scope) or []
config = spack.config.get("compilers", scope=scope) or []
if config or not init_config: if config or not init_config:
return config return config
merged_config = configuration.get("compilers") merged_config = spack.config.get("compilers")
if merged_config: if merged_config:
# Config is empty for this scope
# Do not init config because there is a non-empty scope
return config return config
_init_compiler_config(configuration, scope=scope) _init_compiler_config(scope=scope)
config = configuration.get("compilers", scope=scope) config = spack.config.get("compilers", scope=scope)
return config return config
def get_compiler_config_from_packages( def _init_compiler_config(*, scope):
configuration: "spack.config.Configuration", *, scope: Optional[str] = None
) -> List[Dict]:
"""Return the compiler configuration from packages.yaml"""
config = configuration.get("packages", scope=scope)
if not config:
return []
packages = []
compiler_package_names = supported_compilers() + list(package_name_to_compiler_name.keys())
for name, entry in config.items():
if name not in compiler_package_names:
continue
externals_config = entry.get("externals", None)
if not externals_config:
continue
packages.extend(_compiler_config_from_package_config(externals_config))
return packages
def _compiler_config_from_package_config(config):
compilers = []
for entry in config:
compiler = _compiler_config_from_external(entry)
if compiler:
compilers.append(compiler)
return compilers
def _compiler_config_from_external(config):
spec = spack.spec.parse_with_version_concrete(config["spec"])
# use str(spec.versions) to allow `@x.y.z` instead of `@=x.y.z`
compiler_spec = spack.spec.CompilerSpec(
package_name_to_compiler_name.get(spec.name, spec.name), spec.version
)
extra_attributes = config.get("extra_attributes", {})
prefix = config.get("prefix", None)
compiler_class = class_for_compiler_name(compiler_spec.name)
paths = extra_attributes.get("paths", {})
compiler_langs = ["cc", "cxx", "fc", "f77"]
for lang in compiler_langs:
if paths.setdefault(lang, None):
continue
if not prefix:
continue
# Check for files that satisfy the naming scheme for this compiler
bindir = os.path.join(prefix, "bin")
for f, regex in itertools.product(os.listdir(bindir), compiler_class.search_regexps(lang)):
if regex.match(f):
paths[lang] = os.path.join(bindir, f)
if all(v is None for v in paths.values()):
return None
if not spec.architecture:
host_platform = spack.platforms.host()
operating_system = host_platform.operating_system("default_os")
target = host_platform.target("default_target").microarchitecture
else:
target = spec.target
if not target:
host_platform = spack.platforms.host()
target = host_platform.target("default_target").microarchitecture
operating_system = spec.os
if not operating_system:
host_platform = spack.platforms.host()
operating_system = host_platform.operating_system("default_os")
compiler_entry = {
"compiler": {
"spec": str(compiler_spec),
"paths": paths,
"flags": extra_attributes.get("flags", {}),
"operating_system": str(operating_system),
"target": str(target.family),
"modules": config.get("modules", []),
"environment": extra_attributes.get("environment", {}),
"extra_rpaths": extra_attributes.get("extra_rpaths", []),
"implicit_rpaths": extra_attributes.get("implicit_rpaths", None),
}
}
return compiler_entry
def _init_compiler_config(
configuration: "spack.config.Configuration", *, scope: Optional[str]
) -> None:
"""Compiler search used when Spack has no compilers.""" """Compiler search used when Spack has no compilers."""
compilers = find_compilers() compilers = find_compilers()
compilers_dict = [] compilers_dict = []
for compiler in compilers: for compiler in compilers:
compilers_dict.append(_to_dict(compiler)) compilers_dict.append(_to_dict(compiler))
configuration.set("compilers", compilers_dict, scope=scope) spack.config.set("compilers", compilers_dict, scope=scope)
def compiler_config_files(): def compiler_config_files():
@@ -242,22 +142,17 @@ def compiler_config_files():
compiler_config = config.get("compilers", scope=name)
if compiler_config:
config_files.append(config.get_config_filename(name, "compilers"))
compiler_config_from_packages = get_compiler_config_from_packages(config, scope=name)
if compiler_config_from_packages:
config_files.append(config.get_config_filename(name, "packages"))
return config_files
def add_compilers_to_config(compilers, scope=None):
def add_compilers_to_config(compilers, scope=None, init_config=True):
"""Add compilers to the config for the specified architecture.
Arguments:
compilers: a list of Compiler objects.
scope: configuration scope to modify.
"""
compiler_config = get_compiler_config(
configuration=spack.config.CONFIG, scope=scope, init_config=False
)
compiler_config = get_compiler_config(scope, init_config)
for compiler in compilers:
if not compiler.cc:
tty.debug(f"{compiler.spec} does not have a C compiler")
@@ -289,9 +184,6 @@ def remove_compiler_from_config(compiler_spec, scope=None):
for current_scope in candidate_scopes:
removal_happened |= _remove_compiler_from_scope(compiler_spec, scope=current_scope)
msg = "`spack compiler remove` will not remove compilers defined in packages.yaml"
msg += "\nTo remove these compilers, either edit the config or use `spack external remove`"
tty.debug(msg)
return removal_happened
@@ -306,9 +198,7 @@ def _remove_compiler_from_scope(compiler_spec, scope):
True if one or more compiler entries were actually removed, False otherwise
"""
assert scope is not None, "a specific scope is needed when calling this function"
compiler_config = get_compiler_config(
configuration=spack.config.CONFIG, scope=scope, init_config=False
)
compiler_config = get_compiler_config(scope)
filtered_compiler_config = [
compiler_entry
for compiler_entry in compiler_config
@@ -323,36 +213,22 @@ def _remove_compiler_from_scope(compiler_spec, scope):
# We need to preserve the YAML type for comments, hence we are copying the
# items in the list that has just been retrieved
compiler_config[:] = filtered_compiler_config
spack.config.CONFIG.set("compilers", compiler_config, scope=scope)
spack.config.set("compilers", compiler_config, scope=scope)
return True
def all_compilers_config(
configuration: "spack.config.Configuration",
*,
scope: Optional[str] = None,
init_config: bool = True,
) -> List["spack.compiler.Compiler"]:
"""Return a set of specs for all the compiler versions currently """Return a set of specs for all the compiler versions currently
available to build with. These are instances of CompilerSpec. available to build with. These are instances of CompilerSpec.
""" """
from_packages_yaml = get_compiler_config_from_packages(configuration, scope=scope) return get_compiler_config(scope, init_config)
if from_packages_yaml:
init_config = False
from_compilers_yaml = get_compiler_config(configuration, scope=scope, init_config=init_config)
result = from_compilers_yaml + from_packages_yaml
# Dedupe entries by the compiler they represent
# If the entry is invalid, treat it as unique for deduplication
key = lambda c: _compiler_from_config_entry(c["compiler"]) or id(c)
return list(llnl.util.lang.dedupe(result, key=key))
def all_compiler_specs(scope=None, init_config=True):
# Return compiler specs from the merged config.
return [
spack.spec.parse_with_version_concrete(s["compiler"]["spec"], compiler=True)
for s in all_compilers_config(spack.config.CONFIG, scope=scope, init_config=init_config)
for s in all_compilers_config(scope, init_config)
]
@@ -512,20 +388,11 @@ def find_specs_by_arch(compiler_spec, arch_spec, scope=None, init_config=True):
def all_compilers(scope=None, init_config=True):
return all_compilers_from(
configuration=spack.config.CONFIG, scope=scope, init_config=init_config
)
config = get_compiler_config(scope, init_config=init_config)
compilers = list()
for items in config:
def all_compilers_from(configuration, scope=None, init_config=True):
compilers = []
for items in all_compilers_config(
configuration=configuration, scope=scope, init_config=init_config
):
items = items["compiler"]
compiler = _compiler_from_config_entry(items)  # can be None in error case
compilers.append(_compiler_from_config_entry(items))
if compiler:
compilers.append(compiler)
return compilers
@@ -536,7 +403,10 @@ def compilers_for_spec(
"""This gets all compilers that satisfy the supplied CompilerSpec. """This gets all compilers that satisfy the supplied CompilerSpec.
Returns an empty list if none are found. Returns an empty list if none are found.
""" """
config = all_compilers_config(spack.config.CONFIG, scope=scope, init_config=init_config) if use_cache:
config = all_compilers_config(scope, init_config)
else:
config = get_compiler_config(scope, init_config)
matches = set(find(compiler_spec, scope, init_config)) matches = set(find(compiler_spec, scope, init_config))
compilers = [] compilers = []
@@ -546,7 +416,7 @@ def compilers_for_spec(
def compilers_for_arch(arch_spec, scope=None):
config = all_compilers_config(spack.config.CONFIG, scope=scope)
config = all_compilers_config(scope)
return list(get_compilers(config, arch_spec=arch_spec))
@@ -632,10 +502,7 @@ def _compiler_from_config_entry(items):
compiler = _compiler_cache.get(config_id, None)
if compiler is None:
try:
compiler = compiler_from_dict(items)
except UnknownCompilerError as e:
warnings.warn(e.message)
compiler = compiler_from_dict(items)
_compiler_cache[config_id] = compiler
return compiler
@@ -688,9 +555,7 @@ def get_compilers(config, cspec=None, arch_spec=None):
raise ValueError(msg)
continue
compiler = _compiler_from_config_entry(items)
if compiler:
compilers.append(compiler)
compilers.append(_compiler_from_config_entry(items))
return compilers
@@ -718,7 +583,9 @@ def get_compiler_duplicates(compiler_spec, arch_spec):
scope_to_compilers = {}
for scope in config.scopes:
compilers = compilers_for_spec(compiler_spec, arch_spec=arch_spec, scope=scope)
compilers = compilers_for_spec(
compiler_spec, arch_spec=arch_spec, scope=scope, use_cache=False
)
if compilers:
scope_to_compilers[scope] = compilers

View File

@@ -0,0 +1,34 @@
# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import spack.compilers.oneapi
class Dpcpp(spack.compilers.oneapi.Oneapi):
"""This is the same as the oneAPI compiler but uses dpcpp instead of
icpx (for DPC++ source files). It explicitly refers to dpcpp, so that
CMake test files which check the compiler name (e.g. CMAKE_CXX_COMPILER)
detect it as dpcpp.
Ideally we could switch out icpx for dpcpp where needed in the oneAPI
compiler definition, but two things are needed for that: (a) a way to
tell the compiler that it should be using dpcpp and (b) a way to
customize the link_paths
See also: https://www.intel.com/content/www/us/en/develop/documentation/oneapi-dpcpp-cpp-compiler-dev-guide-and-reference/top/compiler-setup/using-the-command-line/invoking-the-compiler.html
"""
# Subclasses use possible names of C++ compiler
cxx_names = ["dpcpp"]
# Named wrapper links within build_env_path
link_paths = {
"cc": os.path.join("oneapi", "icx"),
"cxx": os.path.join("oneapi", "dpcpp"),
"f77": os.path.join("oneapi", "ifx"),
"fc": os.path.join("oneapi", "ifx"),
}

View File

@@ -10,8 +10,6 @@
import tempfile
from typing import Dict, List, Set
import archspec.cpu
import spack.compiler
import spack.operating_systems.windows_os
import spack.platforms
@@ -188,9 +186,6 @@ def __init__(self, *args, **kwargs):
# get current platform architecture and format for vcvars argument
arch = spack.platforms.real_host().default.lower()
arch = arch.replace("-", "_")
if str(archspec.cpu.host().family) == "x86_64":
arch = "amd64"
self.vcvars_call = VCVarsInvocation(vcvars_script_path, arch, self.msvc_version)
env_cmds.append(self.vcvars_call)
# Below is a check for a valid fortran path
@@ -199,7 +194,7 @@ def __init__(self, *args, **kwargs):
# for a fortran compiler
if paths[2]:
# If this found, it sets all the vars
oneapi_root = os.path.join(self.cc, "../../..")
oneapi_root = os.getenv("ONEAPI_ROOT")
oneapi_root_setvars = os.path.join(oneapi_root, "setvars.bat")
oneapi_version_setvars = os.path.join(
oneapi_root, "compiler", str(self.ifx_version), "env", "vars.bat"
@@ -323,7 +318,7 @@ def fc_version(cls, fc):
fc_path[fc_ver] = fc
if os.getenv("ONEAPI_ROOT"):
try:
sps = spack.operating_systems.windows_os.WindowsOs().compiler_search_paths
sps = spack.operating_systems.windows_os.WindowsOs.compiler_search_paths
except AttributeError:
raise SpackError("Windows compiler search paths not established")
clp = spack.util.executable.which_string("cl", path=sps)

View File

@@ -749,6 +749,7 @@ def _concretize_specs_together_new(*abstract_specs, **kwargs):
result = solver.solve(
abstract_specs, tests=kwargs.get("tests", False), allow_deprecated=allow_deprecated
)
result.raise_if_unsat()
return [s.copy() for s in result.specs]

View File

@@ -107,7 +107,7 @@
#: metavar to use for commands that accept scopes
#: this is shorter and more readable than listing all choices
SCOPES_METAVAR = "{defaults,system,site,user,command_line}[/PLATFORM] or env:ENVIRONMENT"
SCOPES_METAVAR = "{defaults,system,site,user}[/PLATFORM] or env:ENVIRONMENT"
#: Base name for the (internal) overrides scope.
_OVERRIDES_BASE_NAME = "overrides-"
@@ -764,31 +764,6 @@ def _add_platform_scope(
cfg.push_scope(scope_type(plat_name, plat_path))
def config_paths_from_entry_points() -> List[Tuple[str, str]]:
"""Load configuration paths from entry points
A python package can register entry point metadata so that Spack can find
its configuration by adding the following to the project's pyproject.toml:
.. code-block:: toml
[project.entry-points."spack.config"]
baz = "baz:get_spack_config_path"
The function ``get_spack_config_path`` returns the path to the package's
spack configuration scope
"""
config_paths: List[Tuple[str, str]] = []
for entry_point in lang.get_entry_points(group="spack.config"):
hook = entry_point.load()
if callable(hook):
config_path = hook()
if config_path and os.path.exists(config_path):
config_paths.append(("plugin-%s" % entry_point.name, str(config_path)))
return config_paths
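The entry-point hook removed above is easiest to read with a concrete example; the `baz` name and `get_spack_config_path` come from the docstring, while the package layout below is an assumption:

# pyproject.toml of a hypothetical "baz" plugin package:
#
#     [project.entry-points."spack.config"]
#     baz = "baz:get_spack_config_path"
#
# baz/__init__.py
import pathlib


def get_spack_config_path():
    # Directory shipped with the package that holds its Spack config scope
    # (for example a packages.yaml); Spack only adds it if the path exists.
    return str(pathlib.Path(__file__).parent / "spack_config")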
def _add_command_line_scopes(
cfg: Union[Configuration, lang.Singleton], command_line_scopes: List[str]
) -> None:
@@ -841,9 +816,6 @@ def create() -> Configuration:
# No site-level configs should be checked into spack by default.
configuration_paths.append(("site", os.path.join(spack.paths.etc_path)))
# Python package's can register configuration scopes via entry_points
configuration_paths.extend(config_paths_from_entry_points())
# User configuration can override both spack defaults and site config
# This is disabled if user asks for no local configuration.
if not disable_local_config:

View File

@@ -19,6 +19,9 @@
},
"os_package_manager": "dnf",
"build": "spack/fedora38",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "docker.io/fedora:38"
}
@@ -30,6 +33,9 @@
},
"os_package_manager": "dnf",
"build": "spack/fedora37",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "docker.io/fedora:37"
}
@@ -41,6 +47,9 @@
},
"os_package_manager": "dnf_epel",
"build": "spack/rockylinux9",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "docker.io/rockylinux:9"
}
@@ -52,6 +61,9 @@
},
"os_package_manager": "dnf_epel",
"build": "spack/rockylinux8",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "docker.io/rockylinux:8"
}
@@ -63,6 +75,9 @@
},
"os_package_manager": "dnf_epel",
"build": "spack/almalinux9",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "quay.io/almalinuxorg/almalinux:9"
}
@@ -74,6 +89,9 @@
},
"os_package_manager": "dnf_epel",
"build": "spack/almalinux8",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "quay.io/almalinuxorg/almalinux:8"
}
@@ -87,6 +105,9 @@
"build": "spack/centos-stream", "build": "spack/centos-stream",
"final": { "final": {
"image": "quay.io/centos/centos:stream" "image": "quay.io/centos/centos:stream"
},
"build_tags": {
"develop": "latest"
} }
}, },
"centos:7": { "centos:7": {
@@ -94,7 +115,10 @@
"template": "container/centos_7.dockerfile" "template": "container/centos_7.dockerfile"
}, },
"os_package_manager": "yum", "os_package_manager": "yum",
"build": "spack/centos7" "build": "spack/centos7",
"build_tags": {
"develop": "latest"
}
}, },
"opensuse/leap:15": { "opensuse/leap:15": {
"bootstrap": { "bootstrap": {
@@ -102,6 +126,9 @@
},
"os_package_manager": "zypper",
"build": "spack/leap15",
"build_tags": {
"develop": "latest"
},
"final": {
"image": "opensuse/leap:latest"
}
@@ -121,13 +148,19 @@
"template": "container/ubuntu_2204.dockerfile" "template": "container/ubuntu_2204.dockerfile"
}, },
"os_package_manager": "apt", "os_package_manager": "apt",
"build": "spack/ubuntu-jammy" "build": "spack/ubuntu-jammy",
"build_tags": {
"develop": "latest"
}
}, },
"ubuntu:20.04": { "ubuntu:20.04": {
"bootstrap": { "bootstrap": {
"template": "container/ubuntu_2004.dockerfile" "template": "container/ubuntu_2004.dockerfile"
}, },
"build": "spack/ubuntu-focal", "build": "spack/ubuntu-focal",
"build_tags": {
"develop": "latest"
},
"os_package_manager": "apt" "os_package_manager": "apt"
}, },
"ubuntu:18.04": { "ubuntu:18.04": {
@@ -135,7 +168,10 @@
"template": "container/ubuntu_1804.dockerfile" "template": "container/ubuntu_1804.dockerfile"
}, },
"os_package_manager": "apt", "os_package_manager": "apt",
"build": "spack/ubuntu-bionic" "build": "spack/ubuntu-bionic",
"build_tags": {
"develop": "latest"
}
} }
}, },
"os_package_managers": { "os_package_managers": {

View File

@@ -50,7 +50,10 @@ def build_info(image, spack_version):
if not build_image:
return None, None
return build_image, spack_version
# Translate version from git to docker if necessary
build_tag = image_data["build_tags"].get(spack_version, spack_version)
return build_image, build_tag
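A rough illustration of the build_tags lookup above, using the ubuntu:22.04 entry added to images.json earlier in this diff; the requested versions are only examples:

image_data = {"build": "spack/ubuntu-jammy", "build_tags": {"develop": "latest"}}

build_image = image_data["build"]
# "develop" has a mapping, so it is translated to the "latest" Docker tag ...
assert image_data["build_tags"].get("develop", "develop") == "latest"
# ... while a plain release version falls through unchanged.
assert image_data["build_tags"].get("0.20.1", "0.20.1") == "0.20.1"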
def os_package_manager_for(image):

View File

@@ -227,7 +227,7 @@ def read(path, apply_updates):
if apply_updates and compilers:
for compiler in compilers:
try:
spack.compilers.add_compilers_to_config([compiler])
spack.compilers.add_compilers_to_config([compiler], init_config=False)
except Exception:
warnings.warn(
f"Could not add compiler {str(compiler.spec)}: "

View File

@@ -1621,32 +1621,15 @@ def query_local(self, *args, **kwargs):
query_local.__doc__ += _QUERY_DOCSTRING
def query(self, *args, **kwargs):
"""Query the Spack database including all upstream databases.
"""Query the Spack database including all upstream databases."""
Additional Arguments:
install_tree (str): query 'all' (default), 'local', 'upstream', or upstream path
"""
install_tree = kwargs.pop("install_tree", "all")
valid_trees = ["all", "upstream", "local", self.root] + [u.root for u in self.upstream_dbs]
if install_tree not in valid_trees:
msg = "Invalid install_tree argument to Database.query()\n"
msg += f"Try one of {', '.join(valid_trees)}"
tty.error(msg)
return []
upstream_results = []
upstreams = self.upstream_dbs
for upstream_db in self.upstream_dbs:
if install_tree not in ("all", "upstream"):
upstreams = [u for u in self.upstream_dbs if u.root == install_tree]
for upstream_db in upstreams:
# queries for upstream DBs need to *not* lock - we may not
# have permissions to do this and the upstream DBs won't know about
# us anyway (so e.g. they should never uninstall specs)
upstream_results.extend(upstream_db._query(*args, **kwargs) or [])
local_results = []
local_results = set(self.query_local(*args, **kwargs))
if install_tree in ("all", "local") or self.root == install_tree:
local_results = set(self.query_local(*args, **kwargs))
results = list(local_results) + list(x for x in upstream_results if x not in local_results)
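For context, the install_tree keyword documented in the removed docstring above is used roughly like this; the package name is only an example:

import spack.store

db = spack.store.STORE.db
local_specs = db.query("hdf5", install_tree="local")        # local store only
upstream_specs = db.query("hdf5", install_tree="upstream")  # upstream databases only
everything = db.query("hdf5")                               # default: "all"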
@@ -1704,11 +1687,7 @@ def root(key, record):
with self.read_transaction():
roots = [rec.spec for key, rec in self._data.items() if root(key, rec)]
needed = set(id(spec) for spec in tr.traverse_nodes(roots, deptype=deptype))
return [
return [rec.spec for rec in self._data.values() if id(rec.spec) not in needed]
rec.spec
for rec in self._data.values()
if id(rec.spec) not in needed and rec.installed
]
def update_explicit(self, spec, explicit):
"""

View File

@@ -9,6 +9,8 @@
import tempfile
from typing import Any, Deque, Dict, Generator, List, NamedTuple, Tuple
import jinja2
from llnl.util import filesystem
import spack.repo
@@ -83,8 +85,6 @@ def _mock_layout(self) -> Generator[List[str], None, None]:
self.tmpdir.cleanup()
def _create_executable_scripts(self, mock_executables: MockExecutables) -> List[pathlib.Path]:
import jinja2
relative_paths = mock_executables.executables
script = mock_executables.script
script_template = jinja2.Template("#!/bin/bash\n{{ script }}\n")

View File

@@ -94,9 +94,6 @@ class OpenMpi(Package):
PatchesType = Optional[Union[Patcher, str, List[Union[Patcher, str]]]]
SUPPORTED_LANGUAGES = ("fortran", "cxx")
def _make_when_spec(value: WhenType) -> Optional["spack.spec.Spec"]:
"""Create a ``Spec`` that indicates when a directive should be applied.
@@ -588,9 +585,6 @@ def depends_on(
@see The section "Dependency specs" in the Spack Packaging Guide.
"""
if spack.spec.Spec(spec).name in SUPPORTED_LANGUAGES:
assert type == "build", "languages must be of 'build' type"
return _language(lang_spec_str=spec, when=when)
def _execute_depends_on(pkg: "spack.package_base.PackageBase"):
_depends_on(pkg, spec, when=when, type=type, patches=patches)
@@ -666,7 +660,6 @@ def patch(
level: int = 1,
when: WhenType = None,
working_dir: str = ".",
reverse: bool = False,
sha256: Optional[str] = None,
archive_sha256: Optional[str] = None,
) -> Patcher:
@@ -680,10 +673,10 @@ def patch(
level: patch level (as in the patch shell command)
when: optional anonymous spec that specifies when to apply the patch
working_dir: dir to change to before applying
reverse: reverse the patch
sha256: sha256 sum of the patch, used to verify the patch (only required for URL patches)
archive_sha256: sha256 sum of the *archive*, if the patch is compressed (only required for
compressed URL patches)
"""
def _execute_patch(pkg_or_dep: Union["spack.package_base.PackageBase", Dependency]):
@@ -710,22 +703,18 @@ def _execute_patch(pkg_or_dep: Union["spack.package_base.PackageBase", Dependenc
patch: spack.patch.Patch
if "://" in url_or_filename:
if sha256 is None:
raise ValueError("patch() with a url requires a sha256")
patch = spack.patch.UrlPatch(
pkg,
url_or_filename,
level,
working_dir=working_dir,
reverse=reverse,
working_dir,
ordering_key=ordering_key,
sha256=sha256,
archive_sha256=archive_sha256,
)
else:
patch = spack.patch.FilePatch(
pkg, url_or_filename, level, working_dir, reverse, ordering_key=ordering_key
pkg, url_or_filename, level, working_dir, ordering_key=ordering_key
)
cur_patches.append(patch)
@@ -927,9 +916,9 @@ def maintainers(*names: str):
""" """
def _execute_maintainer(pkg): def _execute_maintainer(pkg):
maintainers = set(getattr(pkg, "maintainers", [])) maintainers_from_base = getattr(pkg, "maintainers", [])
maintainers.update(names) # Here it is essential to copy, otherwise we might add to an empty list in the parent
pkg.maintainers = sorted(maintainers) pkg.maintainers = list(sorted(set(maintainers_from_base + list(names))))
return _execute_maintainer return _execute_maintainer
@@ -973,6 +962,7 @@ def license(
checked_by: string or list of strings indicating which github user checked the
license (if any).
when: A spec specifying when the license applies.
when: A spec specifying when the license applies.
""" """
return lambda pkg: _execute_license(pkg, license_identifier, when) return lambda pkg: _execute_license(pkg, license_identifier, when)
@@ -1019,21 +1009,6 @@ def _execute_requires(pkg: "spack.package_base.PackageBase"):
return _execute_requires
@directive("languages")
def _language(lang_spec_str: str, *, when: Optional[Union[str, bool]] = None):
"""Temporary implementation of language virtuals, until compilers are proper dependencies."""
def _execute_languages(pkg: "spack.package_base.PackageBase"):
when_spec = _make_when_spec(when)
if not when_spec:
return
languages = pkg.languages.setdefault(when_spec, set())
languages.add(lang_spec_str)
return _execute_languages
class DirectiveError(spack.error.SpackError):
"""This is raised when something is wrong with a package directive."""

View File

@@ -626,13 +626,14 @@ def view(self, new: Optional[str] = None) -> SimpleFilesystemView:
new: If a string, create a FilesystemView rooted at that path. Default None. This
should only be used to regenerate the view, and cannot be used to access specs.
"""
path = new if new else self._current_root
if not path:
root = new if new else self._current_root
if not root:
# This can only be hit if we write a future bug
raise SpackEnvironmentViewError(
f"Attempting to get nonexistent view from environment. View root is at {self.root}"
"Attempting to get nonexistent view from environment. "
f"View root is at {self.root}"
)
return self._view(path)
return self._view(root)
def _view(self, root: str) -> SimpleFilesystemView:
"""Returns a view object for a given root dir."""
@@ -677,9 +678,7 @@ def specs_for_view(self, concrete_roots: List[Spec]) -> List[Spec]:
# Filter selected, installed specs
with spack.store.STORE.db.read_transaction():
result = [s for s in specs if s in self and s.installed]
return [s for s in specs if s in self and s.installed]
return self._exclude_duplicate_runtimes(result)
def regenerate(self, concrete_roots: List[Spec]) -> None:
specs = self.specs_for_view(concrete_roots)
@@ -766,16 +765,6 @@ def regenerate(self, concrete_roots: List[Spec]) -> None:
msg += str(e)
tty.warn(msg)
def _exclude_duplicate_runtimes(self, nodes):
all_runtimes = spack.repo.PATH.packages_with_tags("runtime")
runtimes_by_name = {}
for s in nodes:
if s.name not in all_runtimes:
continue
current_runtime = runtimes_by_name.get(s.name, s)
runtimes_by_name[s.name] = max(current_runtime, s, key=lambda x: x.version)
return [x for x in nodes if x.name not in all_runtimes or runtimes_by_name[x.name] == x]
def _create_environment(path):
return Environment(path)
@@ -1427,7 +1416,7 @@ def _concretize_separately(self, tests=False):
# Ensure we have compilers in compilers.yaml to avoid that
# processes try to write the config file in parallel
_ = spack.compilers.get_compiler_config(spack.config.CONFIG, init_config=True)
_ = spack.compilers.get_compiler_config()
# Early return if there is nothing to do
if len(args) == 0:
@@ -1496,6 +1485,44 @@ def _concretize_separately(self, tests=False):
]
return results
def concretize_and_add(self, user_spec, concrete_spec=None, tests=False):
"""Concretize and add a single spec to the environment.
Concretize the provided ``user_spec`` and add it along with the
concretized result to the environment. If the given ``user_spec`` was
already present in the environment, this does not add a duplicate.
The concretized spec will be added unless the ``user_spec`` was
already present and an associated concrete spec was already present.
Args:
concrete_spec: if provided, then it is assumed that it is the
result of concretizing the provided ``user_spec``
"""
if self.unify is True:
msg = (
"cannot install a single spec in an environment that is "
"configured to be concretized together. Run instead:\n\n"
" $ spack add <spec>\n"
" $ spack install\n"
)
raise SpackEnvironmentError(msg)
spec = Spec(user_spec)
if self.add(spec):
concrete = concrete_spec or spec.concretized(tests=tests)
self._add_concrete_spec(spec, concrete)
else:
# spec might be in the user_specs, but not installed.
# TODO: Redo name-based comparison for old style envs
spec = next(s for s in self.user_specs if s.satisfies(user_spec))
concrete = self.specs_by_hash.get(spec.dag_hash())
if not concrete:
concrete = spec.concretized(tests=tests)
self._add_concrete_spec(spec, concrete)
return concrete
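A short usage sketch for concretize_and_add as shown above; the environment path and spec are illustrative, and the environment must not use unified concretization:

import spack.environment as ev

env = ev.Environment("/path/to/my-env")  # illustrative path
concrete = env.concretize_and_add("zlib@1.2.13")
env.write()  # persist the new user spec and its concrete counterpart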
@property
def default_view(self):
if not self.has_view(default_view_name):
@@ -2185,7 +2212,7 @@ def _tree_to_display(spec):
return spec.tree(
recurse_dependencies=True,
format=spack.spec.DISPLAY_FORMAT,
status_fn=spack.spec.Spec.install_status,
install_status=True,
hashlen=7,
hashes=True,
)

View File

@@ -12,7 +12,6 @@
import re
import sys
import types
from pathlib import Path
from typing import List
import llnl.util.lang
@@ -133,38 +132,10 @@ def load_extension(name: str) -> str:
def get_extension_paths():
"""Return the list of canonicalized extension paths from config:extensions."""
extension_paths = spack.config.get("config:extensions") or []
extension_paths.extend(extension_paths_from_entry_points())
paths = [spack.util.path.canonicalize_path(p) for p in extension_paths]
return paths
def extension_paths_from_entry_points() -> List[str]:
"""Load extensions from a Python package's entry points.
A python package can register entry point metadata so that Spack can find
its extensions by adding the following to the project's pyproject.toml:
.. code-block:: toml
[project.entry-points."spack.extensions"]
baz = "baz:get_spack_extensions"
The function ``get_spack_extensions`` returns paths to the package's
spack extensions
"""
extension_paths: List[str] = []
for entry_point in llnl.util.lang.get_entry_points(group="spack.extensions"):
hook = entry_point.load()
if callable(hook):
paths = hook() or []
if isinstance(paths, (Path, str)):
extension_paths.append(str(paths))
else:
extension_paths.extend(paths)
return extension_paths
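Analogous to the config hook earlier, a hypothetical package could expose extensions through this entry point; only the entry-point group and function name come from the docstring above, the rest is an assumed layout:

# Registered in pyproject.toml under [project.entry-points."spack.extensions"]
import pathlib


def get_spack_extensions():
    # Return one or more directories, each laid out as a spack-<name> extension.
    return [str(pathlib.Path(__file__).parent / "spack-baz")]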
def get_command_paths():
"""Return the list of paths where to search for command files."""
command_paths = []

Some files were not shown because too many files have changed in this diff.